/*
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
#include <asm/irq_handler.h>

u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
struct blackfin_initial_pda __initdata initial_pda;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size; /* size of memory segment */
		unsigned long type;	 /* type of memory segment */
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;
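
/*
 * Scratch tables used by sanitize_memmap() below to sort region
 * change-points and track which entries overlap at any given address.
 */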
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr;	  /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif
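
/*
 * Per-CPU cache bring-up: point the hardware at this CPU's CPLB
 * tables, enable the caches, and snapshot the resulting *MEM_CONTROL
 * settings into its per-CPU data.
 */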
void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	bfin_setup_cpudata(cpu);

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */

#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");

#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
	unsigned long l2_len = (unsigned long)_l2_len;

	early_shadow_stamp();

	/*
	 * The L1 sections are linked to run from on-chip SRAM but are
	 * loaded with the rest of the image in external memory at the
	 * _lma addresses.  Copy them into place with the early DMA
	 * engine: L1 instruction SRAM is not writable with core stores,
	 * so DMA is the only way to fill it this early.
	 */
	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

	early_dma_memcpy_done();

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
	blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif

	/* if necessary, copy L2 text/data to L2 SRAM */
	if (L2_LENGTH && l2_len)
		memcpy(_stext_l2, _l2_lma, l2_len);
}

#ifdef CONFIG_SMP
void __init bfin_relocate_coreb_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to Core B's L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
				 text_l1_len);

	/* if necessary, copy L1 data to Core B's L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
				 data_l1_len);

	/* if necessary, copy L1 data B to Core B's L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
				 data_b_l1_len);

	early_dma_memcpy_done();

#ifdef CONFIG_ICACHE_FLUSH_L1
	blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
			(unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif

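/*
 * For XIP/ROM kernels, the writable .data and .init.data sections must
 * be copied out of flash into RAM.  The THREAD_SIZE adjustment accounts
 * for the init thread's stack sharing the data section: only the
 * thread_info at its base needs to be copied.
 */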
#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
	early_shadow_stamp();

	memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
	memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
				     unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Oops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
	 * The algorithm mirrors the x86 e820 sanitizer: record a
	 * change-point at the start and end of every region, sort the
	 * change-points by address, then walk them while maintaining
	 * the set of regions overlapping the current address.  Each
	 * interval is emitted once with the highest (most restrictive)
	 * type that covers it, so the resulting map has no overlaps.
	 */

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use
		   (larger value takes precedence -- 1=usable, 2+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}
460
461static __init int parse_memmap(char *arg)
462{
463 unsigned long long start_at, mem_size;
464
465 if (!arg)
466 return -EINVAL;
467
468 mem_size = memparse(arg, &arg);
469 if (*arg == '@') {
470 start_at = memparse(arg+1, &arg);
471 add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
472 } else if (*arg == '$') {
473 start_at = memparse(arg+1, &arg);
474 add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
475 }
476
477 return 0;
478}

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}

/*
 * Set up the run-time memory map.  This decides where the kernel
 * image, the uncached DMA region and (for CONFIG_MTD_UCLINUX) the
 * attached MTD filesystem image live, applies limits required by
 * hardware anomalies, and prints the resulting layout.
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif
	unsigned long max_mem;

	_rambase = CONFIG_BOOT_LOAD;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (max_mem >= 56 * 1024 * 1024)
		max_mem = 56 * 1024 * 1024;
# else
	if (max_mem >= 60 * 1024 * 1024)
		max_mem = 60 * 1024 * 1024;
# endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif				/* ANOMALY_05000263 */

#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
		    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
		mtd_size =
		    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

		/* ROM_FS is XIP, so if we found it, we need to limit memory */
		if (memory_end > max_mem) {
			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
				max_mem >> 20);
			memory_end = max_mem;
		}
	}
# endif				/* CONFIG_ROMFS_FS */

	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read 8 past the end of the kernel's image, and look at it.
	 * When no image is attached, mtd_size is set to a random number.
	 * Do some basic sanity checks before operating on things.
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find valid ram mtd attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;
		pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);
		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif				/* CONFIG_MTD_UCLINUX */

	/*
	 * If the ROMFS branch above did not run (or CONFIG_MTD_UCLINUX
	 * is off), the anomaly 05000263 limit computed earlier still
	 * needs to be applied here.
	 */
	if (memory_end > max_mem) {
		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
			max_mem >> 20);
		memory_end = max_mem;
	}

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
	page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
			     ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);

	printk(KERN_INFO "Memory map:\n"
	       "  fixedcode = 0x%p-0x%p\n"
	       "  text      = 0x%p-0x%p\n"
	       "  rodata    = 0x%p-0x%p\n"
	       "  bss       = 0x%p-0x%p\n"
	       "  data      = 0x%p-0x%p\n"
	       "    stack   = 0x%p-0x%p\n"
	       "  init      = 0x%p-0x%p\n"
	       "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
	       "  DMA Zone  = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + THREAD_SIZE),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
	       );
}

/*
 * Find the lowest and highest page frame numbers we have available.
 */
void __init find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = memory_end;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}

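/*
 * Register the usable regions of the sanitized memmap with the bootmem
 * allocator, then reserve everything from PAGE_OFFSET up to the end of
 * the bootmem bitmap (kernel image included) so it is never handed out.
 */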
static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;

	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Reserve usable memory
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
				 bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(PAGE_OFFSET,
		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
		BOOTMEM_DEFAULT);
}
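
/*
 * Probe how much external memory the SDRAM/DDR controller was set up
 * for.  Each nibble of EBIU_SDBCTL describes one SDRAM bank (bit 0 is
 * the bank-enable bit, the remaining bits encode the bank size, see
 * EBSZ_TO_MEG below), while DDR parts encode the device size and width
 * in EBIU_DDRCTL1.
 */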
#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
	case 0x1: meg =  16; break; \
	case 0x3: meg =  32; break; \
	case 0x5: meg =  64; break; \
	case 0x7: meg = 128; break; \
	case 0x9: meg = 256; break; \
	case 0xb: meg = 512; break; \
	} \
	meg; \
})
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
	case DEVSZ_64:
		ret = 64 / 8;
		break;
	case DEVSZ_128:
		ret = 128 / 8;
		break;
	case DEVSZ_256:
		ret = 256 / 8;
		break;
	case DEVSZ_512:
		ret = 512 / 8;
		break;
	}
	switch (ddrctl & 0x30000) {
	case DEVWD_4:
		ret *= 2;
		/* fallthrough */
	case DEVWD_8:
		ret *= 2;
		/* fallthrough */
	case DEVWD_16:
		break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#endif
	BUG();
}
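
/*
 * Weak stub: board code can override this to register platform devices
 * that must exist before the generic setup code runs (early consoles
 * and the like).
 */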
__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}

void __init setup_arch(char **cmdline_p)
{
	u32 mmr;
	unsigned long sclk, cclk;

	native_machine_early_platform_add_devices();

	enable_shadow_console();

	/* Check to make sure we are running on the right processor */
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

	/*
	 * Command-line options take precedence: anything the user does
	 * not specify is filled in below from the probed controller
	 * settings.
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
	bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
	bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
	bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
	bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
					~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif

	mmr = bfin_read_TBUFCTL();
	printk(KERN_INFO "Hardware Trace %s and %sabled\n",
		(mmr & 0x1) ? "active" : "off",
		(mmr & 0x2) ? "en" : "dis");

	mmr = bfin_read_SYSCR();
	printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = mmr & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel, and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
			initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
			initial_pda.retx_doublefault);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
			initial_pda.dcplb_doublefault_addr);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
			initial_pda.icplb_doublefault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			initial_pda.retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");

	printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n",
			CPU, bfin_revid());
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n",
			CPU, bfin_compiled_revid());

	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
					bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
					bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
				CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
		cclk / 1000000, sclk / 1000000);

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
		FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
		!= SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
		!= ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
		!= ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
		!= ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
		!= ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
		!= ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
		!= ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
		!= ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Can't be done before */
}
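
/* Register each possible CPU with the sysfs/topology code */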
static int __init topology_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);
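
/*
 * Note: "clkin_hz=" is also matched directly in parse_cmdline_early()
 * above, so the cached value is set before the early_param machinery
 * runs.
 */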

/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/*
	 * The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}

/* Get the Core clock */
u_long get_cclk(void)
{
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);

/* Get the System clock */
u_long get_sclk(void)
{
	static u_long cached_sclk;
	u_long ssel;

	/*
	 * The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);

unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);

/*
 *	Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* Check Cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		   "cacheable"
#else
		   "uncacheable"
#endif
		   " in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		      "cacheable (write-through)"
#else
		      "uncacheable"
#endif
		      " in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			   BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
#endif

	seq_printf(m, "\n");

	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			      "cacheable"
#else
			      "uncacheable"
#endif
			      " in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			      "cacheable (write-through)"
#else
			      "uncacheable"
#endif
			      " in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
		physical_mem_end >> 10, 0ul, physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
		((int)memory_end - (int)_rambase) >> 10,
		_rambase, memory_end);

	return 0;
}
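
/* seq_file hooks: iterate /proc/cpuinfo over the online CPUs */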
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = cpumask_first(cpu_online_mask);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = cpumask_next(*pos, cpu_online_mask);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
	early_shadow_stamp();
	if (r0)
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
}