1
2
3
4
5
6
7#include <linux/delay.h>
8#include <linux/console.h>
9#include <linux/bootmem.h>
10#include <linux/seq_file.h>
11#include <linux/cpu.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <linux/tty.h>
15#include <linux/pfn.h>
16
17#ifdef CONFIG_MTD_UCLINUX
18#include <linux/mtd/map.h>
19#include <linux/ext2_fs.h>
20#include <linux/cramfs_fs.h>
21#include <linux/romfs_fs.h>
22#endif
23
24#include <asm/cplb.h>
25#include <asm/cacheflush.h>
26#include <asm/blackfin.h>
27#include <asm/cplbinit.h>
28#include <asm/div64.h>
29#include <asm/cpu.h>
30#include <asm/fixed_code.h>
31#include <asm/early_printk.h>
32
/* Value of the reset status register captured at boot; used later to tell
 * whether we are recovering from a double fault, watchdog, or soft reset. */
u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

/* Kernel memory layout, established by memory_setup(). */
unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
/* Set from "max_mem=...$" / "max_mem=...#" to make the reserved tail of
 * memory data- or instruction-cacheable (see parse_cmdline_early()). */
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
/* Location/size of the MTD filesystem image after memory_setup() has
 * relocated it to the top of usable memory. */
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

/* Boot command line, plus CPU state saved across a double-fault reset
 * (printed from setup_arch() when recovery is detected). */
char __initdata command_line[COMMAND_LINE_SIZE];
void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
	*init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;

/* boot memmap, populated by add_memory_region() / "memmap=" parsing */
#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM 1
#define BFIN_MEMMAP_RESERVED 2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size; /* size of memory segment */
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* Scratch tables used only by sanitize_memmap(). */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address at which this change takes effect */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

/* Per-CPU bookkeeping filled in by bfin_setup_cpudata(). */
DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);
85
86#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
87void __init generate_cplb_tables(void)
88{
89 unsigned int cpu;
90
91 generate_cplb_tables_all();
92
93 for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
94 generate_cplb_tables_cpu(cpu);
95}
96#endif
97
/*
 * Enable the instruction/data caches on one CPU (per kernel config) and
 * report the resulting cacheability of external memory and L2 SRAM.
 */
void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	/*
	 * The printk's below are deferred until after the caches are
	 * actually enabled above, so the per-region status they report
	 * reflects the final configuration for this CPU.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO " External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO " L2 SRAM :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");

#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO " External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO " L2 SRAM :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
161
162void __cpuinit bfin_setup_cpudata(unsigned int cpu)
163{
164 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
165
166 cpudata->idle = current;
167 cpudata->imemctl = bfin_read_IMEM_CONTROL();
168 cpudata->dmemctl = bfin_read_DMEM_CONTROL();
169}
170
/*
 * Boot-CPU cache bring-up: build the CPLB tables (only when a cache is
 * configured) and then enable the caches on CPU 0.
 */
void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}
178
/*
 * Relocate code/data destined for on-chip L1 (and L2) SRAM from the
 * image load area (_l1_lma_start / _l2_lma_start) to its runtime
 * addresses.  L1 is copied with the early DMA helper; L2 with a plain
 * memcpy.  Must run before anything placed in L1 is executed/used.
 */
void __init bfin_relocate_l1_mem(void)
{
	unsigned long l1_code_length;
	unsigned long l1_data_a_length;
	unsigned long l1_data_b_length;
	unsigned long l2_length;

	early_shadow_stamp();

	/*
	 * The L1 sections are laid out back-to-back at _l1_lma_start:
	 * code, then data bank A, then data bank B; each copy below
	 * offsets into the LMA by the lengths of the preceding sections.
	 */
	blackfin_dma_early_init();

	/* L1 instruction SRAM: _stext_l1 .. _etext_l1 */
	l1_code_length = _etext_l1 - _stext_l1;
	if (l1_code_length)
		early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);

	/* initialized L1 data bank A: _sdata_l1 .. _sbss_l1 */
	l1_data_a_length = _sbss_l1 - _sdata_l1;
	if (l1_data_a_length)
		early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);

	/* initialized L1 data bank B: _sdata_b_l1 .. _sbss_b_l1 */
	l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
	if (l1_data_b_length)
		early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
			l1_data_a_length, l1_data_b_length);

	/* wait for all queued early DMA transfers to drain */
	early_dma_memcpy_done();

	/* L2 SRAM (parts that have it) is reachable by the core directly */
	if (L2_LENGTH != 0) {
		l2_length = _sbss_l2 - _stext_l2;
		if (l2_length)
			memcpy(_stext_l2, _l2_lma_start, l2_length);
	}
}
229
230
231static void __init add_memory_region(unsigned long long start,
232 unsigned long long size, int type)
233{
234 int i;
235
236 i = bfin_memmap.nr_map;
237
238 if (i == BFIN_MEMMAP_MAX) {
239 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
240 return;
241 }
242
243 bfin_memmap.map[i].addr = start;
244 bfin_memmap.map[i].size = size;
245 bfin_memmap.map[i].type = type;
246 bfin_memmap.nr_map++;
247}
248
249
250
251
/*
 * Sanitize the boot memmap, removing overlaps and splitting regions so
 * that the resulting map is disjoint and sorted.  This is the classic
 * x86 e820 sanitizer: every region contributes two "change points" (its
 * start and its end); the change points are sorted by address, then
 * walked in order while maintaining the set of regions overlapping the
 * current address.  The highest region type in the overlap set wins,
 * and a new map entry is emitted each time the winning type changes.
 *
 * Returns 0 on success, -1 when the map has fewer than two entries (no
 * sanitizing needed) or when an entry wraps past the end of the address
 * space.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high), bubble
	   sort: cheap and the list is tiny */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap;
			   also swap when addresses are equal and the current
			   point is a region start while the previous is an
			   end, so starts sort after ends at the same address */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use
		   (larger value takes precedence -- RESERVED beats RAM) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}
397
398static void __init print_memory_map(char *who)
399{
400 int i;
401
402 for (i = 0; i < bfin_memmap.nr_map; i++) {
403 printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
404 bfin_memmap.map[i].addr,
405 bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
406 switch (bfin_memmap.map[i].type) {
407 case BFIN_MEMMAP_RAM:
408 printk(KERN_CONT "(usable)\n");
409 break;
410 case BFIN_MEMMAP_RESERVED:
411 printk(KERN_CONT "(reserved)\n");
412 break;
413 default:
414 printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
415 break;
416 }
417 }
418}
419
420static __init int parse_memmap(char *arg)
421{
422 unsigned long long start_at, mem_size;
423
424 if (!arg)
425 return -EINVAL;
426
427 mem_size = memparse(arg, &arg);
428 if (*arg == '@') {
429 start_at = memparse(arg+1, &arg);
430 add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
431 } else if (*arg == '$') {
432 start_at = memparse(arg+1, &arg);
433 add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
434 }
435
436 return 0;
437}
438
439
440
441
442
443
444
445
446
447
448
/*
 * Initial parsing of the command line, done by hand here because the
 * memory layout must be known before the generic early_param machinery
 * runs.  Recognized options:
 *  - mem=<size>            limit kernel-managed memory (_ramend)
 *  - max_mem=<size>[$][#]  limit physical memory; a trailing '$' makes
 *                          the reserved tail dcacheable, '#' icacheable
 *  - clkin_hz=<hz>         board crystal frequency
 *  - earlyprintk=...       early console (when configured)
 *  - memmap=...            explicit memory region, see parse_memmap()
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		/* only match a keyword at the start of a word */
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					/* the flag may appear directly after the
					 * size or after one extra character */
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507static __init void memory_setup(void)
508{
509#ifdef CONFIG_MTD_UCLINUX
510 unsigned long mtd_phys = 0;
511#endif
512 unsigned long max_mem;
513
514 _rambase = (unsigned long)_stext;
515 _ramstart = (unsigned long)_end;
516
517 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
518 console_init();
519 panic("DMA region exceeds memory limit: %lu.",
520 _ramend - _ramstart);
521 }
522 max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;
523
524#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
525
526
527
528
529# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
530 if (max_mem >= 56 * 1024 * 1024)
531 max_mem = 56 * 1024 * 1024;
532# else
533 if (max_mem >= 60 * 1024 * 1024)
534 max_mem = 60 * 1024 * 1024;
535# endif
536#endif
537
538
539#ifdef CONFIG_MPU
540
541 memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
542#else
543 memory_start = PAGE_ALIGN(_ramstart);
544#endif
545
546#if defined(CONFIG_MTD_UCLINUX)
547
548 memory_mtd_end = memory_end;
549
550 mtd_phys = _ramstart;
551 mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
552
553# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
554 if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
555 mtd_size =
556 PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
557# endif
558
559# if defined(CONFIG_CRAMFS)
560 if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
561 mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
562# endif
563
564# if defined(CONFIG_ROMFS_FS)
565 if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
566 && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
567 mtd_size =
568 PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
569
570
571 if (memory_end > max_mem) {
572 pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
573 memory_end = max_mem;
574 }
575 }
576# endif
577
578
579
580
581
582
583 if (mtd_size == 0 || memory_end <= mtd_size) {
584 pr_emerg("Could not find valid ram mtd attached.\n");
585 } else {
586 memory_end -= mtd_size;
587
588
589 uclinux_ram_map.phys = memory_mtd_start = memory_end;
590 uclinux_ram_map.size = mtd_size;
591 pr_info("Found mtd parition at 0x%p, (len=0x%lx), moving to 0x%p\n",
592 _end, mtd_size, (void *)memory_mtd_start);
593 dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
594 }
595#endif
596
597
598
599
600
601 if (memory_end > max_mem) {
602 pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
603 memory_end = max_mem;
604 }
605
606#ifdef CONFIG_MPU
607 page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
608 page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
609#endif
610
611#if !defined(CONFIG_MTD_UCLINUX)
612
613 memory_end -= SIZE_4K;
614#endif
615
616 init_mm.start_code = (unsigned long)_stext;
617 init_mm.end_code = (unsigned long)_etext;
618 init_mm.end_data = (unsigned long)_edata;
619 init_mm.brk = (unsigned long)0;
620
621 printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
622 printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);
623
624 printk(KERN_INFO "Memory map:\n"
625 " fixedcode = 0x%p-0x%p\n"
626 " text = 0x%p-0x%p\n"
627 " rodata = 0x%p-0x%p\n"
628 " bss = 0x%p-0x%p\n"
629 " data = 0x%p-0x%p\n"
630 " stack = 0x%p-0x%p\n"
631 " init = 0x%p-0x%p\n"
632 " available = 0x%p-0x%p\n"
633#ifdef CONFIG_MTD_UCLINUX
634 " rootfs = 0x%p-0x%p\n"
635#endif
636#if DMA_UNCACHED_REGION > 0
637 " DMA Zone = 0x%p-0x%p\n"
638#endif
639 , (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
640 _stext, _etext,
641 __start_rodata, __end_rodata,
642 __bss_start, __bss_stop,
643 _sdata, _edata,
644 (void *)&init_thread_union,
645 (void *)((int)(&init_thread_union) + 0x2000),
646 __init_begin, __init_end,
647 (void *)_ramstart, (void *)memory_end
648#ifdef CONFIG_MTD_UCLINUX
649 , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
650#endif
651#if DMA_UNCACHED_REGION > 0
652 , (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
653#endif
654 );
655}
656
657
658
659
660void __init find_min_max_pfn(void)
661{
662 int i;
663
664 max_pfn = 0;
665 min_low_pfn = memory_end;
666
667 for (i = 0; i < bfin_memmap.nr_map; i++) {
668 unsigned long start, end;
669
670 if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
671 continue;
672 start = PFN_UP(bfin_memmap.map[i].addr);
673 end = PFN_DOWN(bfin_memmap.map[i].addr +
674 bfin_memmap.map[i].size);
675 if (start >= end)
676 continue;
677 if (end > max_pfn)
678 max_pfn = end;
679 if (start < min_low_pfn)
680 min_low_pfn = start;
681 }
682}
683
/*
 * Hand the sanitized memmap over to the bootmem allocator: place the
 * allocator bitmap just past the kernel image, free every usable RAM
 * page, then re-reserve everything below memory_start plus the bitmap.
 */
static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end as usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize the pfn globals from the memmap */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of the last page frame directly mapped by the kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after the kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;

	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, telling it to put
	 * the boot mem_map immediately after the kernel image
	 * (memory_start).
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
		memory_start >> PAGE_SHIFT,	/* map goes here */
		start_pfn, end_pfn);

	/* register the usable memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Only RAM regions are handed to bootmem; reserved
		 * regions stay allocated.
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * Round the start of usable memory up to a whole page:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve everything before memory_start, including the bootmap */
	reserve_bootmem(PAGE_OFFSET,
		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
		BOOTMEM_DEFAULT);
}
758
/*
 * Decode one 4-bit external-bank-size field of EBIU_SDBCTL into
 * megabytes.  Encodings not in the table decode to 0 (bank empty).
 * NOTE(review): encoding values per the switch below — confirm against
 * the processor Hardware Reference Manual.
 */
#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
	case 0x1: meg = 16; break; \
	case 0x3: meg = 32; break; \
	case 0x5: meg = 64; break; \
	case 0x7: meg = 128; break; \
	case 0x9: meg = 256; break; \
	case 0xb: meg = 512; break; \
	} \
	meg; \
})
772static inline int __init get_mem_size(void)
773{
774#if defined(EBIU_SDBCTL)
775# if defined(BF561_FAMILY)
776 int ret = 0;
777 u32 sdbctl = bfin_read_EBIU_SDBCTL();
778 ret += EBSZ_TO_MEG(sdbctl >> 0);
779 ret += EBSZ_TO_MEG(sdbctl >> 8);
780 ret += EBSZ_TO_MEG(sdbctl >> 16);
781 ret += EBSZ_TO_MEG(sdbctl >> 24);
782 return ret;
783# else
784 return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
785# endif
786#elif defined(EBIU_DDRCTL1)
787 u32 ddrctl = bfin_read_EBIU_DDRCTL1();
788 int ret = 0;
789 switch (ddrctl & 0xc0000) {
790 case DEVSZ_64: ret = 64 / 8;
791 case DEVSZ_128: ret = 128 / 8;
792 case DEVSZ_256: ret = 256 / 8;
793 case DEVSZ_512: ret = 512 / 8;
794 }
795 switch (ddrctl & 0x30000) {
796 case DEVWD_4: ret *= 2;
797 case DEVWD_8: ret *= 2;
798 case DEVWD_16: break;
799 }
800 if ((ddrctl & 0xc000) == 0x4000)
801 ret *= 2;
802 return ret;
803#endif
804 BUG();
805}
806
/*
 * Main architecture-specific setup, called from start_kernel(): parses
 * the command line, sizes and lays out memory, programs the EBIU async
 * banks, reports reset/double-fault state, installs the fixed-code
 * atomic sequences, and brings up exception vectors and caches.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long sclk, cclk;

	/* mirror early output into the shadow console buffer */
	enable_shadow_console();

	/* warn when the kernel was compiled for a different part */
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	/* a compiled-in command line replaces whatever the loader passed */
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* keep a copy of the command line for /proc/cmdline etc. */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

	/* options on the command line override the detected defaults;
	 * physical_mem_end is cleared first so we can tell whether
	 * "max_mem=" was given */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

	/* program the async (and, where present, flash/DDR mode) banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	/* these anomalies require the core clock to be >= 2 * SCLK */
	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		/* workaround: read both IMDMA status registers once */
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif
	/* report hardware trace buffer state (TBUFCTL bits 0 and 1) */
	printk(KERN_INFO "Hardware Trace ");
	if (bfin_read_TBUFCTL() & 0x1)
		printk(KERN_CONT "Active ");
	else
		printk(KERN_CONT "Off ");
	if (bfin_read_TBUFCTL() & 0x2)
		printk(KERN_CONT "and Enabled\n");
	else
		printk(KERN_CONT "and Disabled\n");

	printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF);

	/* older parts expose reset status in SWRST; newer ones mirror it
	 * in SYSCR (with the boot mode field masked off) */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
	defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* clear the boot mode field */
	_bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

	/* report recovery from a prior double fault, including the saved
	 * fault state when CONFIG_DEBUG_DOUBLEFAULT captured it */
#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* assumes the crashed kernel and this one share a symbol table */
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");

	printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any\n", CPU);
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
					bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
					bfin_compiled_revid(), bfin_revid());
				/* running on silicon older than we were built
				 * for means its anomaly workarounds are absent */
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
				CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
		cclk / 1000000, sclk / 1000000);

	setup_bootmem_allocator();

	paging_init();

	/* copy the atomic sequences to their fixed location, and sanity
	 * check that these are the addresses advertised to userspace */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
		FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
		!= SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
		!= ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
		!= ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
		!= ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
		!= ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
		!= ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
		!= ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
		!= ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* bring up caches for the boot CPU */
}
981
982static int __init topology_init(void)
983{
984 unsigned int cpu;
985
986 bfin_setup_cpudata(0);
987
988 for_each_possible_cpu(cpu) {
989 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
990 }
991
992 return 0;
993}
994
995subsys_initcall(topology_init);
996
997
/* Board crystal (CLKIN) frequency; overridable via "clkin_hz=". */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
/* Return the (possibly command-line-overridden) CLKIN frequency in Hz. */
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
/* Handler for "clkin_hz=<hz>"; also called directly from
 * parse_cmdline_early() before the early_param machinery runs. */
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	/* when the kernel reprograms the PLL, it was built against a fixed
	 * CLKIN; a different crystal would make every derived clock wrong */
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
/* NOTE(review): registered name includes a trailing '='; early_param
 * matching normally uses the bare name — confirm this is intentional
 * (the option is also handled manually in parse_cmdline_early()). */
early_param("clkin_hz=", early_init_clkin_hz);
1013
1014
1015static u_long get_vco(void)
1016{
1017 static u_long cached_vco;
1018 u_long msel, pll_ctl;
1019
1020
1021
1022
1023 if (cached_vco)
1024 return cached_vco;
1025
1026 pll_ctl = bfin_read_PLL_CTL();
1027 msel = (pll_ctl >> 9) & 0x3F;
1028 if (0 == msel)
1029 msel = 64;
1030
1031 cached_vco = get_clkin_hz();
1032 cached_vco >>= (1 & pll_ctl);
1033 cached_vco *= msel;
1034 return cached_vco;
1035}
1036
1037
1038u_long get_cclk(void)
1039{
1040 static u_long cached_cclk_pll_div, cached_cclk;
1041 u_long csel, ssel;
1042
1043 if (bfin_read_PLL_STAT() & 0x1)
1044 return get_clkin_hz();
1045
1046 ssel = bfin_read_PLL_DIV();
1047 if (ssel == cached_cclk_pll_div)
1048 return cached_cclk;
1049 else
1050 cached_cclk_pll_div = ssel;
1051
1052 csel = ((ssel >> 4) & 0x03);
1053 ssel &= 0xf;
1054 if (ssel && ssel < (1 << csel))
1055 cached_cclk = get_vco() / ssel;
1056 else
1057 cached_cclk = get_vco() >> csel;
1058 return cached_cclk;
1059}
1060EXPORT_SYMBOL(get_cclk);
1061
1062
1063u_long get_sclk(void)
1064{
1065 static u_long cached_sclk;
1066 u_long ssel;
1067
1068
1069
1070
1071 if (cached_sclk)
1072 return cached_sclk;
1073
1074 if (bfin_read_PLL_STAT() & 0x1)
1075 return get_clkin_hz();
1076
1077 ssel = bfin_read_PLL_DIV() & 0xf;
1078 if (0 == ssel) {
1079 printk(KERN_WARNING "Invalid System Clock\n");
1080 ssel = 1;
1081 }
1082
1083 cached_sclk = get_vco() / ssel;
1084 return cached_sclk;
1085}
1086EXPORT_SYMBOL(get_sclk);
1087
1088unsigned long sclk_to_usecs(unsigned long sclk)
1089{
1090 u64 tmp = USEC_PER_SEC * (u64)sclk;
1091 do_div(tmp, get_sclk());
1092 return tmp;
1093}
1094EXPORT_SYMBOL(sclk_to_usecs);
1095
1096unsigned long usecs_to_sclk(unsigned long usecs)
1097{
1098 u64 tmp = get_sclk() * (u64)usecs;
1099 do_div(tmp, USEC_PER_SEC);
1100 return tmp;
1101}
1102EXPORT_SYMBOL(usecs_to_sclk);
1103
1104
1105
1106
/*
 * /proc/cpuinfo "show" callback: emit one CPU's details, then the
 * system-wide lines (L2 SRAM, board info) after the last CPU.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	/* flag mismatches between the silicon rev and the compiled-for rev */
	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* decode this CPU's data bank configuration from DMEM_CONTROL */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* report zero sizes when the caches are not actually enabled */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		"cacheable"
#else
		"uncacheable"
#endif
		" in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		"cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		"cacheable (write-through)"
#else
		"uncacheable"
#endif
		" in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		"dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
#endif

	/* system-wide lines are only printed once, after the last CPU */
	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			"cacheable"
#else
			"uncacheable"
#endif
			" in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			"cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			"cacheable (write-through)"
#else
			"uncacheable"
#endif
			" in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
		physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
		((int)memory_end - (int)_stext) >> 10,
		_stext,
		(void *)memory_end);
	seq_printf(m, "\n");

	return 0;
}
1271
1272static void *c_start(struct seq_file *m, loff_t *pos)
1273{
1274 if (*pos == 0)
1275 *pos = first_cpu(cpu_online_map);
1276 if (*pos >= num_online_cpus())
1277 return NULL;
1278
1279 return pos;
1280}
1281
1282static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1283{
1284 *pos = next_cpu(*pos, cpu_online_map);
1285
1286 return c_start(m, pos);
1287}
1288
/* seq_file iterator teardown: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}

/* /proc/cpuinfo operations table */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
1299
1300void __init cmdline_init(const char *r0)
1301{
1302 early_shadow_stamp();
1303 if (r0)
1304 strncpy(command_line, r0, COMMAND_LINE_SIZE);
1305}
1306