1
2
3
4
5
6
7
8
9
10
11
12
13
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>

/* Pull in the low-level UART macros only when they are actually needed
 * (semihosting debug output needs no MMIO mapping or addruart macro). */
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
29
30
31
32
33
34
35
36
/* Virtual address of the start of the kernel image. */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)

/* The kernel is always loaded at a 0x...8000 offset: the initial page
 * tables live in the PG_DIR_SIZE bytes immediately below the image. */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif
50
51 .globl swapper_pg_dir
52 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
53
54 .macro pgtbl, rd, phys
55 add \rd, \phys,
56 sub \rd, \rd,
57 .endm
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77 .arm
78
79 __HEAD
80ENTRY(stext)
81 ARM_BE8(setend be ) @ ensure we are in BE8 mode
82
83 THUMB( badr r9, 1f ) @ Kernel is always entered in ARM.
84 THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
85 THUMB( .thumb ) @ switch to Thumb now.
86 THUMB(1: )
87
88#ifdef CONFIG_ARM_VIRT_EXT
89 bl __hyp_stub_install
90#endif
91 @ ensure svc mode and all interrupts masked
92 safe_svcmode_maskall r9
93
94 mrc p15, 0, r9, c0, c0 @ get processor id
95 bl __lookup_processor_type @ r5=procinfo r9=cpuid
96 movs r10, r5 @ invalid processor (r5=0)?
97 THUMB( it eq ) @ force fixup-able long branch encoding
98 beq __error_p @ yes, error 'p'
99
100#ifdef CONFIG_ARM_LPAE
101 mrc p15, 0, r3, c0, c1, 4 @ read ID_MMFR0
102 and r3, r3,
103 cmp r3,
104 THUMB( it lo ) @ force fixup-able long branch encoding
105 blo __error_lpae @ only classic page table format
106#endif
107
108#ifndef CONFIG_XIP_KERNEL
109 adr r3, 2f
110 ldmia r3, {r4, r8}
111 sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
112 add r8, r8, r4 @ PHYS_OFFSET
113#else
114 ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
115#endif
116
117
118
119
120
121 bl __vet_atags
122#ifdef CONFIG_SMP_ON_UP
123 bl __fixup_smp
124#endif
125#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
126 bl __fixup_pv_table
127#endif
128 bl __create_page_tables
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149 ldr r13, =__mmap_switched @ address to jump to after
150 @ mmu has been enabled
151 badr lr, 1f @ return (PIC) address
152#ifdef CONFIG_ARM_LPAE
153 mov r5,
154 mov r8, r4, lsr
155#else
156 mov r8, r4 @ set TTBR1 to swapper_pg_dir
157#endif
158 ldr r12, [r10,
159 add r12, r12, r10
160 ret r12
1611: b __enable_mmu
162ENDPROC(stext)
163 .ltorg
164#ifndef CONFIG_XIP_KERNEL
1652: .long .
166 .long PAGE_OFFSET
167#endif
168
169
170
171
172
173
174
175
176
177
178
179
/*
 * Setup the initial page tables.  We only setup the barest amount which is
 * required to get the kernel running.  The remainder of the page tables is
 * set up later by paging_init().
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __turn_mmu_on_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __turn_mmu_on
	add	r6, r6, r0			@ phys __turn_mmu_on_end
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

	/*
	 * Map our RAM from the start to the end of the kernel .bss section.
	 */
	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
	ldr	r6, =(_end - 1)
	orr	r3, r8, r7
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	movs	r0, r0, lsl #SECTION_SHIFT
	subne	r3, r0, r8
	addne	r3, r3, #PAGE_OFFSET
	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
	orrne	r6, r7, r0
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg
	.align
__turn_mmu_on_loc:
	.long	.
	.long	__turn_mmu_on
	.long	__turn_mmu_on_end
#if defined(CONFIG_SMP)
	.text
	.arm
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)			@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB(	it	eq )			@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	lr, r4, r5			@ mmu has been enabled
	add	r3, r7, lr
	ldrd	r4, [r3, #0]			@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

	/*
	 * r6  = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #12]			@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

	.align

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
/*
 * Setup common bits before finally enabling the MMU.  Essentially this is
 * just loading the page table pointer and domain access registers.  All the
 * registers listed for stext (r1, r2, r4, r5, r8, r9, r13) must be preserved.
 *
 *  r0  = cp#15 control register
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488 .align 5
489 .pushsection .idmap.text, "ax"
490ENTRY(__turn_mmu_on)
491 mov r0, r0
492 instr_sync
493 mcr p15, 0, r0, c1, c0, 0 @ write control reg
494 mrc p15, 0, r3, c0, c0, 0 @ read id reg
495 instr_sync
496 mov r3, r3
497 mov r3, r13
498 ret r3
499__turn_mmu_on_end:
500ENDPROC(__turn_mmu_on)
501 .popsection
502
503
#ifdef CONFIG_SMP_ON_UP
	__HEAD
/*
 * Decide whether this CPU can actually run SMP code.  If not, branch to
 * __fixup_smp_on_up to patch the ALT_SMP/ALT_UP alternative sites to their
 * UP variants.  Uses the CPUID in r9.
 */
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So its an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3		@ runtime phys-virt delta
	add	r4, r4, r3		@ phys __smpalt_begin
	add	r5, r5, r3		@ phys __smpalt_end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.align
1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	.pushsection .data
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif
565
566 .text
567__do_fixup_smp_on_up:
568 cmp r4, r5
569 reths lr
570 ldmia r4!, {r0, r6}
571 ARM( str r6, [r0, r3] )
572 THUMB( add r0, r0, r3 )
573#ifdef __ARMEB__
574 THUMB( mov r6, r6, ror
575#endif
576 THUMB( strh r6, [r0],
577 THUMB( mov r6, r6, lsr
578 THUMB( strh r6, [r0] )
579 b __do_fixup_smp_on_up
580ENDPROC(__do_fixup_smp_on_up)
581
/*
 * void fixup_smp(const void *start, size_t size)
 * Patch a table of SMP alternatives (e.g. in a module) to their UP forms.
 */
ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0			@ table start
	add	r5, r0, r1		@ table end
	mov	r3, #0			@ addresses are already correct
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
590
/* Word offsets of the low/high halves of the 64-bit __pv_offset value,
 * accounting for endianness. */
#ifdef __ARMEB__
#define LOW_OFFSET	0x4
#define HIGH_OFFSET	0x0
#else
#define LOW_OFFSET	0x0
#define HIGH_OFFSET	0x4
#endif
598
599#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
600
601
602
603
604
605
606 __HEAD
607__fixup_pv_table:
608 adr r0, 1f
609 ldmia r0, {r3-r7}
610 mvn ip,
611 subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
612 add r4, r4, r3 @ adjust table start address
613 add r5, r5, r3 @ adjust table end address
614 add r6, r6, r3 @ adjust __pv_phys_pfn_offset address
615 add r7, r7, r3 @ adjust __pv_offset address
616 mov r0, r8, lsr
617 str r0, [r6] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
618 strcc ip, [r7,
619 mov r6, r3, lsr
620 teq r3, r6, lsl
621THUMB( it ne @ cross section branch )
622 bne __error
623 str r3, [r7,
624 b __fixup_a_pv_table
625ENDPROC(__fixup_pv_table)
626
627 .align
6281: .long .
629 .long __pv_table_begin
630 .long __pv_table_end
6312: .long __pv_phys_pfn_offset
632 .long __pv_offset
633
634 .text
635__fixup_a_pv_table:
636 adr r0, 3f
637 ldr r6, [r0]
638 add r6, r6, r3
639 ldr r0, [r6,
640 ldr r6, [r6,
641 mov r6, r6, lsr
642 cmn r0,
643#ifdef CONFIG_THUMB2_KERNEL
644 moveq r0,
645 lsls r6,
646 beq 2f
647 clz r7, r6
648 lsr r6,
649 lsl r6, r7
650 bic r6,
651 lsrs r7,
652 orrcs r6,
653 orr r6, r6, r7, lsl
654 orr r6,
655 b 2f
6561: add r7, r3
657 ldrh ip, [r7,
658ARM_BE8(rev16 ip, ip)
659 tst ip,
660 and ip,
661 orrne ip, r6 @ mask in offset bits 31-24
662 orreq ip, r0 @ mask in offset bits 7-0
663ARM_BE8(rev16 ip, ip)
664 strh ip, [r7,
665 bne 2f
666 ldrh ip, [r7]
667ARM_BE8(rev16 ip, ip)
668 bic ip,
669 orr ip, ip, r0, lsr
670ARM_BE8(rev16 ip, ip)
671 strh ip, [r7]
6722: cmp r4, r5
673 ldrcc r7, [r4],
674 bcc 1b
675 bx lr
676#else
677#ifdef CONFIG_CPU_ENDIAN_BE8
678 moveq r0,
679#else
680 moveq r0,
681#endif
682 b 2f
6831: ldr ip, [r7, r3]
684#ifdef CONFIG_CPU_ENDIAN_BE8
685 @ in BE8, we load data in BE, but instructions still in LE
686 bic ip, ip,
687 tst ip,
688 orrne ip, ip, r6, lsl
689 biceq ip, ip,
690 orreq ip, ip, r0 @ mask in offset bits 7-0
691#else
692 bic ip, ip,
693 tst ip,
694 orrne ip, ip, r6 @ mask in offset bits 31-24
695 biceq ip, ip,
696 orreq ip, ip, r0 @ mask in offset bits 7-0
697#endif
698 str ip, [r7, r3]
6992: cmp r4, r5
700 ldrcc r7, [r4],
701 bcc 1b
702 ret lr
703#endif
704ENDPROC(__fixup_a_pv_table)
705
706 .align
7073: .long __pv_offset
708
/*
 * void fixup_pv_table(const void *start, size_t size)
 * Patch a pv_table whose addresses are already correct (e.g. a module's).
 */
ENTRY(fixup_pv_table)
	stmfd	sp!, {r4 - r7, lr}
	mov	r3, #0			@ no offset
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size
	bl	__fixup_a_pv_table
	ldmfd	sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
717
718 .data
719 .globl __pv_phys_pfn_offset
720 .type __pv_phys_pfn_offset, %object
721__pv_phys_pfn_offset:
722 .word 0
723 .size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset
724
725 .globl __pv_offset
726 .type __pv_offset, %object
727__pv_offset:
728 .quad 0
729 .size __pv_offset, . -__pv_offset
730#endif
731
732#include "head-common.S"
733