1
2
3
4
5
6
7
8
9
10
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <linux/pgtable.h>
14
15#include <asm/assembler.h>
16#include <asm/cp15.h>
17#include <asm/domain.h>
18#include <asm/ptrace.h>
19#include <asm/asm-offsets.h>
20#include <asm/memory.h>
21#include <asm/thread_info.h>
22
/*
 * NOTE(review): this #endif is orphaned in this extract — its matching
 * #if/#ifdef (presumably a build-time sanity check near the top of the
 * file) was lost during extraction.  Confirm against the original source
 * before building.
 */
#endif
26
27
28
29
30
31
32
33
/* Virtual address the kernel image runs at: start of the kernel linear
 * mapping plus the in-RAM offset of the image. */
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)

/* NOTE(review): orphaned #endif — the matching #if (presumably a sanity
 * check on KERNEL_RAM_VADDR) is missing from this extract. */
#endif

#ifdef CONFIG_ARM_LPAE
/* LPAE: page directory area is 0x5000 bytes and PMD entries are 8 bytes
 * (order 3). */
#define PG_DIR_SIZE 0x5000
#define PMD_ORDER 3
#else
/* Classic MMU: 16KiB first-level table, 4-byte (order 2) entries. */
#define PG_DIR_SIZE 0x4000
#define PMD_ORDER 2
#endif

	/* The initial (swapper) page directory lives immediately below the
	 * kernel image in RAM. */
	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
50
/*
 * pgtbl - compute the physical address of the initial page table.
 *
 *   \rd   = output register: physical address of swapper_pg_dir
 *   \phys = physical start of RAM
 *
 * The tables sit TEXT_OFFSET bytes above the start of RAM, minus the
 * space they occupy themselves (PG_DIR_SIZE), i.e. immediately below the
 * kernel image — matching the swapper_pg_dir .equ above.
 *
 * NOTE(review): the immediate operands had been stripped from this
 * extract; restored to the canonical #TEXT_OFFSET / #PG_DIR_SIZE values.
 */
	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
/*
 * Kernel startup entry point for the boot CPU.
 *
 * Entered in ARM state with the MMU off.  Per the ARM boot protocol
 * (consistent with the __vet_atags call below): r1 = machine number,
 * r2 = atags or dtb pointer — TODO confirm against the original header
 * comment, which was stripped from this extract.
 *
 * NOTE(review): throughout this routine the '#'-prefixed immediate
 * operands were stripped during extraction (lines ending in a dangling
 * comma).  They are preserved unchanged and flagged; the file will not
 * assemble until they are restored from the original source.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install		@ park HYP mode if entered in it
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3,		@ NOTE(review): immediate stripped (VMSA field mask)
	cmp	r3,		@ NOTE(review): immediate stripped (min VMSA level for LPAE)
 THUMB( it	lo )		@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	@ Compute the runtime physical offset from the link-time literals
	@ at 2: below (position-independent).
	adr	r3, 2f
	ldmia	r3, {r4, r8}
	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
	add	r8, r8, r4			@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	bl	__vet_atags			@ sanity-check the r2 atags/dtb pointer
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp			@ patch SMP-only insns when on UP
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table		@ record phys/virt offset, patch stubs
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU-specific init code in a position
	 * independent manner: look up the per-CPU setup function via the
	 * procinfo record (r10) and call it; it returns the CP15 control
	 * register value in r0, after which we branch (via lr) to
	 * __enable_mmu and finally jump to r13 once the MMU is live.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5,		@ NOTE(review): immediate stripped (TTBR high word)
	mov	r8, r4, lsr	@ NOTE(review): shift amount stripped (TTBR low word)
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10,	@ NOTE(review): offset stripped (procinfo init function — confirm)
	add	r12, r12, r10
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
#ifndef CONFIG_XIP_KERNEL
2:	.long	.				@ link-time address of this word
	.long	PAGE_OFFSET			@ paired literal for the delta above
#endif
165
166
167
168
169
170
171
172
173
174
175
176
/*
 * Setup the initial page tables.  Only the barest amount needed to get
 * the kernel running is created: the table is cleared, a 1:1 identity
 * mapping is made to cover the MMU-enable trampoline
 * (__turn_mmu_on..__turn_mmu_on_end), the kernel image is section-mapped
 * at PAGE_OFFSET, and (optionally) the boot params and debug UART.
 *
 * In:  r8 = physical RAM start, r9 = cpuid, r10 = procinfo,
 *      r2 = atags/dtb pointer (mapped near the end).
 * Out: r4 = physical page table address; r0, r3, r5-r7 corrupted.
 *
 * NOTE(review): every '#'-prefixed immediate (section shifts, strides,
 * PMD_ORDER expressions) was stripped from this extract; the truncated
 * lines are preserved unchanged and flagged.  Two #if guard lines are
 * also missing (flagged below), leaving unbalanced #endif directives.
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/* Clear the whole initial page table, four words per pass. */
	mov	r0, r4
	mov	r3,		@ NOTE(review): immediate stripped (zero fill value)
	add	r6, r0,		@ NOTE(review): immediate stripped (table size)
1:	str	r3, [r0],	@ NOTE(review): post-index stripped
	str	r3, [r0],
	str	r3, [r0],
	str	r3, [r0],
	teq	r0, r6				@ end of table reached?
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/* Build the first-level entries pointing at the second-level
	 * tables that follow, as 64-bit descriptors: low word (r3) plus
	 * zero high word (r7), word order swapped for BE8. */
	mov	r0, r4
	add	r3, r4,		@ NOTE(review): immediate stripped (offset to level-2 tables)
	orr	r3, r3,		@ NOTE(review): descriptor type bits stripped
	mov	r6,		@ NOTE(review): loop count stripped
	mov	r7,		@ NOTE(review): high-word value stripped
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0],	@ NOTE(review): post-index stripped
	str	r3, [r0],
#else
	str	r3, [r0],
	str	r7, [r0],
#endif
	add	r3, r3,		@ NOTE(review): table stride stripped
	subs	r6, r6,		@ NOTE(review): decrement stripped
	bne	1b

	add	r4, r4,		@ NOTE(review): offset stripped (point r4 at level-2 tables)
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4,		@ NOTE(review): BE8 low-word bias stripped
#endif
#endif

	ldr	r7, [r10,	@ NOTE(review): offset stripped (procinfo MMU flags — confirm)

	/*
	 * Create an identity mapping for __turn_mmu_on so execution can
	 * continue across the instant the MMU is enabled: convert the
	 * trampoline's link-time bounds to physical, then to section
	 * numbers, and map each section 1:1.
	 */
	adr	r0, __turn_mmu_on_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __turn_mmu_on
	add	r6, r6, r0			@ phys __turn_mmu_on_end
	mov	r5, r5, lsr	@ NOTE(review): section shift stripped
	mov	r6, r6, lsr	@ NOTE(review): section shift stripped

1:	orr	r3, r7, r5, lsl	@ NOTE(review): shift stripped (build section entry)
	str	r3, [r4, r5, lsl	@ NOTE(review): index shift / bracket stripped
	cmp	r5, r6
	addlo	r5, r5,		@ NOTE(review): increment stripped
	blo	1b

	/* Section-map the kernel image from PAGE_OFFSET through _end. */
	add	r0, r4,		@ NOTE(review): table offset for PAGE_OFFSET stripped
	ldr	r6, =(_end - 1)
	orr	r3, r8, r7			@ phys base | MMU flags
	add	r6, r4, r6, lsr	@ NOTE(review): shift stripped (last entry address)
1:	str	r3, [r0],	@ NOTE(review): post-index stripped
	add	r3, r3,		@ NOTE(review): section stride stripped
	cmp	r0, r6
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/* XIP: the image executes from ROM; map the RAM copy of the
	 * writable data separately, starting at the current section. */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr	@ NOTE(review): shift stripped
	orr	r3, r7, r3, lsl	@ NOTE(review): shift stripped
	add	r0, r4,		@ NOTE(review): table offset expression stripped
	str	r3, [r0,	@ NOTE(review): index expression stripped
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0,		@ NOTE(review): increment stripped
	add	r6, r4, r6, lsr	@ NOTE(review): shift stripped
1:	cmp	r0, r6
	add	r3, r3,		@ NOTE(review): stride stripped
	strls	r3, [r0],	@ NOTE(review): post-index stripped
	bls	1b
#endif

	/*
	 * Map the boot parameters / DTB (pointer in r2), if the pointer
	 * is non-zero after rounding down to a section boundary.
	 */
	mov	r0, r2, lsr	@ NOTE(review): section shift stripped
	movs	r0, r0, lsl	@ NOTE(review): section shift stripped
	subne	r3, r0, r8			@ offset from RAM start
	addne	r3, r3,		@ NOTE(review): offset stripped
	addne	r3, r4, r3, lsr	@ NOTE(review): shift stripped
	orrne	r6, r7, r0			@ section entry for params
	strne	r6, [r3],	@ NOTE(review): post-index stripped
	addne	r6, r6,		@ NOTE(review): stride stripped
	strne	r6, [r3]

	/* NOTE(review): the '#if' guard for this fixup (presumably LPAE +
	 * BE8) is missing from this extract; the #endif below is its
	 * orphaned partner. */
	sub	r4, r4,		@ NOTE(review): operand stripped (page table pointer fixup)
	@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
	/* NOTE(review): an inner '#if' guard (ICEDCC/semihosting exclusion)
	 * pairing with the #else/#endif below was lost from this extract. */
	/* Map in IO space for serial debugging. */
	addruart r7, r3, r0

	mov	r3, r3, lsr	@ NOTE(review): section shift stripped
	mov	r3, r3, lsl	@ NOTE(review): entry-index shift stripped

	add	r0, r4, r3			@ table entry for the UART section
	mov	r3, r7, lsr	@ NOTE(review): shift stripped
	ldr	r7, [r10,	@ NOTE(review): offset stripped (procinfo IO MMU flags — confirm)
	orr	r3, r7, r3, lsl	@ NOTE(review): shift stripped
#ifdef CONFIG_ARM_LPAE
	mov	r7,		@ NOTE(review): high-word value stripped
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0],	@ NOTE(review): post-index stripped
	str	r3, [r0],
#else
	str	r3, [r0],
	str	r7, [r0],
#endif
#else
	orr	r3, r3,		@ NOTE(review): section-entry bits stripped
	str	r3, [r0],	@ NOTE(review): post-index stripped
#endif

#else /* no serial debug mapping needed for ICEDCC/semihosting */
	ldr	r7, [r10,	@ NOTE(review): offset stripped (procinfo IO MMU flags — confirm)
#endif

	/* NOTE(review): the platform '#if' guard for the next three lines
	 * (a platform-specific serial-port mapping) is missing from this
	 * extract; the #endif below is its orphaned partner. */
	add	r0, r4,		@ NOTE(review): operand stripped
	orr	r3, r7,		@ NOTE(review): operand stripped
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/* RiscPC: map in the screen/video sections for early debug. */
	add	r0, r4,		@ NOTE(review): operand stripped
	orr	r3, r7,		@ NOTE(review): operand stripped
	str	r3, [r0]
	add	r0, r4,		@ NOTE(review): operand stripped
	str	r3, [r0]
#endif
#endif /* CONFIG_DEBUG_LL */
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4,		@ NOTE(review): offset stripped (back to first-level table)
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg
	.align
	/* Literal triple used by __create_page_tables to translate the
	 * MMU-enable trampoline's link-time addresses to physical. */
__turn_mmu_on_loc:
	.long	.				@ link-time address of this word
	.long	__turn_mmu_on
	.long	__turn_mmu_on_end
359
360
	.text
	.arm
/*
 * Entry point for secondary CPUs.  Mirrors stext: switch to Thumb if
 * needed, mask interrupts in SVC mode, look up the processor type, load
 * the page table handed over by the boot CPU (via secondary_data), then
 * run the per-CPU init function and enable the MMU.
 *
 * NOTE(review): '#'-immediates and structure offsets were stripped from
 * this extract (flagged below); also the '#if defined(CONFIG_SMP)' that
 * should guard this region appears to have been lost.
 */
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
 ARM_BE8(setend	be)			@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0,		@ NOTE(review): immediate stripped (error code char)
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/* Use the page tables supplied by the boot CPU: translate the
	 * link-time address of secondary_data to physical, then load the
	 * pgdir (64-bit, hence ldrd + BE8 word swap) and swapper_pg_dir. */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	lr, r4, r5			@ mmu has been enabled
	add	r3, r7, lr			@ phys address of secondary_data
	ldrd	r4, r5, [r3,	@ NOTE(review): offset stripped (secondary_data pgdir — confirm)
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
	ldr	r8, [r3,	@ NOTE(review): offset stripped (secondary_data swapper_pg_dir — confirm)
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10,	@ NOTE(review): offset stripped (procinfo init function — confirm)
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)
411
412
413
414
/*
 * Landing point for a secondary CPU once its MMU is on (jumped to via
 * r13 from __turn_mmu_on).  r7 still points at secondary_data: pick up
 * the stack the boot CPU allocated, clear the frame pointer so
 * backtraces terminate, and enter C.
 *
 * NOTE(review): the stripped immediates have been restored to the
 * canonical values — stack pointer at offset 12 within secondary_data
 * (after the 8-byte pgdir and 4-byte swapper_pg_dir fields), fp = 0.
 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #12]			@ get secondary_data.stack
	mov	fp, #0				@ terminate backtraces here
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
420
	.align

	.type	__secondary_data, %object
	/* Literal triple read by secondary_startup: a link-time anchor for
	 * computing the runtime offset, the address of the C-side
	 * secondary_data block, and the post-MMU continuation address. */
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* NOTE(review): matching #if (presumably CONFIG_SMP) lost from extract */
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
/*
 * Setup common bits before finally enabling the MMU.
 *
 * In:  r0 = CP15 control register value returned by the per-CPU init
 *           function; r4 = physical page table pointer (plus r5 = TTBR
 *           high word under LPAE); r13 = virtual continuation address.
 *
 * NOTE(review): the start of this routine is missing from this extract —
 * including the '#ifdef' (presumably CONFIG_ALIGNMENT_TRAP) that pairs
 * with the #else/#endif immediately below — and all '#'-immediates have
 * been stripped.  The file will not assemble until restored.
 */
__enable_mmu:
	orr	r0, r0,		@ NOTE(review): control-reg bit mask stripped
#else
	bic	r0, r0,		@ NOTE(review): control-reg bit mask stripped
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0,		@ NOTE(review): D-cache enable bit stripped
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0,		@ NOTE(review): branch-predict enable bit stripped
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0,		@ NOTE(review): I-cache enable bit stripped
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5,		@ NOTE(review): domain access value stripped
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
	/*
	 * Enable the MMU.  This completely changes the visible address
	 * space, so this code must be covered by the 1:1 identity mapping
	 * built in __create_page_tables (hence .idmap.text).
	 *
	 * In:  r0  = CP15 control register value (MMU enable bit set)
	 *      r13 = *virtual* address to jump to once the MMU is on
	 * Out: r3 corrupted (used for the serialising ID read, then the
	 *      jump target).
	 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0				@ nop padding around the critical write
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg — MMU goes live here
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg (serialises the write)
	instr_sync
	mov	r3, r3				@ settle the pipeline
	mov	r3, r13				@ fetch the virtual continuation address
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection
499
500
#ifdef CONFIG_SMP_ON_UP
	__HEAD
/*
 * __fixup_smp - decide whether this CPU is actually running SMP, and if
 * not, patch out SMP-only instructions (see __fixup_smp_on_up / the
 * __smpalt table).
 *
 * In:  r9 = cpuid (MIDR).  Out: r0, r3-r6 corrupted; either returns
 * directly (SMP assumed) or tail-calls __do_fixup_smp_on_up.
 *
 * NOTE(review): the '#'-immediates had been stripped from this extract;
 * restored to the canonical values (CPUID architecture field, the
 * 0x4100b020 11MPCore / 0x4100c090 Cortex-A9 IDs, MPIDR U/MP bits, and
 * the SCU Configuration CPU-count field).
 */
__fixup_smp:
	and	r3, r9, #0x000f0000		@ architecture version
	teq	r3, #0x000f0000			@ CPU ID supported?
	bne	__fixup_smp_on_up		@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f		@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020		@ val 0x4100b020
	teq	r3, r4				@ ARM 11MPCore?
	reteq	lr				@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5		@ read MPIDR
	and	r0, r0, #0xc0000000		@ multiprocessing extensions and
	teq	r0, #0x80000000			@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up		@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4				@ Check for ARM Cortex-A9
	retne	lr				@ Not ARM Cortex-A9: assume SMP

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0		@ get SCU base address
	teq	r0, #0x0			@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up		@ So its an A9 UP
	ldr	r0, [r0, #4]			@ read SCU Config
ARM_BE8(rev	r0, r0)				@ byteswap if big endian
	and	r0, r0, #0x3			@ number of CPUs (minus one)
	teq	r0, #0x0			@ is 1?
	retne	lr				@ more than one: assume SMP

__fixup_smp_on_up:
	@ Convert the link-time __smpalt table bounds to runtime addresses
	@ (r3 = runtime - link-time delta) and apply the patches.
	adr	r0, 1f
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3
	add	r4, r4, r3			@ table start
	add	r5, r5, r3			@ table end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)
549
	.align
	/* Link-time anchor plus the bounds of the SMP-alternatives table. */
1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	.pushsection .data
	.align	2
	.globl	smp_on_up
	/* Runtime SMP/UP flag consumed by the ALT_SMP/ALT_UP machinery;
	 * the UP value is patched in when the fixup runs. */
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif /* CONFIG_SMP_ON_UP */
563
564 .text
565__do_fixup_smp_on_up:
566 cmp r4, r5
567 reths lr
568 ldmia r4!, {r0, r6}
569 ARM( str r6, [r0, r3] )
570 THUMB( add r0, r0, r3 )
571#ifdef __ARMEB__
572 THUMB( mov r6, r6, ror
573#endif
574 THUMB( strh r6, [r0],
575 THUMB( mov r6, r6, lsr
576 THUMB( strh r6, [r0] )
577 b __do_fixup_smp_on_up
578ENDPROC(__do_fixup_smp_on_up)
579
/*
 * fixup_smp(table, size) - C-callable wrapper used to apply SMP-on-UP
 * patching to an externally supplied alternatives table (e.g. for
 * modules).  The addresses are already runtime addresses, so the
 * relocation delta r3 is zero.
 *
 * In:  r0 = table start, r1 = table size in bytes.
 *
 * NOTE(review): the stripped 'mov r3, #0' immediate has been restored.
 */
ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0				@ r0 = table start
	add	r5, r0, r1			@ r1 = table size -> r5 = table end
	mov	r3, #0				@ no runtime/link-time delta
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
588
/* Byte offsets of the low and high 32-bit words within the 64-bit
 * __pv_offset value, accounting for endianness. */
#ifdef __ARMEB__
#define LOW_OFFSET 0x4
#define HIGH_OFFSET 0x0
#else
#define LOW_OFFSET 0x0
#define HIGH_OFFSET 0x4
#endif
596
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT

/*
 * __fixup_pv_table - compute the runtime physical/virtual address delta
 * and record it for the __pa()/__va() patching code, then patch the
 * kernel's own pv table.
 *
 * In:  r8 = PHYS_OFFSET.  The delta must be a multiple of 16MiB so it
 * fits the 8-bit-rotated immediate of the patched add/sub instructions;
 * otherwise we stop at __error.  A negative delta (carry clear after the
 * subs) sign-extends __pv_offset's high word with ~0.
 *
 * NOTE(review): the stripped immediates (#0, #PAGE_SHIFT, the
 * HIGH_OFFSET/LOW_OFFSET indices, and the #24 alignment shifts) have
 * been restored to the canonical values.
 */
	__HEAD
__fixup_pv_table:
	adr	r0, 1f
	ldmia	r0, {r3-r7}
	mvn	ip, #0				@ ~0, for sign-extending the high word
	subs	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
	add	r4, r4, r3	@ adjust table start address
	add	r5, r5, r3	@ adjust table end address
	add	r6, r6, r3	@ adjust __pv_phys_pfn_offset address
	add	r7, r7, r3	@ adjust __pv_offset address
	mov	r0, r8, lsr #PAGE_SHIFT	@ convert PHYS_OFFSET to a PFN
	str	r0, [r6]	@ save computed PHYS_OFFSET to __pv_phys_pfn_offset
	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits (negative delta)
	mov	r6, r3, lsr #24	@ constant for add/sub instructions
	teq	r3, r6, lsl #24 @ must be 16MiB aligned
THUMB(	it	ne		@ cross section branch )
	bne	__error
	str	r3, [r7, #LOW_OFFSET]	@ save to __pv_offset low bits
	b	__fixup_a_pv_table
ENDPROC(__fixup_pv_table)

	.align
1:	.long	.				@ link-time anchor
	.long	__pv_table_begin
	.long	__pv_table_end
2:	.long	__pv_phys_pfn_offset
	.long	__pv_offset
631
	.text
/*
 * __fixup_a_pv_table - patch each instruction recorded in the pv table
 * at [r4, r5) with the current physical/virtual offset, so the inline
 * __pa()/__va() add/sub stubs translate correctly.
 *
 * In:  r3 = runtime - link-time offset (0 for modules via
 *           fixup_pv_table); r4 = table start; r5 = table end.
 * Out: r0, r4, r6, r7, ip corrupted.
 *
 * The Thumb-2 and ARM paths rewrite the rotated-immediate fields of the
 * recorded instructions in place (with rev16 fixups under BE8, where
 * data is big-endian but instructions remain little-endian).
 *
 * NOTE(review): the '#'-prefixed masks, shifts and post-indices were
 * stripped from this extract; the truncated lines are preserved
 * unchanged and flagged.  Restore from the original source before
 * building — the exact bit patterns encode instruction fields and must
 * not be guessed.
 */
__fixup_a_pv_table:
	adr	r0, 3f
	ldr	r6, [r0]			@ link-time address of __pv_offset
	add	r6, r6, r3			@ runtime address of __pv_offset
	ldr	r0, [r6,	@ NOTE(review): HIGH_OFFSET index stripped
	ldr	r6, [r6,	@ NOTE(review): LOW_OFFSET index stripped
	mov	r6, r6, lsr	@ NOTE(review): shift stripped (offset -> immediate field)
	cmn	r0,		@ NOTE(review): immediate stripped (negative-offset test)
#ifdef CONFIG_THUMB2_KERNEL
	moveq	r0,		@ NOTE(review): immediate stripped
	lsls	r6,		@ NOTE(review): operands stripped
	beq	2f
	clz	r7, r6
	lsr	r6,		@ NOTE(review): operands stripped
	lsl	r6, r7
	bic	r6,		@ NOTE(review): mask stripped
	lsrs	r7,		@ NOTE(review): operands stripped
	orrcs	r6,		@ NOTE(review): operand stripped
	orr	r6, r6, r7, lsl	@ NOTE(review): shift stripped
	orr	r6,		@ NOTE(review): operand stripped
	b	2f
1:	add	r7, r3				@ runtime address of patched insn
	ldrh	ip, [r7,	@ NOTE(review): halfword index stripped
ARM_BE8(rev16	ip, ip)
	tst	ip,		@ NOTE(review): mask stripped (add vs mov discriminator)
	and	ip,		@ NOTE(review): mask stripped
	orrne	ip, r6	@ mask in offset bits 31-24
	orreq	ip, r0	@ mask in offset bits 7-0
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7,	@ NOTE(review): halfword index stripped
	bne	2f
	ldrh	ip, [r7]
ARM_BE8(rev16	ip, ip)
	bic	ip,		@ NOTE(review): mask stripped
	orr	ip, ip, r0, lsr	@ NOTE(review): shift stripped
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7]
2:	cmp	r4, r5				@ more table entries?
	ldrcc	r7, [r4],	@ NOTE(review): post-index stripped
	bcc	1b
	bx	lr
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
	moveq	r0,		@ NOTE(review): immediate stripped
#else
	moveq	r0,		@ NOTE(review): immediate stripped
#endif
	b	2f
1:	ldr	ip, [r7, r3]			@ load the recorded instruction
#ifdef CONFIG_CPU_ENDIAN_BE8
	@ in BE8, we load data in BE, but instructions still in LE
	bic	ip, ip,		@ NOTE(review): mask stripped
	tst	ip,		@ NOTE(review): mask stripped
	orrne	ip, ip, r6, lsl	@ NOTE(review): shift stripped
	biceq	ip, ip,		@ NOTE(review): mask stripped
	orreq	ip, ip, r0	@ mask in offset bits 7-0
#else
	bic	ip, ip,		@ NOTE(review): mask stripped
	tst	ip,		@ NOTE(review): mask stripped
	orrne	ip, ip, r6	@ mask in offset bits 31-24
	biceq	ip, ip,		@ NOTE(review): mask stripped
	orreq	ip, ip, r0	@ mask in offset bits 7-0
#endif
	str	ip, [r7, r3]			@ store patched instruction back
2:	cmp	r4, r5				@ more table entries?
	ldrcc	r7, [r4],	@ NOTE(review): post-index stripped
	bcc	1b
	ret	lr
#endif
ENDPROC(__fixup_a_pv_table)

	.align
	/* Link-time address of __pv_offset, relocated at runtime via r3. */
3:	.long	__pv_offset
706
/*
 * fixup_pv_table(table, size) - C-callable wrapper used to patch an
 * externally supplied pv table (e.g. when loading modules).  Addresses
 * are already runtime addresses, so the relocation delta r3 is zero.
 *
 * In:  r0 = table start, r1 = table size in bytes.
 *
 * NOTE(review): the stripped 'mov r3, #0' immediate has been restored.
 */
ENTRY(fixup_pv_table)
	stmfd	sp!, {r4 - r7, lr}
	mov	r3, #0			@ no runtime/link-time delta
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size -> r5 = table end
	bl	__fixup_a_pv_table
	ldmfd	sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
715
	.data
	.align	2
	.globl	__pv_phys_pfn_offset
	.type	__pv_phys_pfn_offset, %object
	/* PFN of the start of physical RAM; written by __fixup_pv_table. */
__pv_phys_pfn_offset:
	.word	0
	.size	__pv_phys_pfn_offset, . -__pv_phys_pfn_offset

	.globl	__pv_offset
	.type	__pv_offset, %object
	/* 64-bit PHYS_OFFSET - PAGE_OFFSET delta (sign-extended high word
	 * for negative deltas); written by __fixup_pv_table. */
__pv_offset:
	.quad	0
	.size	__pv_offset, . -__pv_offset
#endif /* CONFIG_ARM_PATCH_PHYS_VIRT */
730
731#include "head-common.S"
732