/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
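
/*
 * Worked example (illustrative values): with the default PAGE_OFFSET of
 * 0xC0000000 and TEXT_OFFSET of 0x00008000, KERNEL_RAM_VADDR is
 * 0xC0008000; its low 16 bits are 0x8000, so the check above passes.
 */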

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
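
/*
 * Example (illustrative): with KERNEL_RAM_VADDR = 0xC0008000, the classic
 * 16 KiB table gives swapper_pg_dir = 0xC0008000 - 0x4000 = 0xC0004000,
 * while LPAE's extra PGD page gives 0xC0008000 - 0x5000 = 0xC0003000.
 */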

/*
 * This needs to be assigned at runtime when the linker symbols are
 * resolved. These are unsigned 64bit really, but in this assembly code
 * we store them as 32bit.
 */
	.pushsection .data
	.align	2
	.globl	kernel_sec_start
	.globl	kernel_sec_end
kernel_sec_start:
	.long	0
	.long	0
kernel_sec_end:
	.long	0
	.long	0
	.popsection

	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm
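
/*
 * Example (illustrative): for a kernel loaded at physical 0x60000000 with
 * TEXT_OFFSET 0x00008000 and the classic 16 KiB table, pgtbl yields
 * 0x60000000 + 0x8000 - 0x4000 = 0x60004000, i.e. __pa(swapper_pg_dir).
 */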

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )			@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB( it	lo )				@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	adr_l	r8, _text			@ __pa(_text)
	sub	r8, r8, #TEXT_OFFSET		@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 *  r1 - machine type
	 *  r2 - boot data (atags/dt) pointer
	 *  r4 - translation table base (low word)
	 *  r5 - translation table base (high word, if LPAE)
	 *  r8 - translation table base 1 (pfn if LPAE)
	 *  r9 - cpuid
	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, r1, r2, r4, and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0				@ high TTBR0
	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg

/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b
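	/*
	 * Note: the loop above zeroes four words (16 bytes) per iteration,
	 * so it runs PG_DIR_SIZE / 16 times: 1024 iterations for the
	 * classic 16 KiB table, 1280 with LPAE.
	 */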

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif
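	/*
	 * (LPAE path only) The loop above filled four 64-bit PGD entries,
	 * one per GiB of address space: the low word of each entry is the
	 * physical address of a PMD table ORed with 3 (table descriptor),
	 * and the high word carries the L_PGD_SWAPPER software bit. r4 now
	 * points at the PMD level, which the code below fills in.
	 */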

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr_l	r5, __turn_mmu_on		@ _pa(__turn_mmu_on)
	adr_l	r6, __turn_mmu_on_end		@ _pa(__turn_mmu_on_end)
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b
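	/*
	 * Example (illustrative): with classic tables (SECTION_SHIFT 20,
	 * PMD_ORDER 2), __turn_mmu_on at physical 0x60100000 gives
	 * r5 = 0x601, so the word at r4 + 0x601 * 4 maps virtual
	 * 0x60100000 to physical 0x60100000.  Executing from an
	 * identity-mapped section is what lets the instruction stream
	 * survive the instant the MMU is switched on.
	 */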

	/*
	 * The main matter: map in the kernel using section mappings, and
	 * set two variables to indicate the physical start and end of the
	 * kernel.
	 */
	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
	ldr	r6, =(_end - 1)
	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
#else
	str	r8, [r5]			@ Save physical start of kernel (LE)
#endif
	orr	r3, r8, r7			@ Add the MMU flags
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b
	eor	r3, r3, r7			@ Remove the MMU flags
	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
	str	r3, [r5, #4]			@ Save physical end of kernel (BE)
#else
	str	r3, [r5]			@ Save physical end of kernel (LE)
#endif
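	/*
	 * kernel_sec_start/kernel_sec_end recorded above are consumed later
	 * by the C mapping code (see arch/arm/mm/mmu.c) when the permanent
	 * kernel mapping is created.  Each is stored as a 64-bit value,
	 * which is why the big-endian variants write the significant word
	 * at offset 4.
	 */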

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	cmp	r2, #0
	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
	addne	r3, r3, r4
	orrne	r6, r7, r0, lsl #SECTION_SHIFT
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER
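	/*
	 * Arithmetic example (illustrative): with classic tables, a UART
	 * virtual address of 0xF0200000 gives (0xF0200000 >> 20) << 2 =
	 * 0x3C08, the byte offset of that section's entry in the page
	 * table; it is added to r4 just below.
	 */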

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg

#if defined(CONFIG_SMP)
	.text
	.arm
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)				@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB( it	eq )			@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr_l	r3, secondary_data
	mov_l	r12, __secondary_switched
	ldrd	r4, r5, [r3, #0]		@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
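	@ (The three eors are the classic xor swap: r4 = a^b, then
	@  r5 = (a^b)^b = a, then r4 = (a^b)^a = b.)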
	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

ENTRY(__secondary_switched)
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
	@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
	@ as the ID map does not cover the vmalloc region.
	mrc	p15, 0, ip, c2, c0, 1	@ read TTBR1
	mcr	p15, 0, ip, c2, c0, 0	@ set TTBR0
	instr_sync
#endif
	adr_l	r7, secondary_data + 12		@ get secondary_data.stack
	ldr	sp, [r7]
	ldr	r0, [r7, #4]			@ get secondary_data.task
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

#endif /* defined(CONFIG_SMP) */

/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0)
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
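/*
 * Note: __turn_mmu_on runs from .idmap.text, which __create_page_tables
 * identity-mapped above, so instruction fetches keep working at the
 * instant translation is switched on.  The .align 5 keeps the sequence
 * within one 32-byte cache line, and the mov r0, r0 / mov r3, r3 nops
 * around the control register write, together with the ID register
 * read-back, are the traditional pipeline-settling padding for this
 * sequence.
 */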
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	instr_sync
	mov	r3, r3
	mov	r3, r13
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection


#ifdef CONFIG_SMP_ON_UP
	__HEAD
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So its an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

__fixup_smp_on_up:
	adr_l	r4, __smpalt_begin
	adr_l	r5, __smpalt_end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.pushsection .data
	.align	2
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif

	.text
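/*
 * Patch SMP instruction sequences down to their UP variants.  Each
 * 8-byte record between __smpalt_begin and __smpalt_end, emitted by the
 * ALT_SMP()/ALT_UP() macros (see arch/arm/include/asm/assembler.h),
 * holds the offset of the patch site relative to the record followed by
 * the replacement UP instruction; r4/r5 delimit the record table.  The
 * Thumb-2 path below stores two halfwords because patch sites may only
 * be 16-bit aligned.
 */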
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr
	ldmia	r4, {r0, r6}
 ARM(	str	r6, [r0, r4]	)
 THUMB(	add	r0, r0, r4	)
	add	r4, r4, #8
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r0.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#include "head-common.S"