1
2
3
4
5
6
7
8
9
10
11#include <asm/unistd.h>
12#include <asm/ftrace.h>
13#include <asm/unwind.h>
14
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	@ Default no-op: platforms that do not select CONFIG_NEED_RET_TO_USER
	@ require no machine-specific work on the return-to-user path.
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif
21
22#include "entry-header.S"
23
24
25 .align 5
26
27
28
29
30
@
@ Fast return path after a syscall: interrupts are disabled, then the
@ thread flags are re-checked; any pending work (signals, reschedule,
@ tracing) diverts to the slow path before returning to user space.
@
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for pending work
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
	asm_trace_hardirqs_on

	@ perform architecture specific actions before user return
	arch_ret_to_user r1, lr
	ct_user_enter

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
49
@
@ Slow path taken when thread flags indicate pending work on return to
@ user space.  do_work_pending() handles signals/reschedule; a negative
@ return requests a syscall restart via local_restart.
@
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
61
62
63
64
@
@ "Slow" return-to-user path, also used when returning from an IRQ taken
@ in user mode.  Mirrors ret_fast_syscall but restores the full register
@ frame (fast = 0).
@
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	asm_trace_hardirqs_on

	@ perform architecture specific actions before user return
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
82
83
84
85
@
@ Common return point for newly created tasks.  For kernel threads,
@ r5 holds the thread function and r4 its argument; user tasks fall
@ straight through to the slow syscall-return path.
@
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0				@ kernel thread? (r5 = fn)
	movne	r0, r4				@ kernel-thread argument
	adrne	lr, BSYM(1f)
	movne	pc, r5				@ call the thread fn
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
95
	@ Count the entries in calls.S by redefining CALL() to an increment;
	@ the result must agree with __NR_syscalls seen by the rest of the
	@ kernel, otherwise abort the build.
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"

/*
 * Ensure that the system call table size matches the unistd.h value.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

@ From here on, CALL() emits an actual table entry.
#undef CALL
#define CALL(x) .long x
110
111#ifdef CONFIG_FUNCTION_TRACER
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153#ifndef CONFIG_OLD_MCOUNT
154
155
156#endif
157#endif
158
@ Convert the raw return address in \rn into the address of the
@ instrumented function: strip the Thumb bit and back up over the
@ mcount call sequence.
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm
163
@ Non-dynamic mcount body: if a tracer is registered (i.e.
@ ftrace_trace_function no longer points at the stub), tail into it;
@ otherwise check the function-graph hooks before returning.
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2			@ tracer registered?
	bne	1f			@ yes: call it

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2			@ graph return hook installed?
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2			@ graph entry hook installed?
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit			@ nothing to trace

1:	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2			@ jump to the tracer, return to 2:
2:	mcount_exit
.endm
193
@ Dynamic-ftrace caller body: the "bl ftrace_stub" and the graph nop
@ below are patched at runtime by the ftrace core to enable tracing.
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub		@ patched to the tracer at runtime

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0			@ nop; patched to a branch when graph tracing is on
#endif

	mcount_exit
.endm
212
@ Hook the return path of the instrumented function for the graph
@ tracer: r0 = &parent-lr slot, r1 = instrumented function, r2 = fp.
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
227
#ifdef CONFIG_OLD_MCOUNT
/*
 * Legacy (pre __gnu_mcount_nc) profiling entry points.  With the old
 * scheme the instrumented function's own prologue saved its lr at
 * [fp, #-4], which is where these helpers fetch it from.
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]		@ lr saved by the caller's prologue
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ Dynamic ftrace: default body just returns; patched when enabled.
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

@ Discard the old-ABI helper macros so they can be redefined below.
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
272
273
274
275
276
.macro mcount_enter
/*
 * This pad compensates for the push {lr} at the call site.  Note that we
 * are unable to unwind through a function which does not otherwise save
 * its lr.
 */
 UNWIND(.pad	#4)
	stmdb	sp!, {r0-r3, lr}
 UNWIND(.save	{r0-r3, lr})
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]		@ caller's lr, pushed at the call site
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}	@ ip = original return address
	mov	pc, ip
.endm
295
@ EABI profiling entry: the call site pushed lr before branching here.
ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}		@ pop the lr pushed at the call site
	mov	pc, ip			@ default body just returns
#else
	__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
UNWIND(.fnstart)
	__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
	__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif

@ Helper macros are no longer needed past this point.
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
327
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	@ Substituted return address for graph-traced functions: recover the
	@ real return address from the ftrace core, preserving r0-r3.
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}		@ save possible return values
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

@ Default do-nothing tracer target; compared against by __mcount.
ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)
343
344#endif
345
346
347
348
349
350
351 .align 5
@
@ SWI (syscall) entry.  Saves the user register frame, works out the
@ syscall number for the configured ABI mix, then dispatches through the
@ syscall table.  tsk/scno/tbl/why are register aliases set up in
@ entry-header.S.
@
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif

	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * With OABI compat we must inspect the swi instruction's immediate
	 * to determine whether this is an EABI or an old-ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number in scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly Thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]	)		@ get SWI instruction
#endif

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi immediate is zero this is an EABI call: do nothing.
	 * Otherwise it is an old-ABI call: extract the syscall number from
	 * the immediate and switch to the OABI syscall table.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed a fault trying to read the swi instruction, but we are
	 * not in a position to return -EFAULT.  Return to the instruction
	 * and re-enter the user fault path to page it in; this will likely
	 * result in a SEGV for the current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
464
465
466
467
468
@
@ Syscall entry with tracing active: let the tracer see (and possibly
@ rewrite) the syscall number and arguments before dispatching.
@
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall
485
@ Syscall exit with tracing active: record the return value, then let
@ the tracer observe the completed syscall.
__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall
491
492 .align 5
#ifdef CONFIG_ALIGNMENT_TRAP
	@ Pointer to the saved control-register value used to restore the
	@ alignment-trap setting on syscall entry.
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg
499
500
501
502
503
@ Build the native syscall table: ABI() picks the native entry and, under
@ EABI, obsolete old-ABI calls collapse to sys_ni_syscall.
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
516
517
518
519
@ r0 = syscall number
@ r8 = syscall table
@ Indirect syscall: shift the real arguments down one register and
@ re-dispatch, rejecting out-of-range or recursive sys_syscall numbers.
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_OABI_SYSCALL_BASE
	cmpne	scno, #NR_syscalls	@ check range
	stmloia	sp, {r5, r6}		@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)
534
@ Pass the register frame to sys_sigreturn; clearing 'why' prevents the
@ restored r0 being mistaken for a restartable syscall result.
sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
540
@ Same as sys_sigreturn_wrapper but for the RT signal frame.
sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
546
@ User space passes the packed struct size (88); translate it to the
@ size the generic sys_statfs64 expects (84) before chaining on.
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)
552
@ Same size translation as sys_statfs64_wrapper, for fstatfs64.
sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
558
559
560
561
562
@ mmap2: the sixth argument (r5) is the offset in 4096-byte units.  On
@ pages larger than 4K it must be rescaled to page units and be page
@ aligned, otherwise the call is rejected with -EINVAL.
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
576
577#ifdef CONFIG_OABI_COMPAT
578
579
580
581
582
@ OABI compat shims: the old ABI passes 64-bit arguments without the
@ even/odd register-pair alignment EABI uses, so each wrapper reshuffles
@ registers/stack into the layout the native handler expects.
sys_oabi_pread64:
	stmia	sp, {r3, r4}		@ push the 64-bit offset words as stack args
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}		@ same reshuffle as pread64
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2			@ move the unaligned 64-bit length
	mov	r2, r1			@ into the r2/r3 pair
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2			@ same reshuffle as truncate64
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]		@ count becomes a stack argument
	mov	r3, r2			@ move the 64-bit offset into r2/r3
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)
611
612
613
614
615
@ Build the OABI compat syscall table: ABI() now picks the compat entry
@ and obsolete calls keep their original implementation.
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
624
625#endif
626
627