1
2
3
4
5
6
7
8
9
10
11#include <asm/unistd.h>
12#include <asm/ftrace.h>
13#include <asm/unwind.h>
14
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2	@ empty fallback: machines that need
	.endm					@ no special return-to-user work
#endif
21
22#include "entry-header.S"
23
24
25 .align 5
26
27
28
29
30
31ret_fast_syscall:
32 UNWIND(.fnstart )
33 UNWIND(.cantunwind )
34 disable_irq @ disable interrupts
35 ldr r1, [tsk,
36 tst r1,
37 bne fast_work_pending
38 asm_trace_hardirqs_on
39
40
41 arch_ret_to_user r1, lr
42 ct_user_enter
43
44 restore_user_regs fast = 1, offset = S_OFF
45 UNWIND(.fnend )
46
47
48
49
/*
 * Ok, we need to do extra processing: enter here from the fast path
 * with the syscall return value still in r0, which must first be
 * written back into the saved pt_regs before calling out to C.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0				@ 0: all work done, return to user
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
61
62
63
64
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]		@ any pending work for this task?
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	asm_trace_hardirqs_on

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
82
83
84
85
/*
 * This is how we return from a fork.  For a kernel thread, r5 holds
 * the thread function and r4 its argument; user tasks have r5 == 0
 * and simply take the slow syscall return path.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0			@ kernel thread?
	movne	r0, r4			@ yes: pass the thread argument ...
	adrne	lr, BSYM(1f)		@ ... set the return address ...
	movne	pc, r5			@ ... and call the thread function
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
95
	@ Count the syscall table entries by expanding calls.S with a
	@ CALL() macro that just increments NR_syscalls.
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"

/*
 * Ensure that the system call table size matches __NR_syscalls,
 * which is the value the rest of the system sees.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

#undef CALL
#define CALL(x) .long x		@ from here on, CALL() emits table entries
110
111#ifdef CONFIG_FUNCTION_TRACER
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153#ifndef CONFIG_OLD_MCOUNT
154
155
156#endif
157#endif
158
/*
 * mcount_adjust_addr - turn the raw lr of an instrumented function into
 * the address ftrace expects: clear the Thumb bit and step back over the
 * mcount call site.
 */
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm
163
/*
 * __mcount - shared mcount body, expanded for both the old mcount ABI
 * (suffix "_old") and __gnu_mcount_nc (no suffix).  Relies on the
 * mcount_enter/mcount_get_lr/mcount_exit macros in scope at expansion.
 */
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2			@ tracer installed? (!= ftrace_stub)
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2			@ graph return hook set?
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2			@ graph entry hook set?
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit			@ nothing installed: plain return

1:	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2			@ call the registered tracer
2:	mcount_exit
.endm
193
/*
 * __ftrace_caller - body of the dynamic-ftrace entry points.  The
 * ftrace_call\suffix (and ftrace_graph_call\suffix) sites are exported
 * so the ftrace core can patch them at runtime.
 */
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub		@ patched to the active tracer

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0			@ nop; patched to a graph-caller branch
#endif

	mcount_exit
.endm
212
/*
 * __ftrace_graph_caller - set up the call to prepare_ftrace_return():
 *   r0 = &lr of the instrumented routine (so it can be redirected)
 *   r1 = address of the instrumented routine
 *   r2 = frame pointer
 */
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
227
228#ifdef CONFIG_OLD_MCOUNT
229
230
231
232
@ Old mcount ABI: save the argument registers and lr around the tracer call.
.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm
236
@ Old mcount ABI: the gcc -pg prologue saved the instrumented
@ function's lr at [fp, #-4].
.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm
240
@ Old mcount ABI: restore lr from the -pg prologue save slot, then pop
@ the argument registers and return.
.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm
245
/*
 * mcount - legacy gcc -pg entry point (CONFIG_OLD_MCOUNT).  With
 * dynamic ftrace the default body just restores lr and returns;
 * otherwise fall through to the shared __mcount body.
 */
ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]		@ lr saved by the -pg prologue
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)
255
256#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old		@ dynamic-ftrace entry, old mcount ABI
ENDPROC(ftrace_caller_old)
260#endif
261
262#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller		@ graph-tracer entry, old mcount ABI
ENDPROC(ftrace_graph_caller_old)
266#endif
267
268.purgem mcount_enter
269.purgem mcount_get_lr
270.purgem mcount_exit
271#endif
272
273
274
275
276
.macro mcount_enter
/*
 * This pad compensates for the push {lr} at the call site.  Note that we are
 * unable to unwind through a function which does not otherwise save its lr.
 */
 UNWIND(.pad	#4)
	stmdb	sp!, {r0-r3, lr}
 UNWIND(.save	{r0-r3, lr})
.endm
286
@ __gnu_mcount_nc ABI: the call site pushed the instrumented function's
@ lr; it sits above mcount_enter's 20-byte {r0-r3, lr} save.
.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm
290
@ Pop both the mcount_enter save and the call-site push in one go:
@ ip receives the return address, lr the instrumented function's lr.
.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm
295
/*
 * __gnu_mcount_nc - EABI mcount entry; the caller pushes {lr} before
 * branching here.  With dynamic ftrace the default body only pops that
 * push and returns (this site is patched at runtime by the ftrace core).
 */
ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr			@ return address
	ldmia	sp!, {lr}		@ restore the caller-pushed lr
	mov	pc, ip
#else
	__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
307
308#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
UNWIND(.fnstart)
	__ftrace_caller			@ dynamic-ftrace entry, EABI mcount
UNWIND(.fnend)
ENDPROC(ftrace_caller)
314#endif
315
316#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
	__ftrace_graph_caller		@ graph-tracer entry, EABI mcount
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
322#endif
323
324.purgem mcount_enter
325.purgem mcount_get_lr
326.purgem mcount_exit
327
328#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	@ Trampoline the graph tracer installs as a fake return address:
	@ ask ftrace_return_to_handler() for the real one and jump there.
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}		@ preserve the function's return values
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
337#endif
338
ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr			@ default tracer: return immediately
ENDPROC(ftrace_stub)
343
344#endif
345
346
347
348
349
350
351 .align 5
352ENTRY(vector_swi)
353#ifdef CONFIG_CPU_V7M
354 v7m_exception_entry
355#else
356 sub sp, sp,
357 stmia sp, {r0 - r12} @ Calling r0 - r12
358 ARM( add r8, sp,
359 ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
360 THUMB( mov r8, sp )
361 THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
362 mrs r8, spsr @ called from non-FIQ mode, so ok.
363 str lr, [sp,
364 str r8, [sp,
365 str r0, [sp,
366#endif
367 zero_fp
368
369#ifdef CONFIG_ALIGNMENT_TRAP
370 ldr ip, __cr_alignment
371 ldr ip, [ip]
372 mcr p15, 0, ip, c1, c0 @ update control register
373#endif
374
375 enable_irq
376 ct_user_exit
377 get_thread_info tsk
378
379
380
381
382
383
384
385
386
387
388
389#ifdef CONFIG_ARM_THUMB
390 tst r8,
391 movne r10,
392 USER( ldreq r10, [lr,
393#else
394 USER( ldr r10, [lr,
395#endif
396#ifdef CONFIG_CPU_ENDIAN_BE8
397 rev r10, r10 @ little endian instruction
398#endif
399
400
401
402
403
404
405
406
407 tst r8,
408 addne scno, r7,
409 USER( ldreq scno, [lr,
410
411#else
412
413 USER( ldr scno, [lr,
414#endif
415
416 adr tbl, sys_call_table @ load syscall table pointer
417
418
419
420
421
422
423
424
425 bics r10, r10,
426 eorne scno, r10,
427 ldrne tbl, =sys_oabi_call_table
428
429 bic scno, scno,
430 eor scno, scno,
431#endif
432
433local_restart:
434 ldr r10, [tsk,
435 stmdb sp!, {r4, r5} @ push fifth and sixth args
436
437 tst r10,
438 bne __sys_trace
439
440 cmp scno,
441 adr lr, BSYM(ret_fast_syscall) @ return address
442 ldrcc pc, [tbl, scno, lsl
443
444 add r1, sp,
4452: cmp scno,
446 eor r0, scno,
447 bcs arm_syscall
448 mov why,
449 b sys_ni_syscall @ not private func
450
451
452
453
454
455
456
457
458
459
4609001:
461 sub lr, lr,
462 str lr, [sp,
463 b ret_fast_syscall
464#endif
465ENDPROC(vector_swi)
466
467
468
469
470
/*
 * Syscall entry with tracing/auditing work pending: hand off to C,
 * which may rewrite the syscall number, then redispatch.
 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall
487
__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]		@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall
493
494 .align 5
495#ifdef CONFIG_ALIGNMENT_TRAP
496 .type __cr_alignment,
497__cr_alignment:
498 .word cr_alignment
499#endif
500 .ltorg
501
502
503
504
505
/*
 * Native-ABI syscall table.  Under EABI the OBSOLETE() entries are
 * routed to sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
518
519
520
521
/*
 * sys_syscall - indirect syscall: the real syscall number arrives in
 * r0 and the remaining arguments shift down by one register.
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)
536
sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF		@ r0 = pt_regs
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
542
sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF		@ r0 = pt_regs
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
548
sys_statfs64_wrapper:
		teq	r1, #88		@ old user-visible struct size ...
		moveq	r1, #84		@ ... mapped to the kernel's size
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)
554
sys_fstatfs64_wrapper:
		teq	r1, #88		@ old user-visible struct size ...
		moveq	r1, #84		@ ... mapped to the kernel's size
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
560
561
562
563
564
/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not page-aligned, we return -EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
578
579#ifdef CONFIG_OABI_COMPAT
580
581
582
583
584
@ OABI shim: move the 64-bit offset halves (r3, r4) into the stack arg
@ slots the EABI sys_pread64 expects, then tail-call it.
sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)
589
@ OABI shim: move the 64-bit offset halves (r3, r4) into the stack arg
@ slots the EABI sys_pwrite64 expects, then tail-call it.
sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)
594
@ OABI shim: shift the 64-bit length from r1-r2 (OABI packing) into the
@ r2-r3 register pair required by the EABI sys_truncate64.
sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)
600
@ OABI shim: shift the 64-bit length from r1-r2 (OABI packing) into the
@ r2-r3 register pair required by the EABI sys_ftruncate64.
sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)
606
@ OABI shim: move the count (r3) to the stack arg slot and the 64-bit
@ offset from r1-r2 into the r2-r3 pair for the EABI sys_readahead.
sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)
613
614
615
616
617
/*
 * Second syscall table for old-ABI binaries: picks the compat variants
 * and keeps the OBSOLETE() entries callable.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
626
627#endif
628
629