// SPDX-License-Identifier: GPL-2.0
/*
 * common.c - C code for kernel entry and exit
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
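/* Called on entry from user mode with IRQs off. */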
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif
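
/*
 * Hand the syscall number and its first four arguments to the audit
 * subsystem, using the register layout that matches the reported arch.
 */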
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

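/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */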
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
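	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */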
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
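	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */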
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

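/* Called with IRQs disabled. */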
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
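	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */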
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

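	/*
	 * Report a single-step trap only if we are single-stepping and
	 * not emulating the syscall: with _TIF_SYSCALL_EMU set, the
	 * tracer was already notified at syscall entry.
	 */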
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

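/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */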
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

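	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */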
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

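	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */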
	nr &= __SYSCALL_MASK;
	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
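/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */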
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
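		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, the assignment to the 32-bit
		 * nr truncates it.  This matches the behavior of the
		 * 32-bit kernel.
		 */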
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
		regs->ax = ia32_sys_call_table[nr](regs);
#else
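		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */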
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
#endif
	}

	syscall_return_slowpath(regs);
}

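/* Handles int $0x80 */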
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

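/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */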
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
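	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered via int80.
	 */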
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

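	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */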
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

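	/* Fetch EBP from where the vDSO stashed it. */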
	if (
#ifdef CONFIG_X86_64
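		/*
		 * Micro-optimization: the pointer we're following is
		 * explicitly 32 bits, so it can't be out of range.
		 */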
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
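	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * We can only do this if CS, SS and IP still hold the values the
	 * vDSO fast-path entry set up and no trap flags are pending;
	 * otherwise fall back to IRET.
	 */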
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
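	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 * This requires a CPU with SYSENTER/SYSEXIT support, and CS, SS
	 * and IP must still hold the values the fast-path entry set up.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */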
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif