/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>

asmlinkage extern void ret_from_fork(void);

__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	p->thread.sp = (unsigned long) childregs;
	set_tsk_thread_flag(p, TIF_FORK);
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->sp = (unsigned long)childregs;
		childregs->ss = __KERNEL_DS;
		childregs->bx = sp; /* function */
		childregs->bp = arg;
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
		return 0;
	}
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
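
/*
 * Illustration (not part of the kernel build): how the CLONE_SETTLS path
 * above is reached from userspace on x86-64.  For a 64-bit caller the tls
 * argument is simply the new FS base, which ends up in
 * do_arch_prctl(p, ARCH_SET_FS, tls).  A minimal hedged sketch, assuming
 * the glibc clone() wrapper's (fn, stack, flags, arg, parent_tid, tls,
 * child_tid) argument order; child_fn and spawn_with_tls are illustrative
 * names only:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static int child_fn(void *arg)
 *	{
 *		return 0;	// runs with FS base = tls_block
 *	}
 *
 *	static void spawn_with_tls(void *tls_block)
 *	{
 *		char *stack = malloc(64 * 1024);
 *		clone(child_fn, stack + 64 * 1024,
 *		      CLONE_VM | CLONE_SETTLS | SIGCHLD, NULL,
 *		      NULL, tls_block, NULL);
 *	}
 *
 * A real TLS block must be laid out the way the C library expects (see
 * arch_prctl(2) and set_thread_area(2)); the sketch only shows which
 * argument flows into the CLONE_SETTLS branch above.
 */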

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
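
/*
 * Usage note (not from this file): binary-format loaders call
 * start_thread() once a new user image is set up.  For example,
 * load_elf_binary() in fs/binfmt_elf.c finishes with roughly
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * which points the saved user registers at the new entry point and stack;
 * the segment registers are actually reloaded on the return to user mode
 * forced by force_iret().
 */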

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported too.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	unsigned prev_fsindex, prev_gsindex;
	fpu_switch_t fpu_switch;

	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, prev_fsindex);
	savesegment(gs, prev_gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must be
	 * done before the FPU state is restored, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl.  The bases
	 * don't necessarily match the selectors, as user code can do
	 * any number of things to cause them to be inconsistent.
	 *
	 * We don't promise to preserve the bases if the selectors are
	 * nonzero.  We also don't promise to preserve the base if the
	 * selector is zero and the base doesn't match whatever was
	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
	 * FSGSBASE instructions are enabled, we'll need to offer
	 * stronger guarantees.)
	 *
	 * As an invariant, a task never has both a nonzero saved fsindex
	 * and a nonzero cached fsbase (likewise for gs): whenever the
	 * selector is nonzero, the cached base is zeroed below.
	 */
	if (next->fsindex) {
		/* Loading a nonzero value into FS sets the index and base. */
		loadsegment(fs, next->fsindex);
	} else {
		if (next->fsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_fsindex)
				loadsegment(fs, 0);
			wrmsrl(MSR_FS_BASE, next->fsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 */
				loadsegment(fs, __USER_DS);
				loadsegment(fs, 0);
			} else {
				/*
				 * If the previous index is zero and
				 * ARCH_SET_FS didn't change the base, then
				 * the base is also zero and we don't need
				 * to do anything.
				 */
				if (prev->fsbase || prev_fsindex)
					loadsegment(fs, 0);
			}
		}
	}

	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_fsindex == 0, we can't reliably learn the base
	 * without RDMSR, because Intel user code can zero it without
	 * telling us and AMD user code can program any 32-bit value
	 * without telling us.
	 */
	if (prev_fsindex)
		prev->fsbase = 0;
	prev->fsindex = prev_fsindex;

	if (next->gsindex) {
		/* Loading a nonzero value into GS sets the index and base. */
		load_gs_index(next->gsindex);
	} else {
		if (next->gsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_gsindex)
				load_gs_index(0);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 *
				 * This contains a pointless SWAPGS pair.
				 * Fixing it would involve an explicit check
				 * for Xen or a new pvop.
				 */
				load_gs_index(__USER_DS);
				load_gs_index(0);
			} else {
				/*
				 * If the previous index is zero and
				 * ARCH_SET_GS didn't change the base, then
				 * the base is also zero and we don't need
				 * to do anything.
				 */
				if (prev->gsbase || prev_gsindex)
					load_gs_index(0);
			}
		}
	}

	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_gsindex == 0, we can't reliably learn the base
	 * without RDMSR, because Intel user code can zero it without
	 * telling us and AMD user code can program any 32-bit value
	 * without telling us.
	 */
	if (prev_gsindex)
		prev->gsbase = 0;
	prev->gsindex = prev_gsindex;

	switch_fpu_finish(next_fpu, fpu_switch);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload sp0: this changes what task_pt_regs(next_p) will return. */
	load_sp0(tss, next);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector
		 * but does not refresh the cached SS descriptor attributes,
		 * so a SYSRET can leave SS's cached descriptor marked
		 * unusable.  If we context-switch in that state and the
		 * incoming task later returns to 32-bit userspace, the
		 * stale SS attributes can break 32-bit code that relies
		 * on them.
		 *
		 * Reloading SS with a known-good selector restores sane
		 * cached attributes.  Skip the reload when SS is already
		 * __KERNEL_DS, which is the common case, to keep the
		 * context-switch path cheap.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/*
		 * in_compat_syscall() uses the presence of the x32 syscall
		 * bit flag to determine compat status.
		 */
		current_thread_info()->status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current_thread_info()->status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

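/*
 * Illustration (not part of the kernel build): exercising do_arch_prctl()
 * from userspace.  glibc historically provides no arch_prctl() wrapper, so
 * this minimal hedged sketch goes through syscall(2); ARCH_SET_GS and
 * ARCH_GET_GS come from <asm/prctl.h>:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		static unsigned long scratch[16];
 *		unsigned long base = 0;
 *
 *		// Point the user GS base at some process-local storage.
 *		syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)scratch);
 *
 *		// Read it back: for the current task the kernel answers
 *		// from MSR_KERNEL_GS_BASE, per ARCH_GET_GS above.
 *		syscall(SYS_arch_prctl, ARCH_GET_GS, (unsigned long)&base);
 *		printf("GS base: %#lx\n", base);
 *		return 0;
 *	}
 */
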
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}