linux/arch/x86/kernel/process_32.c
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
#include <asm/intel_rdt_sched.h>
#include <asm/proto.h>

void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long sp;
        unsigned short ss, gs;

        if (user_mode(regs)) {
                sp = regs->sp;
                ss = regs->ss;
                gs = get_user_gs(regs);
        } else {
                sp = kernel_stack_pointer(regs);
                savesegment(ss, ss);
                savesegment(gs, gs);
        }

        show_ip(regs, KERN_DEFAULT);

        printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
        printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
                regs->si, regs->di, regs->bp, sp);
        printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);

        if (mode != SHOW_REGS_ALL)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
        cr4 = __read_cr4();
        printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))
                return;

        printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
                        d0, d1, d2, d3);
        printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
                        d6, d7);
}
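
/*
 * Illustrative sketch (assumption, not part of this file): __show_regs()
 * is normally reached through the generic show_regs() path when the
 * kernel dumps state on an oops or panic, roughly:
 *
 *	void show_regs(struct pt_regs *regs)
 *	{
 *		show_regs_print_info(KERN_DEFAULT);
 *		__show_regs(regs, user_mode(regs) ? SHOW_REGS_USER
 *						  : SHOW_REGS_ALL);
 *		...
 *	}
 */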

void release_thread(struct task_struct *dead_task)
{
        BUG_ON(dead_task->mm);
        release_vm86_irqs(dead_task);
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
        unsigned long arg, struct task_struct *p, unsigned long tls)
{
        struct pt_regs *childregs = task_pt_regs(p);
        struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs);
        struct inactive_task_frame *frame = &fork_frame->frame;
        struct task_struct *tsk;
        int err;

        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
        p->thread.sp0 = (unsigned long) (childregs+1);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                frame->bx = sp;         /* function */
                frame->di = arg;
                p->thread.io_bitmap_ptr = NULL;
                return 0;
        }
        frame->bx = 0;
        *childregs = *current_pt_regs();
        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        task_user_gs(p) = get_user_gs(current_pt_regs());

        p->thread.io_bitmap_ptr = NULL;
        tsk = current;
        err = -ENOMEM;

        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
                                                IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        err = 0;

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS)
                err = do_set_thread_area(p, -1,
                        (struct user_desc __user *)tls, 0);

        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}
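
/*
 * Illustrative sketch (assumption, not part of this file): for a kernel
 * thread created with kernel_thread(fn, arg, flags), the generic fork
 * path lands here with sp == (unsigned long)fn and arg == arg, which is
 * why the PF_KTHREAD branch above stashes them in frame->bx and
 * frame->di for ret_from_fork to pick up.  my_thread_fn and my_data are
 * placeholder names:
 *
 *	pid = kernel_thread(my_thread_fn, my_data, CLONE_FS | CLONE_FILES);
 */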

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        set_user_gs(regs, 0);
        regs->fs                = 0;
        regs->ds                = __USER_DS;
        regs->es                = __USER_DS;
        regs->ss                = __USER_DS;
        regs->cs                = __USER_CS;
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        regs->flags             = X86_EFLAGS_IF;
        force_iret();
}
EXPORT_SYMBOL_GPL(start_thread);
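
/*
 * Illustrative sketch (assumption, not part of this file): binfmt
 * loaders call start_thread() once the new executable image is mapped,
 * so the return to user mode enters the fresh program, e.g. in
 * fs/binfmt_elf.c:
 *
 *	start_thread(regs, elf_entry, bprm->p);
 */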

/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is something of a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        switch_fpu_prepare(prev_fpu, cpu);

        /*
         * Save away %gs. No need to save %fs, as it was saved on the
         * stack on entry.  No need to save %es and %ds, as those are
         * always kernel segments while inside the kernel.  Doing this
         * before setting the new TLS descriptors avoids the situation
         * where we temporarily have non-reloadable segments in %fs
         * and %gs.  This could be an issue if the NMI handler ever
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
        lazy_save_gs(prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        /*
         * Restore IOPL if needed.  In normal use, the flags restore
         * in the switch assembly will handle this.  But if the kernel
         * is running virtualized at a non-zero CPL, the popf will
         * not restore flags, so it must be done in a separate step.
         */
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);

        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
        if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before fpu__restore(), so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);

        /*
         * Reload esp0 and cpu_current_top_of_stack.  This changes
         * current_thread_info().  Refresh the SYSENTER configuration in
         * case prev or next is vm86.
         */
        update_task_stack(next_p);
        refresh_sysenter_cs(next);
        this_cpu_write(cpu_current_top_of_stack,
                       (unsigned long)task_stack_page(next_p) +
                       THREAD_SIZE);

        /*
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
                lazy_load_gs(next->gs);

        switch_fpu_finish(next_fpu, cpu);

        this_cpu_write(current_task, next_p);

        /* Load the Intel cache allocation PQR MSR. */
        intel_rdt_sched_in();

        return prev_p;
}
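
/*
 * Illustrative sketch (assumption, not part of this file): the
 * scheduler does not call __switch_to() directly; the switch_to()
 * macro in <asm/switch_to.h> first switches stacks via __switch_to_asm
 * in entry_32.S, roughly:
 *
 *	#define switch_to(prev, next, last)				\
 *	do {								\
 *		prepare_switch_to(next);				\
 *		((last) = __switch_to_asm((prev), (next)));		\
 *	} while (0)
 */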

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        return do_arch_prctl_common(current, option, arg2);
}
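
/*
 * Illustrative userspace sketch (assumption, not part of this file):
 * on 32-bit x86 this syscall only covers the options handled by
 * do_arch_prctl_common(), such as the CPUID-faulting controls:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#ifndef ARCH_GET_CPUID
 *	#define ARCH_GET_CPUID 0x1011
 *	#endif
 *
 *	int cpuid_enabled = syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);
 */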