linux/arch/x86/kernel/process_64.c
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *      Andi Kleen.
 *
 *      CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
                regs->sp, regs->flags);
        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
                pr_cont("\n");

        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
                        es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
                        cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))) {
                printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
                       d0, d1, d2);
                printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
                       d3, d6, d7);
        }

        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->nr_entries);
                        BUG();
                }
#endif
        }
}

enum which_selector {
        FS,
        GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
                                             unsigned short selector,
                                             enum which_selector which)
{
        if (likely(selector == 0)) {
                /*
                 * On Intel (without X86_BUG_NULL_SEG), the segment base could
                 * be the pre-existing saved base or it could be zero.  On AMD
                 * (with X86_BUG_NULL_SEG), the segment base could be almost
                 * anything.
                 *
                 * This branch is very hot (it's hit twice on almost every
                 * context switch between 64-bit programs), and avoiding
                 * the RDMSR helps a lot, so we just assume that whatever
                 * value is already saved is correct.  This matches historical
                 * Linux behavior, so it won't break existing applications.
                 *
                 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
                 * report that the base is zero, it needs to actually be zero:
                 * see the corresponding logic in load_seg_legacy.
                 */
        } else {
                /*
                 * If the selector is 1, 2, or 3, then the base is zero on
                 * !X86_BUG_NULL_SEG CPUs and could be anything on
                 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
                 * has never attempted to preserve the base across context
                 * switches.
                 *
                 * If selector > 3, then it refers to a real segment, and
                 * saving the base isn't necessary.
                 */
                if (which == FS)
                        prev_p->thread.fsbase = 0;
                else
                        prev_p->thread.gsbase = 0;
        }
}
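
/*
 * Worked example (editor's illustration, not from the original source): a
 * 64-bit thread that set up TLS with arch_prctl(ARCH_SET_FS, base) runs
 * with fsindex == 0 and a nonzero fsbase, so it takes the hot
 * selector == 0 path above and the previously saved fsbase is simply
 * trusted.  A task using a real LDT/GDT segment (selector > 3) keeps its
 * base in the descriptor itself, so there is nothing to save at all.
 */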

static __always_inline void save_fsgs(struct task_struct *task)
{
        savesegment(fs, task->thread.fsindex);
        savesegment(gs, task->thread.gsindex);
        save_base_legacy(task, task->thread.fsindex, FS);
        save_base_legacy(task, task->thread.gsindex, GS);
}

static __always_inline void loadseg(enum which_selector which,
                                    unsigned short sel)
{
        if (which == FS)
                loadsegment(fs, sel);
        else
                load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
                                            unsigned long prev_base,
                                            unsigned short next_index,
                                            unsigned long next_base,
                                            enum which_selector which)
{
        if (likely(next_index <= 3)) {
                /*
                 * The next task is using 64-bit TLS, is not using this
                 * segment at all, or is having fun with arcane CPU features.
                 */
                if (next_base == 0) {
                        /*
                         * Nasty case: on AMD CPUs, we need to forcibly zero
                         * the base.
                         */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                loadseg(which, __USER_DS);
                                loadseg(which, next_index);
                        } else {
                                /*
                                 * We could try to exhaustively detect cases
                                 * under which we can skip the segment load,
                                 * but there's really only one case that matters
                                 * for performance: if both the previous and
                                 * next states are fully zeroed, we can skip
                                 * the load.
                                 *
                                 * (This assumes that prev_base == 0 has no
                                 * false positives.  This is the case on
                                 * Intel-style CPUs.)
                                 */
                                if (likely(prev_index | next_index | prev_base))
                                        loadseg(which, next_index);
                        }
                } else {
                        if (prev_index != next_index)
                                loadseg(which, next_index);
                        wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
                               next_base);
                }
        } else {
                /*
                 * The next task is using a real segment.  Loading the selector
                 * is sufficient.
                 */
                loadseg(which, next_index);
        }
}
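
/*
 * Illustrative summary of the cases above (editor's sketch, derived from
 * the code, not from the original source):
 *
 *      next_index > 3             -> load the selector; the descriptor
 *                                    supplies the base
 *      next_index <= 3, base != 0 -> load the selector if it changed,
 *                                    then WRMSR the base
 *      next_index <= 3, base == 0 -> X86_BUG_NULL_SEG CPUs: bounce through
 *                                    __USER_DS to force the base to zero;
 *                                    other CPUs: skip the load entirely
 *                                    when the previous state was also all
 *                                    zero
 */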

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct fork_frame *fork_frame;
        struct inactive_task_frame *frame;
        struct task_struct *me = current;

        p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        childregs = task_pt_regs(p);
        fork_frame = container_of(childregs, struct fork_frame, regs);
        frame = &fork_frame->frame;
        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
        savesegment(fs, p->thread.fsindex);
        p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                frame->bx = sp;         /* function */
                frame->r12 = arg;
                return 0;
        }
        frame->bx = 0;
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (in_ia32_syscall())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}
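
/*
 * Editor's note (illustrative, not from the original source): for the
 * PF_KTHREAD path above, e.g. a thread created with
 * kernel_thread(fn, arg, flags), fn lands in frame->bx and arg in
 * frame->r12; ret_from_fork sees the nonzero %rbx and calls fn(arg)
 * instead of returning to user mode.
 */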

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        WARN_ON_ONCE(regs != current_pt_regs());

        if (static_cpu_has(X86_BUG_NULL_SEG)) {
                /* Loading zero below won't clear the base. */
                loadsegment(fs, __USER_DS);
                load_gs_index(__USER_DS);
        }

        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);

        regs->ip                = new_ip;
        regs->sp                = new_sp;
        regs->cs                = _cs;
        regs->ss                = _ss;
        regs->flags             = X86_EFLAGS_IF;
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}
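
/*
 * Editor's note (illustrative, not from the original source): the typical
 * caller is the ELF loader, e.g. load_elf_binary() in fs/binfmt_elf.c,
 * which invokes start_thread(regs, elf_entry, bprm->p) once the new image
 * has been mapped.
 */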

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);

        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);

        switch_fpu_prepare(prev_fpu, cpu);

        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        save_fsgs(prev_p);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here.  This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must
         * be done before fpu__restore(), so the TS bit is up to
         * date.
         */
        arch_end_context_switch(next_p);

        /* Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT.  The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * above.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_seg_legacy(prev->fsindex, prev->fsbase,
                        next->fsindex, next->fsbase, FS);
        load_seg_legacy(prev->gsindex, prev->gsbase,
                        next->gsindex, next->gsbase, GS);

        switch_fpu_finish(next_fpu, cpu);

        /*
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);

        /* Reload esp0 and ss1.  This changes current_thread_info(). */
        load_sp0(tss, next);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN_PV
        /*
         * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
         * current_pt_regs()->flags may not match the current task's
         * intended IOPL.  We need to switch it manually.
         */
        if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
                     prev->iopl != next->iopl))
                xen_set_iopl_mask(next->iopl);
#endif

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor.  As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths.  Instead, we ensure that SS is never NULL in
                 * system call context.  We do this by replacing NULL SS
                 * selectors at every context switch.  SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt.  Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes.  Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

        /* Load the Intel cache allocation PQR MSR. */
        intel_rdt_sched_in();

        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);
        /* Pretend that this comes from a 64bit execve */
        task_pt_regs(current)->orig_ax = __NR_execve;

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}
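
/*
 * Editor's note (illustrative, not from the original source): this runs
 * on the exec path via the SET_PERSONALITY() hook in
 * arch/x86/include/asm/elf.h; compat binaries are routed to
 * set_personality_ia32() below instead.
 */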

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
        clear_thread_flag(TIF_IA32);
        set_thread_flag(TIF_X32);
        if (current->mm)
                current->mm->context.ia32_compat = TIF_X32;
        current->personality &= ~READ_IMPLIES_EXEC;
        /*
         * in_compat_syscall() uses the presence of the x32 syscall bit
         * flag to determine compat status.  The x86 mmap() code relies on
         * the syscall bitness, so set the x32 syscall bit right here to
         * make in_compat_syscall() work during exec().
         *
         * Pretend to come from an x32 execve.
         */
        task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
        current->thread.status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
        set_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_X32);
        if (current->mm)
                current->mm->context.ia32_compat = TIF_IA32;
        current->personality |= force_personality32;
        /* Prepare the first "return" to user space */
        task_pt_regs(current)->orig_ax = __NR_ia32_execve;
        current->thread.status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        if (x32)
                __set_personality_x32();
        else
                __set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
        int ret;

        ret = map_vdso_once(image, addr);
        if (ret)
                return ret;

        return (long)image->size;
}
#endif

long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (option) {
        case ARCH_SET_GS:
                if (arg2 >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.gsindex = 0;
                task->thread.gsbase = arg2;
                if (doit) {
                        load_gs_index(0);
                        ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (arg2 >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.fsindex = 0;
                task->thread.fsbase = arg2;
                if (doit) {
                        /* set the selector to 0 to not confuse __switch_to */
                        loadsegment(fs, 0);
                        ret = wrmsrl_safe(MSR_FS_BASE, arg2);
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fsbase;
                ret = put_user(base, (unsigned long __user *)arg2);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gsbase;
                ret = put_user(base, (unsigned long __user *)arg2);
                break;
        }

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
        case ARCH_MAP_VDSO_X32:
                return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case ARCH_MAP_VDSO_32:
                return prctl_map_vdso(&vdso_image_32, arg2);
# endif
        case ARCH_MAP_VDSO_64:
                return prctl_map_vdso(&vdso_image_64, arg2);
#endif

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
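
/*
 * Userspace usage sketch (editor's illustration, not part of this file).
 * There is no glibc wrapper for arch_prctl(2), so syscall(2) is used
 * directly; the option constants come from <asm/prctl.h>:
 *
 *      #include <stdio.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *      #include <asm/prctl.h>          // ARCH_GET_FS, ARCH_SET_GS, ...
 *
 *      int main(void)
 *      {
 *              unsigned long base;
 *
 *              // Read this thread's FS base (the TLS pointer set by libc).
 *              if (syscall(SYS_arch_prctl, ARCH_GET_FS, &base) == 0)
 *                      printf("FS base: %#lx\n", base);
 *              return 0;
 *      }
 *
 * ARCH_SET_FS is normally left to the C library, since overriding it
 * breaks TLS for the calling thread.
 */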

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        long ret;

        ret = do_arch_prctl_64(current, option, arg2);
        if (ret == -EINVAL)
                ret = do_arch_prctl_common(current, option, arg2);

        return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        return do_arch_prctl_common(current, option, arg2);
}
#endif

unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}