linux/arch/x86/include/asm/switch_to.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

struct task_struct *__switch_to_asm(struct task_struct *prev,
                                    struct task_struct *next);

__visible struct task_struct *__switch_to(struct task_struct *prev,
                                          struct task_struct *next);

/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
        /*
         * If we switch to a stack that has a top-level paging entry
         * that is not present in the current mm, the resulting #PF
         * will be promoted to a double-fault and we'll panic.  Probe
         * the new stack now so that vmalloc_fault can fix up the page
         * tables if needed.  This can only happen if we use a stack
         * in vmap space.
         *
         * We assume that the stack is aligned so that it never spans
         * more than one top-level paging entry.
         *
         * To minimize cache pollution, just follow the stack pointer.
         */
        READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
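
/*
 * Illustrative sketch (not part of the original header): the READ_ONCE()
 * probe above is roughly equivalent to pre-faulting the new stack while we
 * still have a usable stack to take the #PF on, e.g.
 *
 *      volatile unsigned char c = *(unsigned char *)next->thread.sp;
 *      (void)c;
 *
 * The alignment assumption holds because vmap stacks are THREAD_SIZE-sized
 * and THREAD_SIZE-aligned, and THREAD_SIZE is far below PGDIR_SIZE; a
 * hypothetical compile-time expression of that assumption would be
 *
 *      BUILD_BUG_ON(THREAD_SIZE > PGDIR_SIZE);
 */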

asmlinkage void ret_from_fork(void);

/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
        unsigned long r15;
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
#else
        unsigned long si;
        unsigned long di;
#endif
        unsigned long bx;

        /*
         * These two fields must be together.  They form a stack frame header,
         * needed by get_frame_pointer().
         */
        unsigned long bp;
        unsigned long ret_addr;
};
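
/*
 * For reference, the 64-bit __switch_to_asm() (arch/x86/entry/entry_64.S)
 * builds this frame with, roughly:
 *
 *      pushq   %rbp
 *      pushq   %rbx
 *      pushq   %r12
 *      pushq   %r13
 *      pushq   %r14
 *      pushq   %r15
 *      movq    %rsp, TASK_threadsp(%rdi)       # save prev->thread.sp
 *      movq    TASK_threadsp(%rsi), %rsp       # load next->thread.sp
 *
 * ret_addr is the return address pushed by the call to __switch_to_asm().
 * The stack grows down, so the last push (%r15) lands at the lowest
 * address, which is why r15 is the first field above.
 */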

struct fork_frame {
        struct inactive_task_frame frame;
        struct pt_regs regs;
};
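
/*
 * A sketch of how copy_thread() (arch/x86/kernel/process.c) uses this: the
 * child's pt_regs sit at the top of its stack and the switch frame is placed
 * directly below them, so the first __switch_to_asm() into the child
 * "returns" to ret_from_fork():
 *
 *      fork_frame = container_of(childregs, struct fork_frame, regs);
 *      frame = &fork_frame->frame;
 *      frame->bp = 0;
 *      frame->ret_addr = (unsigned long) ret_from_fork;
 *      p->thread.sp = (unsigned long) fork_frame;
 */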

#define switch_to(prev, next, last)                                     \
do {                                                                    \
        prepare_switch_to(next);                                        \
                                                                        \
        ((last) = __switch_to_asm((prev), (next)));                     \
} while (0)
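
/*
 * The scheduler's context_switch() (kernel/sched/core.c) invokes this as
 *
 *      switch_to(prev, next, prev);
 *
 * The third argument matters: by the time __switch_to_asm() returns, we are
 * running on the new task's stack, so the local 'prev' there was saved by
 * some earlier context switch.  __switch_to() returns the task we actually
 * switched away from, and 'last' captures it for finish_task_switch().
 */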

#ifdef CONFIG_X86_32
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
        if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
                return;

        this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
        wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif
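
/*
 * Usage sketch: the 32-bit __switch_to() (arch/x86/kernel/process_32.c)
 * calls this for the incoming task, roughly as
 *
 *      refresh_sysenter_cs(&next_p->thread);
 *
 * so MSR_IA32_SYSENTER_CS follows the task being scheduled in.  vm86 mode
 * temporarily sets sysenter_cs to 0 to disable SYSENTER and restores it on
 * exit, using this same helper.
 */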

/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_task_stack(struct task_struct *task)
{
        /* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
        if (static_cpu_has(X86_FEATURE_XENPV))
                load_sp0(task->thread.sp0);
        else
                this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
        /*
         * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack.  That
         * doesn't work on x86-32 because sp1 and cpu_current_top_of_stack
         * have different values (because of the non-zero stack padding on
         * 32-bit).
         */
        if (static_cpu_has(X86_FEATURE_XENPV))
                load_sp0(task_top_of_stack(task));
#endif
}
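
/*
 * Usage sketch: both __switch_to() implementations call this for the
 * incoming task, e.g.
 *
 *      update_task_stack(next_p);
 *
 * The vm86 entry/exit paths call it as well, because vm86 mode changes
 * where the kernel stack for ring-0 entry must point.
 */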

#endif /* _ASM_X86_SWITCH_TO_H */