linux/arch/x86/include/asm/switch_to.h
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

struct task_struct; /* one of the stranger aspects of C forward declarations */
__visible struct task_struct *__switch_to(struct task_struct *prev,
                                          struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss);

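/*
 * For orientation, a rough sketch of the call site (see
 * context_switch() in kernel/sched/core.c): the scheduler passes
 * @prev twice, so that the stack being resumed learns which task it
 * really switched away from:
 *
 *      switch_to(prev, next, prev);
 *      barrier();
 *
 * After the macro, "prev" names the task this CPU actually came from.
 */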
#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movl %P[task_canary](%[next]), %%ebx\n\t"                      \
        "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam                                          \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */
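/*
 * What the stack-protector hunk accomplishes, informally: compiler
 * instrumented prologues compare against a canary kept at a fixed
 * per-cpu location, so switch_to() must copy next->stack_canary into
 * that slot (staged through EBX, which is in the clobber list below)
 * before any instrumented function can run on next's stack.  With
 * [next] in EDX, the expansion is roughly:
 *
 *      movl <stack_canary offset>(%edx), %ebx
 *      movl %ebx, <per-cpu stack_canary slot>
 */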

/*
 * Saving eflags is important. It not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        /*                                                              \
         * Context-switching clobbers all registers, so we clobber     \
         * them explicitly, via unused output variables.                \
         * (EAX and EBP are not listed because EBP is saved/restored    \
         * explicitly for wchan access and EAX is the return value of   \
         * __switch_to())                                               \
         */                                                             \
        unsigned long ebx, ecx, edx, esi, edi;                          \
                                                                        \
        asm volatile("pushfl\n\t"               /* save    flags */     \
                     "pushl %%ebp\n\t"          /* save    EBP   */     \
                     "movl %%esp,%[prev_sp]\n\t"        /* save    ESP   */ \
                     "movl %[next_sp],%%esp\n\t"        /* restore ESP   */ \
                     "movl $1f,%[prev_ip]\n\t"  /* save    EIP   */     \
                     "pushl %[next_ip]\n\t"     /* restore EIP   */     \
                     __switch_canary                                    \
                     "jmp __switch_to\n"        /* regparm call  */     \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"           /* restore EBP   */     \
                     "popfl\n"                  /* restore flags */     \
                                                                        \
                     /* output parameters */                            \
                     : [prev_sp] "=m" (prev->thread.sp),                \
                       [prev_ip] "=m" (prev->thread.ip),                \
                       "=a" (last),                                     \
                                                                        \
                       /* clobbered output registers: */                \
                       "=b" (ebx), "=c" (ecx), "=d" (edx),              \
                       "=S" (esi), "=D" (edi)                           \
                                                                        \
                       __switch_canary_oparam                           \
                                                                        \
                       /* input parameters: */                          \
                     : [next_sp]  "m" (next->thread.sp),                \
                       [next_ip]  "m" (next->thread.ip),                \
                                                                        \
                       /* regparm parameters for __switch_to(): */      \
                       [prev]     "a" (prev),                           \
                       [next]     "d" (next)                            \
                                                                        \
                       __switch_canary_iparam                           \
                                                                        \
                     : /* reloaded segment registers */                 \
                        "memory");                                      \
} while (0)
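/*
 * Why a third argument at all?  When a task's stack is resumed, the
 * local prev/next variables on that stack still describe *its* last
 * switch, which may be several reschedules old.  __switch_to() returns
 * the task we really came from in EAX, and the "=a" (last) output
 * publishes it.  A hypothetical three-task schedule:
 *
 *      A: switch_to(A, B, last)   ... B runs, then C runs ...
 *      C: switch_to(C, A, last)   ... A's stack resumes ...
 *      A: last == C, even though A originally switched to B.
 */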

#else /* CONFIG_X86_32 */

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
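/*
 * Informally: SAVE_CONTEXT pushes %rbp last so that the word at the
 * top of a sleeping task's saved stack (thread.sp) is its frame
 * pointer, which is where get_wchan() starts unwinding.  It also
 * parks next (%rsi) in the callee-saved %rbp so the value survives
 * the call to __switch_to(); RESTORE_CONTEXT moves it back.
 */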

#define __EXTRA_CLOBBER  \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
          "r12", "r13", "r14", "r15", "flags"
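/*
 * Note that "flags" is declared clobbered instead of being saved and
 * restored with pushf/popf; the comment above the 64-bit switch_to()
 * below explains why that is safe.
 */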

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                   \
        "movq %P[task_canary](%%rsi),%%r8\n\t"                            \
        "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam                                            \
        , [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam                                            \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */
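/*
 * Same idea as the 32-bit canary copy: on 64-bit the compiler reads
 * the canary from the per-cpu irq_stack_union (a %gs-relative slot),
 * so next->stack_canary is staged through %r8, a call-clobbered
 * register already listed in __EXTRA_CLOBBER, and stored there.
 * Note that %rsi holds the incoming task here because switch_to()
 * reloads it from current_task right before this macro expands.
 */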

/*
 * There is no need to save or restore flags, because flags are always
 * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
 * has no effect.
 */
#define switch_to(prev, next, last) \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
             "call __switch_to\n\t"                                       \
             "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
             __switch_canary                                              \
             "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
             "movq %%rax,%%rdi\n\t"                                       \
             "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"                 \
             "jnz   ret_from_fork\n\t"                                    \
             RESTORE_CONTEXT                                              \
             : "=a" (last)                                                \
               __switch_canary_oparam                                     \
             : [next] "S" (next), [prev] "D" (prev),                      \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
               [_tif_fork] "i" (_TIF_FORK),                               \
               [thread_info] "i" (offsetof(struct task_struct, stack)),   \
               [current_task] "m" (current_task)                          \
               __switch_canary_iparam                                     \
             : "memory", "cc" __EXTRA_CLOBBER)
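/*
 * A rough walk-through of the tail above: __switch_to() hands back the
 * outgoing task in %rax, which feeds the "=a" (last) output and is
 * copied to %rdi.  %r8 is loaded with the incoming task's thread_info
 * so the _TIF_FORK test can detect a freshly forked child; such a task
 * has never been through switch_to(), so its first schedule-in jumps
 * to ret_from_fork (which consumes prev from %rdi via schedule_tail())
 * instead of falling through RESTORE_CONTEXT to a switch_to() call
 * site.
 */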

#endif /* CONFIG_X86_32 */

#endif /* _ASM_X86_SWITCH_TO_H */