linux/arch/x86/include/asm/xsave.h
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

#define XSTATE_CPUID            0x0000000d

#define XSTATE_FP       0x1
#define XSTATE_SSE      0x2
#define XSTATE_YMM      0x4

#define XSTATE_FPSSE    (XSTATE_FP | XSTATE_SSE)

#define FXSAVE_SIZE     512

#define XSAVE_HDR_SIZE      64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

#define XSAVE_YMM_SIZE      256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
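
/*
 * Layout note: the xsave area starts with the 512-byte legacy FXSAVE
 * image, the 64-byte xsave header sits at offset 512, and the YMM
 * state (the high 128 bits of YMM0-15, 16 x 16 = 256 bytes) follows
 * at offset 512 + 64 = 576, i.e. XSAVE_YMM_OFFSET.
 */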

/*
 * These are the features that the OS can handle currently.
 */
#define XCNTXT_MASK     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)

#ifdef CONFIG_X86_64
#define REX_PREFIX      "0x48, "
#else
#define REX_PREFIX
#endif
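
/*
 * 0x48 is a REX.W prefix: on X86_64 it turns the hand-assembled
 * opcodes below into their 64-bit forms (xsave64/xrstor64), which use
 * the 64-bit FPU instruction/operand pointer layout in the legacy area.
 */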

extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);
extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
                            void __user *fpstate,
                            struct _fpx_sw_bytes *sw);

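/*
 * 0x0f,0xae with ModRM byte 0x2f encodes xrstor (%rdi) (or (%edi) on
 * 32-bit); it is spelled out in .byte form so that assemblers without
 * xsave support can still build it.  EDX:EAX carries the
 * requested-feature bitmap, and -1/-1 requests everything enabled in
 * XCR0.  If the xrstor faults (e.g. on a corrupt xsave header), the
 * exception table entry sends us to the fixup at 3: and we return -1.
 */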
static inline int fpu_xrstor_checking(struct fpu *fpu)
{
        struct xsave_struct *fx = &fpu->state->xsave;
        int err;

        asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
                     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
                     : "memory");

        return err;
}

static inline int xsave_user(struct xsave_struct __user *buf)
{
        int err;

        /*
         * Clear the xsave header first, so that reserved fields are
         * initialized to zero.
         */
        err = __clear_user(&buf->xsave_hdr,
                           sizeof(struct xsave_hdr_struct));
        if (unlikely(err))
                return -EFAULT;

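        /*
         * ModRM 0x27 encodes xsave (%rdi)/(%edi).  The exception table
         * entry is open-coded here rather than via _ASM_EXTABLE, with
         * the same effect: a fault while writing the user buffer lands
         * in the fixup at 3: and err is set to -1.
         */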
        __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
                             "2:\n"
                             ".section .fixup,\"ax\"\n"
                             "3:  movl $-1,%[err]\n"
                             "    jmp  2b\n"
                             ".previous\n"
                             ".section __ex_table,\"a\"\n"
                             _ASM_ALIGN "\n"
                             _ASM_PTR "1b,3b\n"
                             ".previous"
                             : [err] "=r" (err)
                             : "D" (buf), "a" (-1), "d" (-1), "0" (0)
                             : "memory");
        if (unlikely(err) && __clear_user(buf, xstate_size))
                err = -EFAULT;
        /* No need to clear here because the caller clears USED_MATH */
        return err;
}

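/*
 * Restore from a user-space buffer.  The 64-bit feature mask is split
 * into its low and high halves, which the instruction expects in EAX
 * and EDX respectively.
 */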
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
        int err;
        struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
                             "2:\n"
                             ".section .fixup,\"ax\"\n"
                             "3:  movl $-1,%[err]\n"
                             "    jmp  2b\n"
                             ".previous\n"
                             ".section __ex_table,\"a\"\n"
                             _ASM_ALIGN "\n"
                             _ASM_PTR "1b,3b\n"
                             ".previous"
                             : [err] "=r" (err)
                             : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
                             : "memory");       /* conservative: *buf is read behind gcc's back */
        return err;
}

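/*
 * Kernel-internal variants: same hand-assembled instructions, but the
 * save area lives in kernel memory, so no fixup/exception table is
 * needed; a fault here would be a kernel bug.
 */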
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
                     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                     :   "memory");
}

static inline void xsave_state(struct xsave_struct *fx, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
                     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                     :   "memory");
}

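/*
 * ModRM 0x37 encodes xsaveopt (%rdi), which may skip writing state
 * components that are unmodified since the last xrstor from this
 * area.  alternative_input() patches it in at boot on CPUs with
 * X86_FEATURE_XSAVEOPT and leaves plain xsave everywhere else.
 */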
static inline void fpu_xsave(struct fpu *fpu)
{
        /* The opcode is emitted as fixed bytes whose ModRM hard-codes
           (%rdi)/(%edi) as the memory operand, so we must force the
           compiler to select an addressing mode that doesn't require
           extended registers; the "D" constraint does exactly that. */
        alternative_input(
                ".byte " REX_PREFIX "0x0f,0xae,0x27",
                ".byte " REX_PREFIX "0x0f,0xae,0x37",
                X86_FEATURE_XSAVEOPT,
                [fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
                "memory");
}
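
/*
 * Illustrative sketch only (not part of this header), assuming a
 * 64-byte-aligned buffer of at least xstate_size bytes whose xsave
 * header has been zeroed:
 *
 *      struct xsave_struct *xs = ...;       // hypothetical save area
 *      xsave_state(xs, XCNTXT_MASK);        // save FP/SSE/YMM state
 *      ...                                  // state gets clobbered
 *      xrstor_state(xs, XCNTXT_MASK);       // put it back
 */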
#endif /* __ASM_X86_XSAVE_H */