/* arch/x86/include/asm/fpu/xstate.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/uaccess.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/fpu/api.h>
#include <asm/user.h>

/* Bit 63 of XCR0 is reserved for future expansion */
#define XFEATURE_MASK_EXTEND	(~(XFEATURE_MASK_FPSSE | (1ULL << 63)))

/* CPUID leaf used to enumerate XSAVE features and buffer sizes */
#define XSTATE_CPUID		0x0000000d

/* Size of the legacy FXSAVE image which starts every XSAVE buffer */
#define FXSAVE_SIZE	512

/* The 64-byte XSAVE header immediately follows the legacy FXSAVE image */
#define XSAVE_HDR_SIZE	    64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

/* YMM state follows the XSAVE header */
#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/* Required alignment (bytes) of an XSAVE buffer */
#define XSAVE_ALIGNMENT     64
/* All currently supported user (XCR0-managed) features */
#define XFEATURE_MASK_USER_SUPPORTED (XFEATURE_MASK_FP | \
				      XFEATURE_MASK_SSE | \
				      XFEATURE_MASK_YMM | \
				      XFEATURE_MASK_OPMASK | \
				      XFEATURE_MASK_ZMM_Hi256 | \
				      XFEATURE_MASK_Hi16_ZMM	 | \
				      XFEATURE_MASK_PKRU | \
				      XFEATURE_MASK_BNDREGS | \
				      XFEATURE_MASK_BNDCSR)

/*
 * Features which are restored when returning to user space.
 * PKRU is not restored on return to user space because PKRU
 * is switched eagerly in switch_to() and flush_thread().
 */
#define XFEATURE_MASK_USER_RESTORE	\
	(XFEATURE_MASK_USER_SUPPORTED & ~XFEATURE_MASK_PKRU)

/* All currently supported supervisor (IA32_XSS-managed) features */
#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID)
  48
/*
 * A supervisor state component may not always contain valuable information,
 * and its size may be huge. Saving/restoring such supervisor state components
 * at each context switch can cause high CPU and space overhead, which should
 * be avoided. Such supervisor state components should only be saved/restored
 * on demand. The on-demand supervisor features are set in this mask.
 *
 * Unlike the existing supported supervisor features, an independent supervisor
 * feature does not allocate a buffer in task->fpu, and the corresponding
 * supervisor state component cannot be saved/restored at each context switch.
 *
 * To support an independent supervisor feature, a developer should follow the
 * dos and don'ts as below:
 * - Do dynamically allocate a buffer for the supervisor state component.
 * - Do manually invoke the XSAVES/XRSTORS instruction to save/restore the
 *   state component to/from the buffer.
 * - Don't set the bit corresponding to the independent supervisor feature in
 *   IA32_XSS at run time, since it has been set at boot time.
 */
#define XFEATURE_MASK_INDEPENDENT (XFEATURE_MASK_LBR)

/*
 * Unsupported supervisor features. When a supervisor feature in this mask is
 * supported in the future, move it to the supported supervisor feature mask.
 */
#define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT)

/* All supervisor states including supported and unsupported states. */
#define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
				      XFEATURE_MASK_INDEPENDENT | \
				      XFEATURE_MASK_SUPERVISOR_UNSUPPORTED)
  80
/* 0x48 is the REX.W prefix for the 64-bit XSAVE/XRSTOR instruction forms */
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* Mask of all enabled xfeatures (user and supervisor); defined in xstate.c */
extern u64 xfeatures_mask_all;
  88
  89static inline u64 xfeatures_mask_supervisor(void)
  90{
  91        return xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
  92}
  93
  94/*
  95 * The xfeatures which are enabled in XCR0 and expected to be in ptrace
  96 * buffers and signal frames.
  97 */
  98static inline u64 xfeatures_mask_uabi(void)
  99{
 100        return xfeatures_mask_all & XFEATURE_MASK_USER_SUPPORTED;
 101}
 102
 103/*
 104 * The xfeatures which are restored by the kernel when returning to user
 105 * mode. This is not necessarily the same as xfeatures_mask_uabi() as the
 106 * kernel does not manage all XCR0 enabled features via xsave/xrstor as
 107 * some of them have to be switched eagerly on context switch and exec().
 108 */
 109static inline u64 xfeatures_mask_restore_user(void)
 110{
 111        return xfeatures_mask_all & XFEATURE_MASK_USER_RESTORE;
 112}
 113
 114/*
 115 * Like xfeatures_mask_restore_user() but additionally restors the
 116 * supported supervisor states.
 117 */
 118static inline u64 xfeatures_mask_fpstate(void)
 119{
 120        return xfeatures_mask_all & \
 121                (XFEATURE_MASK_USER_RESTORE | XFEATURE_MASK_SUPERVISOR_SUPPORTED);
 122}
 123
 124static inline u64 xfeatures_mask_independent(void)
 125{
 126        if (!boot_cpu_has(X86_FEATURE_ARCH_LBR))
 127                return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;
 128
 129        return XFEATURE_MASK_INDEPENDENT;
 130}
 131
/* FX software-reserved bytes advertised in the uabi signal frame */
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

extern void __init update_regset_xstate_info(unsigned int size,
					     u64 xstate_mask);

/* Locate a feature's state inside an XSAVE buffer; NULL semantics in xstate.c */
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
int xfeature_size(int xfeature_nr);
int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);

/* Raw XSAVES/XRSTORS wrappers operating on a caller-provided buffer */
void xsaves(struct xregs_state *xsave, u64 mask);
void xrstors(struct xregs_state *xsave, u64 mask);
/*
 * Target format for copying xstate out to a uabi buffer.
 * NOTE(review): names suggest legacy FP, FXSAVE and full XSAVE layouts
 * respectively — confirm against copy_xstate_to_uabi_buf() in xstate.c.
 */
enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};
 150
struct membuf;
/* Copy a task's xstate into a uabi buffer in the requested format */
void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
			     enum xstate_copy_mode mode);

#endif /* __ASM_X86_XSAVE_H */
 156