linux/arch/x86/include/asm/fpu/internal.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern void fpu__clear_user_states(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);

extern void fpu_sync_fpstate(struct fpu *fpu);

/* Clone and exit operations */
extern int  fpu_clone(struct task_struct *dst);
extern void fpu_flush_thread(void);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif
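
/*
 * Note: in the !CONFIG_X86_DEBUG_FPU case the stub above still evaluates
 * @x once for its side effects but always yields 0, so a construct like
 *
 *        if (WARN_ON_FPU(err))
 *                handle_bad_state();
 *
 * only takes the branch (and warns once) when the debug option is
 * enabled. handle_bad_state() is just a placeholder for illustration,
 * not a real kernel function.
 */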

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
extern void save_fpregs_to_fpstate(struct fpu *fpu);

/* Returns 0 or the negated trap number, which results in -EFAULT for #PF */
#define user_insn(insn, output, input...)                               \
({                                                                      \
        int err;                                                        \
                                                                        \
        might_fault();                                                  \
                                                                        \
        asm volatile(ASM_STAC "\n"                                      \
                     "1: " #insn "\n"                                   \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:  negl %%eax\n"                                 \
                     "    jmp  2b\n"                                    \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_FAULT(1b, 3b)                         \
                     : [err] "=a" (err), output                         \
                     : "0"(0), input);                                  \
        err;                                                            \
})
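
/*
 * Illustrative note on the error convention (a sketch, not extra kernel
 * logic): the fixup at label 3 negates the trap number that the
 * exception handler left in %eax, so a #PF (trap 14) raised by @insn is
 * reported as -14, which equals -EFAULT. A typical caller therefore
 * looks roughly like:
 *
 *        if (fxsave_to_user_sigframe(buf))
 *                return -EFAULT;
 *
 * with @buf being whatever user pointer the signal code set up.
 */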

#define kernel_insn_err(insn, output, input...)                         \
({                                                                      \
        int err;                                                        \
        asm volatile("1:" #insn "\n\t"                                  \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:  movl $-1,%[err]\n"                            \
                     "    jmp  2b\n"                                    \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err), output                         \
                     : "0"(0), input);                                  \
        err;                                                            \
})

#define kernel_insn(insn, output, input...)                             \
        asm volatile("1:" #insn "\n\t"                                  \
                     "2:\n"                                             \
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)  \
                     : output : input)
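
/*
 * The three helpers above differ in how a faulting instruction is
 * reported: user_insn() brackets the access with STAC/CLAC for user
 * memory and returns the negated trap number, kernel_insn_err() returns
 * -1 on a fault, and kernel_insn() has no error return at all - it
 * relies on the ex_handler_fprestore fixup, which is expected to put
 * the FPU registers back into a sane (init) state when a restore
 * faults.
 */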

static inline int fnsave_to_user_sigframe(struct fregs_state __user *fx)
{
        return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_to_user_sigframe(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
        else
                return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}

static inline void fxrstor(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int fxrstor_safe(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int fxrstor_from_user_sigframe(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void frstor(struct fregs_state *fx)
{
        kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_safe(struct fregs_state *fx)
{
        return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_from_user_sigframe(struct fregs_state __user *fx)
{
        return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fxsave(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
        else
                asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE           ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT        ".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES          ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR          ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS         ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
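
/*
 * Encoding note (illustrative): the instructions are emitted as raw
 * bytes, presumably so that assemblers which do not know the mnemonics
 * can still build this code. REX_PREFIX supplies the REX.W byte on
 * 64-bit builds and is empty on 32-bit. The last byte is the ModRM byte
 * that hardwires the memory operand to (%edi)/(%rdi); e.g. for XSAVE
 * (0F AE /4):
 *
 *        0x27 = 00 100 111b  ->  mod=00, reg=/4, r/m=111 = (%rdi)
 *
 * which is why the XSTATE_OP() users below pass the buffer in "D".
 */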

/*
 * After this @err contains 0 on success or the negated trap number when
 * the operation raises an exception. For faults this results in -EFAULT.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)                            \
        asm volatile("1:" op "\n\t"                                     \
                     "xor %[err], %[err]\n"                             \
                     "2:\n\t"                                           \
                     ".pushsection .fixup,\"ax\"\n\t"                   \
                     "3: negl %%eax\n\t"                                \
                     "jmp 2b\n\t"                                       \
                     ".popsection\n\t"                                  \
                     _ASM_EXTABLE_FAULT(1b, 3b)                         \
                     : [err] "=a" (err)                                 \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")
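
/*
 * The lmask/hmask pair is simply the 64-bit requested-feature bitmap
 * split into the EDX:EAX register pair that the XSAVE family of
 * instructions consumes. As a worked example (values illustrative, the
 * real masks come from the xstate code):
 *
 *        u64 mask  = XFEATURE_MASK_FP | XFEATURE_MASK_SSE;   // 0x3
 *        u32 lmask = mask;          // 0x3  -> EAX
 *        u32 hmask = mask >> 32;    // 0x0  -> EDX
 *
 * exactly as the callers below do before invoking XSTATE_OP().
 */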

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to the modified optimization of
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports the modified optimization, which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction at which we might get an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)                             \
        asm volatile(ALTERNATIVE_2(XSAVE,                               \
                                   XSAVEOPT, X86_FEATURE_XSAVEOPT,      \
                                   XSAVES,   X86_FEATURE_XSAVES)        \
                     "\n"                                               \
                     "xor %[err], %[err]\n"                             \
                     "3:\n"                                             \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "4: movl $-2, %[err]\n"                            \
                     "jmp 3b\n"                                         \
                     ".popsection\n"                                    \
                     _ASM_EXTABLE(661b, 4b)                             \
                     : [err] "=r" (err)                                 \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)                               \
        asm volatile(ALTERNATIVE(XRSTOR,                                \
                                 XRSTORS, X86_FEATURE_XSAVES)           \
                     "\n"                                               \
                     "3:\n"                                             \
                     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
                     :                                                  \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")

/*
 * This function is called only during boot time, when the x86 capability
 * bits are not yet set up and alternatives cannot be used yet.
 */
static inline void os_xrstor_booting(struct xregs_state *xstate)
{
        u64 mask = xfeatures_mask_fpstate();
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(system_state != SYSTEM_BOOTING);

        if (boot_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        /*
         * We should never fault when copying from a kernel buffer, and the FPU
         * state we set at boot time should be valid.
         */
        WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 *
 * Uses either XSAVE or XSAVEOPT or XSAVES depending on the CPU features
 * and command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct xregs_state *xstate)
{
        u64 mask = xfeatures_mask_all;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON_FPU(!alternatives_patched);

        XSTATE_XSAVE(xstate, lmask, hmask, err);

        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        XSTATE_XRESTORE(xstate, lmask, hmask);
}
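
/*
 * Illustrative sketch only, not actual kernel code: the expected pairing
 * of the two helpers above, assuming the caller already owns the FPU
 * context, e.g. under fpregs_lock() from <asm/fpu/api.h>:
 *
 *        os_xsave(&fpu->state.xsave);            // registers -> buffer
 *        // ... inspect or modify fpu->state.xsave ...
 *        os_xrstor(&fpu->state.xsave, xfeatures_mask_fpstate());
 *
 * where @fpu is some task's struct fpu and the mask selects which
 * xfeatures get restored.
 */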

/*
 * Save xstate to the user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted xsave area format, to stay backward compatible
 * with old applications which don't understand the compacted format.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
        /*
         * Include the features which are not xsaved/rstored by the kernel
         * internally, e.g. PKRU. That's user space ABI and also required
         * to allow the signal handler to modify PKRU.
         */
        u64 mask = xfeatures_mask_uabi();
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        /*
         * Clear the xsave header first, so that reserved fields are
         * initialized to zero.
         */
        err = __clear_user(&buf->header, sizeof(buf->header));
        if (unlikely(err))
                return -EFAULT;

        stac();
        XSTATE_OP(XSAVE, buf, lmask, hmask, err);
        clac();

        return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
        struct xregs_state *xstate = ((__force struct xregs_state *)buf);
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        stac();
        XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
        clac();

        return err;
}
 373
 374/*
 375 * Restore xstate from kernel space xsave area, return an error code instead of
 376 * an exception.
 377 */
static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        if (cpu_feature_enabled(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        return err;
}

extern void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask);

static inline void restore_fpregs_from_fpstate(union fpregs_state *fpstate)
{
        __restore_fpregs_from_fpstate(fpstate, xfeatures_mask_fpstate());
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
        __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
        fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
        return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
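
/*
 * Hypothetical usage of the tracking above (illustration only): code
 * that is about to clobber the FPU registers on this CPU, with
 * preemption disabled, invalidates the per-CPU side:
 *
 *        __cpu_invalidate_fpregs_state();
 *        // ... use the FPU registers for something else ...
 *
 * while code that rewrites a task's in-memory state invalidates the
 * task side instead:
 *
 *        __fpu_invalidate_fpregs_state(&tsk->thread.fpu);
 *
 * Either call makes fpregs_state_valid() return false for that task on
 * this CPU, forcing a reload from memory later.
 */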

/*
 * These generally need preemption protection to work,
 * so try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
        trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
        trace_x86_fpu_regs_activated(fpu);
}

/* Internal helper for switch_fpu_return() and signal frame setup */
static inline void fpregs_restore_userregs(void)
{
        struct fpu *fpu = &current->thread.fpu;
        int cpu = smp_processor_id();

        if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
                return;

        if (!fpregs_state_valid(fpu, cpu)) {
                u64 mask;

                /*
                 * This restores _all_ xstate which has not been
                 * established yet.
                 *
                 * If PKRU is enabled, then the PKRU value is already
                 * correct because it was either set in switch_to() or in
                 * flush_thread(). So PKRU is excluded here, because its copy
                 * in current->thread.fpu.xsave might not be up to date.
                 */
                mask = xfeatures_mask_restore_user() |
                        xfeatures_mask_supervisor();
                __restore_fpregs_from_fpstate(&fpu->state, mask);

                fpregs_activate(fpu);
                fpu->last_cpu = cpu;
        }
        clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers. They must be loaded before
 * returning to userland or before their contents are used in any
 * other way.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
        if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
                save_fpregs_to_fpstate(old_fpu);
                /*
                 * The save operation preserved register state, so the
                 * fpu_fpregs_owner_ctx is still @old_fpu. Store the
                 * current CPU number in @old_fpu, so the next return
                 * to user space can avoid the FPU register restore
                 * when it returns on the same CPU and still owns the
                 * context.
                 */
                old_fpu->last_cpu = cpu;

                trace_x86_fpu_regs_deactivated(old_fpu);
        }
}

/*
 * Misc helper functions:
 */

/*
 * Delay loading of the complete FPU state until the return to userland.
 * PKRU is handled separately.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
        if (cpu_feature_enabled(X86_FEATURE_FPU))
                set_thread_flag(TIF_NEED_FPU_LOAD);
}
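
/*
 * Rough sketch of the expected call pattern from a context switch path
 * (illustration only; the real callers live in arch/x86/kernel/process*.c
 * and add further checks around TIF_NEED_FPU_LOAD):
 *
 *        switch_fpu_prepare(&prev->thread.fpu, cpu);   // save outgoing state
 *        // ... switch stacks, segments, etc. ...
 *        switch_fpu_finish(&next->thread.fpu);         // defer the actual load
 *
 * The deferred load then happens via fpregs_restore_userregs() on the
 * way back to user space.
 */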

#endif /* _ASM_X86_FPU_INTERNAL_H */