linux/arch/arm64/kernel/fpsimd.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * FP/SIMD context switching and fault handling
   4 *
   5 * Copyright (C) 2012 ARM Ltd.
   6 * Author: Catalin Marinas <catalin.marinas@arm.com>
   7 */
   8
   9#include <linux/bitmap.h>
  10#include <linux/bitops.h>
  11#include <linux/bottom_half.h>
  12#include <linux/bug.h>
  13#include <linux/cache.h>
  14#include <linux/compat.h>
  15#include <linux/compiler.h>
  16#include <linux/cpu.h>
  17#include <linux/cpu_pm.h>
  18#include <linux/kernel.h>
  19#include <linux/linkage.h>
  20#include <linux/irqflags.h>
  21#include <linux/init.h>
  22#include <linux/percpu.h>
  23#include <linux/prctl.h>
  24#include <linux/preempt.h>
  25#include <linux/ptrace.h>
  26#include <linux/sched/signal.h>
  27#include <linux/sched/task_stack.h>
  28#include <linux/signal.h>
  29#include <linux/slab.h>
  30#include <linux/stddef.h>
  31#include <linux/sysctl.h>
  32#include <linux/swab.h>
  33
  34#include <asm/esr.h>
  35#include <asm/exception.h>
  36#include <asm/fpsimd.h>
  37#include <asm/cpufeature.h>
  38#include <asm/cputype.h>
  39#include <asm/neon.h>
  40#include <asm/processor.h>
  41#include <asm/simd.h>
  42#include <asm/sigcontext.h>
  43#include <asm/sysreg.h>
  44#include <asm/traps.h>
  45#include <asm/virt.h>
  46
  47#define FPEXC_IOF       (1 << 0)
  48#define FPEXC_DZF       (1 << 1)
  49#define FPEXC_OFF       (1 << 2)
  50#define FPEXC_UFF       (1 << 3)
  51#define FPEXC_IXF       (1 << 4)
  52#define FPEXC_IDF       (1 << 7)
  53
  54/*
  55 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
  56 *
  57 * In order to reduce the number of times the FPSIMD state is needlessly saved
  58 * and restored, we need to keep track of two things:
  59 * (a) for each task, we need to remember which CPU was the last one to have
  60 *     the task's FPSIMD state loaded into its FPSIMD registers;
  61 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
  62 *     been loaded into its FPSIMD registers most recently, or whether it has
  63 *     been used to perform kernel mode NEON in the meantime.
  64 *
  65 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
  66 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
  67 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
  68 * address of the userland FPSIMD state of the task that was loaded onto the CPU
   69 * most recently, or NULL if kernel mode NEON has been performed after that.
  70 *
  71 * With this in place, we no longer have to restore the next FPSIMD state right
  72 * when switching between tasks. Instead, we can defer this check to userland
  73 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
  74 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
  75 * can omit the FPSIMD restore.
  76 *
  77 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
  78 * indicate whether or not the userland FPSIMD state of the current task is
  79 * present in the registers. The flag is set unless the FPSIMD registers of this
  80 * CPU currently contain the most recent userland FPSIMD state of the current
  81 * task.
  82 *
  83 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
  84 * save the task's FPSIMD context back to task_struct from softirq context.
  85 * To prevent this from racing with the manipulation of the task's FPSIMD state
  86 * from task context and thereby corrupting the state, it is necessary to
  87 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
  88 * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
   89 * run but prevent them from using FPSIMD.
  90 *
  91 * For a certain task, the sequence may look something like this:
  92 * - the task gets scheduled in; if both the task's fpsimd_cpu field
  93 *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
  94 *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
  95 *   cleared, otherwise it is set;
  96 *
  97 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
  98 *   userland FPSIMD state is copied from memory to the registers, the task's
  99 *   fpsimd_cpu field is set to the id of the current CPU, the current
 100 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 101 *   TIF_FOREIGN_FPSTATE flag is cleared;
 102 *
 103 * - the task executes an ordinary syscall; upon return to userland, the
 104 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 105 *   restored;
 106 *
 107 * - the task executes a syscall which executes some NEON instructions; this is
 108 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 109 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 110 *   and sets the TIF_FOREIGN_FPSTATE flag;
 111 *
 112 * - the task gets preempted after kernel_neon_end() is called; as we have not
 113 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 114 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 115 */
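/*
 * Illustrative sketch of the scheme described above, condensed from
 * fpsimd_thread_switch() and fpsimd_restore_current_state() further down
 * in this file (not additional logic, just a summary):
 *
 *	// at context switch time:
 *	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
 *					&next->thread.uw.fpsimd_state;
 *	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();
 *	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
 *			       wrong_task || wrong_cpu);
 *
 *	// at userland resume:
 *	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 *		task_fpsimd_load();
 *		fpsimd_bind_task_to_cpu();
 *	}
 */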
 116struct fpsimd_last_state_struct {
 117        struct user_fpsimd_state *st;
 118        void *sve_state;
 119        unsigned int sve_vl;
 120};
 121
 122static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 123
 124/* Default VL for tasks that don't set it explicitly: */
 125static int __sve_default_vl = -1;
 126
 127static int get_sve_default_vl(void)
 128{
 129        return READ_ONCE(__sve_default_vl);
 130}
 131
 132#ifdef CONFIG_ARM64_SVE
 133
 134static void set_sve_default_vl(int val)
 135{
 136        WRITE_ONCE(__sve_default_vl, val);
 137}
 138
 139/* Maximum supported vector length across all CPUs (initially poisoned) */
 140int __ro_after_init sve_max_vl = SVE_VL_MIN;
 141int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
 142
 143/*
 144 * Set of available vector lengths,
  145 * where a length of vq quadwords is encoded as bit __vq_to_bit(vq):
 146 */
 147__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
 148/* Set of vector lengths present on at least one cpu: */
 149static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
 150
 151static void __percpu *efi_sve_state;
 152
 153#else /* ! CONFIG_ARM64_SVE */
 154
  155/* Dummy declarations for code that will be optimised out: */
 156extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
 157extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
 158extern void __percpu *efi_sve_state;
 159
 160#endif /* ! CONFIG_ARM64_SVE */
 161
 162DEFINE_PER_CPU(bool, fpsimd_context_busy);
 163EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
 164
 165static void fpsimd_bind_task_to_cpu(void);
 166
 167static void __get_cpu_fpsimd_context(void)
 168{
 169        bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
 170
 171        WARN_ON(busy);
 172}
 173
 174/*
 175 * Claim ownership of the CPU FPSIMD context for use by the calling context.
 176 *
 177 * The caller may freely manipulate the FPSIMD context metadata until
 178 * put_cpu_fpsimd_context() is called.
 179 *
 180 * The double-underscore version must only be called if you know the task
 181 * can't be preempted.
 182 */
 183static void get_cpu_fpsimd_context(void)
 184{
 185        local_bh_disable();
 186        __get_cpu_fpsimd_context();
 187}
 188
 189static void __put_cpu_fpsimd_context(void)
 190{
 191        bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
 192
 193        WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
 194}
 195
 196/*
 197 * Release the CPU FPSIMD context.
 198 *
 199 * Must be called from a context in which get_cpu_fpsimd_context() was
 200 * previously called, with no call to put_cpu_fpsimd_context() in the
 201 * meantime.
 202 */
 203static void put_cpu_fpsimd_context(void)
 204{
 205        __put_cpu_fpsimd_context();
 206        local_bh_enable();
 207}
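/*
 * Typical usage pattern, as seen in e.g. fpsimd_preserve_current_state()
 * below:
 *
 *	get_cpu_fpsimd_context();
 *	// ... inspect or modify current's FPSIMD/SVE state and/or
 *	// TIF_FOREIGN_FPSTATE ...
 *	put_cpu_fpsimd_context();
 */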
 208
 209static bool have_cpu_fpsimd_context(void)
 210{
 211        return !preemptible() && __this_cpu_read(fpsimd_context_busy);
 212}
 213
 214/*
 215 * Call __sve_free() directly only if you know task can't be scheduled
 216 * or preempted.
 217 */
 218static void __sve_free(struct task_struct *task)
 219{
 220        kfree(task->thread.sve_state);
 221        task->thread.sve_state = NULL;
 222}
 223
 224static void sve_free(struct task_struct *task)
 225{
 226        WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
 227
 228        __sve_free(task);
 229}
 230
 231/*
 232 * TIF_SVE controls whether a task can use SVE without trapping while
 233 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 234 * in thread_struct.
 235 *
 236 * The kernel uses this flag to track whether a user task is actively
 237 * using SVE, and therefore whether full SVE register state needs to
 238 * be tracked.  If not, the cheaper FPSIMD context handling code can
 239 * be used instead of the more costly SVE equivalents.
 240 *
 241 *  * TIF_SVE set:
 242 *
 243 *    The task can execute SVE instructions while in userspace without
 244 *    trapping to the kernel.
 245 *
 246 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
  247 * corresponding Zn), P0-P15 and FFR are encoded in
 248 *    task->thread.sve_state, formatted appropriately for vector
 249 *    length task->thread.sve_vl.
 250 *
 251 *    task->thread.sve_state must point to a valid buffer at least
 252 *    sve_state_size(task) bytes in size.
 253 *
 254 *    During any syscall, the kernel may optionally clear TIF_SVE and
 255 *    discard the vector state except for the FPSIMD subset.
 256 *
 257 *  * TIF_SVE clear:
 258 *
 259 *    An attempt by the user task to execute an SVE instruction causes
 260 *    do_sve_acc() to be called, which does some preparation and then
 261 *    sets TIF_SVE.
 262 *
 263 *    When stored, FPSIMD registers V0-V31 are encoded in
 264 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 265 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 266 *    stored and have unspecified values from userspace's point of
 267 *    view.  For hygiene purposes, the kernel zeroes them on next use,
 268 *    but userspace is discouraged from relying on this.
 269 *
 270 *    task->thread.sve_state does not need to be non-NULL, valid or any
 271 *    particular size: it must not be dereferenced.
 272 *
 273 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 274 *    irrespective of whether TIF_SVE is clear or set, since these are
 275 *    not vector length dependent.
 276 */
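/*
 * Worked example (illustrative only): with task->thread.sve_vl == 32 bytes,
 * i.e. vq == 2, each of Z0-Z31 occupies 32 bytes in sve_state, and the
 * FPSIMD view Vn corresponds to the low 128 bits (the first 16 bytes of the
 * little-endian in-memory form) of the matching Zn.  This is exactly the
 * mapping performed by fpsimd_to_sve() and sve_to_fpsimd() below.
 */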
 277
 278/*
 279 * Update current's FPSIMD/SVE registers from thread_struct.
 280 *
 281 * This function should be called only when the FPSIMD/SVE state in
 282 * thread_struct is known to be up to date, when preparing to enter
 283 * userspace.
 284 */
 285static void task_fpsimd_load(void)
 286{
 287        WARN_ON(!system_supports_fpsimd());
 288        WARN_ON(!have_cpu_fpsimd_context());
 289
 290        if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
 291                sve_load_state(sve_pffr(&current->thread),
 292                               &current->thread.uw.fpsimd_state.fpsr,
 293                               sve_vq_from_vl(current->thread.sve_vl) - 1);
 294        else
 295                fpsimd_load_state(&current->thread.uw.fpsimd_state);
 296}
 297
 298/*
 299 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 300 * date with respect to the CPU registers.
 301 */
 302static void fpsimd_save(void)
 303{
 304        struct fpsimd_last_state_struct const *last =
 305                this_cpu_ptr(&fpsimd_last_state);
 306        /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 307
 308        WARN_ON(!system_supports_fpsimd());
 309        WARN_ON(!have_cpu_fpsimd_context());
 310
 311        if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
 312                if (IS_ENABLED(CONFIG_ARM64_SVE) &&
 313                    test_thread_flag(TIF_SVE)) {
 314                        if (WARN_ON(sve_get_vl() != last->sve_vl)) {
 315                                /*
 316                                 * Can't save the user regs, so current would
 317                                 * re-enter user with corrupt state.
 318                                 * There's no way to recover, so kill it:
 319                                 */
 320                                force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
 321                                return;
 322                        }
 323
 324                        sve_save_state((char *)last->sve_state +
 325                                                sve_ffr_offset(last->sve_vl),
 326                                       &last->st->fpsr);
 327                } else
 328                        fpsimd_save_state(last->st);
 329        }
 330}
 331
 332/*
 333 * All vector length selection from userspace comes through here.
 334 * We're on a slow path, so some sanity-checks are included.
 335 * If things go wrong there's a bug somewhere, but try to fall back to a
 336 * safe choice.
 337 */
 338static unsigned int find_supported_vector_length(unsigned int vl)
 339{
 340        int bit;
 341        int max_vl = sve_max_vl;
 342
 343        if (WARN_ON(!sve_vl_valid(vl)))
 344                vl = SVE_VL_MIN;
 345
 346        if (WARN_ON(!sve_vl_valid(max_vl)))
 347                max_vl = SVE_VL_MIN;
 348
 349        if (vl > max_vl)
 350                vl = max_vl;
 351
 352        bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
 353                            __vq_to_bit(sve_vq_from_vl(vl)));
 354        return sve_vl_from_vq(__bit_to_vq(bit));
 355}
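/*
 * Example of the rounding behaviour (illustrative): if the supported set
 * is {16, 32, 64} bytes and vl == 48 is requested, the bitmap search above
 * picks 32, i.e. the largest supported vector length not exceeding the
 * request (after clamping to sve_max_vl).
 */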
 356
 357#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
 358
 359static int sve_proc_do_default_vl(struct ctl_table *table, int write,
 360                                  void *buffer, size_t *lenp, loff_t *ppos)
 361{
 362        int ret;
 363        int vl = get_sve_default_vl();
 364        struct ctl_table tmp_table = {
 365                .data = &vl,
 366                .maxlen = sizeof(vl),
 367        };
 368
 369        ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
 370        if (ret || !write)
 371                return ret;
 372
 373        /* Writing -1 has the special meaning "set to max": */
 374        if (vl == -1)
 375                vl = sve_max_vl;
 376
 377        if (!sve_vl_valid(vl))
 378                return -EINVAL;
 379
 380        set_sve_default_vl(find_supported_vector_length(vl));
 381        return 0;
 382}
 383
 384static struct ctl_table sve_default_vl_table[] = {
 385        {
 386                .procname       = "sve_default_vector_length",
 387                .mode           = 0644,
 388                .proc_handler   = sve_proc_do_default_vl,
 389        },
 390        { }
 391};
 392
 393static int __init sve_sysctl_init(void)
 394{
 395        if (system_supports_sve())
 396                if (!register_sysctl("abi", sve_default_vl_table))
 397                        return -EINVAL;
 398
 399        return 0;
 400}
 401
 402#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
 403static int __init sve_sysctl_init(void) { return 0; }
 404#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
 405
 406#define ZREG(sve_state, vq, n) ((char *)(sve_state) +           \
 407        (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
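/*
 * For reference: given the sigcontext layout macros (SVE_SIG_ZREG_OFFSET()
 * and friends), ZREG(sve_state, vq, n) effectively resolves to
 * sve_state + n * vq * 16, i.e. the Zn images are stored back to back,
 * each vq * 16 bytes long.
 */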
 408
 409#ifdef CONFIG_CPU_BIG_ENDIAN
 410static __uint128_t arm64_cpu_to_le128(__uint128_t x)
 411{
 412        u64 a = swab64(x);
 413        u64 b = swab64(x >> 64);
 414
 415        return ((__uint128_t)a << 64) | b;
 416}
 417#else
 418static __uint128_t arm64_cpu_to_le128(__uint128_t x)
 419{
 420        return x;
 421}
 422#endif
 423
 424#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
 425
 426static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
 427                            unsigned int vq)
 428{
 429        unsigned int i;
 430        __uint128_t *p;
 431
 432        for (i = 0; i < SVE_NUM_ZREGS; ++i) {
 433                p = (__uint128_t *)ZREG(sst, vq, i);
 434                *p = arm64_cpu_to_le128(fst->vregs[i]);
 435        }
 436}
 437
 438/*
 439 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 440 * task->thread.sve_state.
 441 *
 442 * Task can be a non-runnable task, or current.  In the latter case,
 443 * the caller must have ownership of the cpu FPSIMD context before calling
 444 * this function.
 445 * task->thread.sve_state must point to at least sve_state_size(task)
 446 * bytes of allocated kernel memory.
 447 * task->thread.uw.fpsimd_state must be up to date before calling this
 448 * function.
 449 */
 450static void fpsimd_to_sve(struct task_struct *task)
 451{
 452        unsigned int vq;
 453        void *sst = task->thread.sve_state;
 454        struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
 455
 456        if (!system_supports_sve())
 457                return;
 458
 459        vq = sve_vq_from_vl(task->thread.sve_vl);
 460        __fpsimd_to_sve(sst, fst, vq);
 461}
 462
 463/*
 464 * Transfer the SVE state in task->thread.sve_state to
 465 * task->thread.uw.fpsimd_state.
 466 *
 467 * Task can be a non-runnable task, or current.  In the latter case,
 468 * the caller must have ownership of the cpu FPSIMD context before calling
 469 * this function.
 470 * task->thread.sve_state must point to at least sve_state_size(task)
 471 * bytes of allocated kernel memory.
 472 * task->thread.sve_state must be up to date before calling this function.
 473 */
 474static void sve_to_fpsimd(struct task_struct *task)
 475{
 476        unsigned int vq;
 477        void const *sst = task->thread.sve_state;
 478        struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
 479        unsigned int i;
 480        __uint128_t const *p;
 481
 482        if (!system_supports_sve())
 483                return;
 484
 485        vq = sve_vq_from_vl(task->thread.sve_vl);
 486        for (i = 0; i < SVE_NUM_ZREGS; ++i) {
 487                p = (__uint128_t const *)ZREG(sst, vq, i);
 488                fst->vregs[i] = arm64_le128_to_cpu(*p);
 489        }
 490}
 491
 492#ifdef CONFIG_ARM64_SVE
 493
 494/*
 495 * Return how many bytes of memory are required to store the full SVE
 496 * state for task, given task's currently configured vector length.
 497 */
 498size_t sve_state_size(struct task_struct const *task)
 499{
 500        return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
 501}
 502
 503/*
 504 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 505 *
 506 * This function should be used only in preparation for replacing
 507 * task->thread.sve_state with new data.  The memory is always zeroed
 508 * here to prevent stale data from showing through: this is done in
 509 * the interest of testability and predictability: except in the
 510 * do_sve_acc() case, there is no ABI requirement to hide stale data
  511 * written previously by the task.
 512 */
 513void sve_alloc(struct task_struct *task)
 514{
 515        if (task->thread.sve_state) {
 516                memset(task->thread.sve_state, 0, sve_state_size(task));
 517                return;
 518        }
 519
 520        /* This is a small allocation (maximum ~8KB) and Should Not Fail. */
 521        task->thread.sve_state =
 522                kzalloc(sve_state_size(task), GFP_KERNEL);
 523}
 524
 525
 526/*
 527 * Ensure that task->thread.sve_state is up to date with respect to
  528 * the user task, irrespective of whether SVE is in use or not.
 529 *
 530 * This should only be called by ptrace.  task must be non-runnable.
 531 * task->thread.sve_state must point to at least sve_state_size(task)
 532 * bytes of allocated kernel memory.
 533 */
 534void fpsimd_sync_to_sve(struct task_struct *task)
 535{
 536        if (!test_tsk_thread_flag(task, TIF_SVE))
 537                fpsimd_to_sve(task);
 538}
 539
 540/*
 541 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 542 * the user task, irrespective of whether SVE is in use or not.
 543 *
 544 * This should only be called by ptrace.  task must be non-runnable.
 545 * task->thread.sve_state must point to at least sve_state_size(task)
 546 * bytes of allocated kernel memory.
 547 */
 548void sve_sync_to_fpsimd(struct task_struct *task)
 549{
 550        if (test_tsk_thread_flag(task, TIF_SVE))
 551                sve_to_fpsimd(task);
 552}
 553
 554/*
 555 * Ensure that task->thread.sve_state is up to date with respect to
 556 * the task->thread.uw.fpsimd_state.
 557 *
 558 * This should only be called by ptrace to merge new FPSIMD register
 559 * values into a task for which SVE is currently active.
 560 * task must be non-runnable.
 561 * task->thread.sve_state must point to at least sve_state_size(task)
 562 * bytes of allocated kernel memory.
 563 * task->thread.uw.fpsimd_state must already have been initialised with
 564 * the new FPSIMD register values to be merged in.
 565 */
 566void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 567{
 568        unsigned int vq;
 569        void *sst = task->thread.sve_state;
 570        struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
 571
 572        if (!test_tsk_thread_flag(task, TIF_SVE))
 573                return;
 574
 575        vq = sve_vq_from_vl(task->thread.sve_vl);
 576
 577        memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
 578        __fpsimd_to_sve(sst, fst, vq);
 579}
 580
 581int sve_set_vector_length(struct task_struct *task,
 582                          unsigned long vl, unsigned long flags)
 583{
 584        if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
 585                                     PR_SVE_SET_VL_ONEXEC))
 586                return -EINVAL;
 587
 588        if (!sve_vl_valid(vl))
 589                return -EINVAL;
 590
 591        /*
 592         * Clamp to the maximum vector length that VL-agnostic SVE code can
 593         * work with.  A flag may be assigned in the future to allow setting
 594         * of larger vector lengths without confusing older software.
 595         */
 596        if (vl > SVE_VL_ARCH_MAX)
 597                vl = SVE_VL_ARCH_MAX;
 598
 599        vl = find_supported_vector_length(vl);
 600
 601        if (flags & (PR_SVE_VL_INHERIT |
 602                     PR_SVE_SET_VL_ONEXEC))
 603                task->thread.sve_vl_onexec = vl;
 604        else
 605                /* Reset VL to system default on next exec: */
 606                task->thread.sve_vl_onexec = 0;
 607
 608        /* Only actually set the VL if not deferred: */
 609        if (flags & PR_SVE_SET_VL_ONEXEC)
 610                goto out;
 611
 612        if (vl == task->thread.sve_vl)
 613                goto out;
 614
 615        /*
 616         * To ensure the FPSIMD bits of the SVE vector registers are preserved,
 617         * write any live register state back to task_struct, and convert to a
 618         * non-SVE thread.
 619         */
 620        if (task == current) {
 621                get_cpu_fpsimd_context();
 622
 623                fpsimd_save();
 624        }
 625
 626        fpsimd_flush_task_state(task);
 627        if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
 628                sve_to_fpsimd(task);
 629
 630        if (task == current)
 631                put_cpu_fpsimd_context();
 632
 633        /*
 634         * Force reallocation of task SVE state to the correct size
 635         * on next use:
 636         */
 637        sve_free(task);
 638
 639        task->thread.sve_vl = vl;
 640
 641out:
 642        update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
 643                               flags & PR_SVE_VL_INHERIT);
 644
 645        return 0;
 646}
 647
 648/*
 649 * Encode the current vector length and flags for return.
 650 * This is only required for prctl(): ptrace has separate fields
 651 *
 652 * flags are as for sve_set_vector_length().
 653 */
 654static int sve_prctl_status(unsigned long flags)
 655{
 656        int ret;
 657
 658        if (flags & PR_SVE_SET_VL_ONEXEC)
 659                ret = current->thread.sve_vl_onexec;
 660        else
 661                ret = current->thread.sve_vl;
 662
 663        if (test_thread_flag(TIF_SVE_VL_INHERIT))
 664                ret |= PR_SVE_VL_INHERIT;
 665
 666        return ret;
 667}
 668
 669/* PR_SVE_SET_VL */
 670int sve_set_current_vl(unsigned long arg)
 671{
 672        unsigned long vl, flags;
 673        int ret;
 674
 675        vl = arg & PR_SVE_VL_LEN_MASK;
 676        flags = arg & ~vl;
 677
 678        if (!system_supports_sve() || is_compat_task())
 679                return -EINVAL;
 680
 681        ret = sve_set_vector_length(current, vl, flags);
 682        if (ret)
 683                return ret;
 684
 685        return sve_prctl_status(flags);
 686}
 687
 688/* PR_SVE_GET_VL */
 689int sve_get_current_vl(void)
 690{
 691        if (!system_supports_sve() || is_compat_task())
 692                return -EINVAL;
 693
 694        return sve_prctl_status(0);
 695}
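/*
 * For reference, the two functions above back the PR_SVE_SET_VL and
 * PR_SVE_GET_VL prctl()s.  A (hypothetical) userspace caller might do:
 *
 *	ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *
 * where the low bits of the argument encode the requested vector length in
 * bytes, and the return value (like that of prctl(PR_SVE_GET_VL)) carries
 * the configured VL plus flags as assembled by sve_prctl_status() above.
 */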
 696
 697static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
 698{
 699        unsigned int vq, vl;
 700        unsigned long zcr;
 701
 702        bitmap_zero(map, SVE_VQ_MAX);
 703
 704        zcr = ZCR_ELx_LEN_MASK;
 705        zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;
 706
 707        for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
 708                write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
 709                vl = sve_get_vl();
 710                vq = sve_vq_from_vl(vl); /* skip intervening lengths */
 711                set_bit(__vq_to_bit(vq), map);
 712        }
 713}
 714
 715/*
 716 * Initialise the set of known supported VQs for the boot CPU.
 717 * This is called during kernel boot, before secondary CPUs are brought up.
 718 */
 719void __init sve_init_vq_map(void)
 720{
 721        sve_probe_vqs(sve_vq_map);
 722        bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
 723}
 724
 725/*
 726 * If we haven't committed to the set of supported VQs yet, filter out
 727 * those not supported by the current CPU.
 728 * This function is called during the bring-up of early secondary CPUs only.
 729 */
 730void sve_update_vq_map(void)
 731{
 732        DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
 733
 734        sve_probe_vqs(tmp_map);
 735        bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
 736        bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
 737}
 738
 739/*
 740 * Check whether the current CPU supports all VQs in the committed set.
 741 * This function is called during the bring-up of late secondary CPUs only.
 742 */
 743int sve_verify_vq_map(void)
 744{
 745        DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
 746        unsigned long b;
 747
 748        sve_probe_vqs(tmp_map);
 749
 750        bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
 751        if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
 752                pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
 753                        smp_processor_id());
 754                return -EINVAL;
 755        }
 756
 757        if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
 758                return 0;
 759
 760        /*
 761         * For KVM, it is necessary to ensure that this CPU doesn't
 762         * support any vector length that guests may have probed as
 763         * unsupported.
 764         */
 765
 766        /* Recover the set of supported VQs: */
 767        bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
 768        /* Find VQs supported that are not globally supported: */
 769        bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
 770
 771        /* Find the lowest such VQ, if any: */
 772        b = find_last_bit(tmp_map, SVE_VQ_MAX);
 773        if (b >= SVE_VQ_MAX)
 774                return 0; /* no mismatches */
 775
 776        /*
 777         * Mismatches above sve_max_virtualisable_vl are fine, since
 778         * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
 779         */
 780        if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
 781                pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
 782                        smp_processor_id());
 783                return -EINVAL;
 784        }
 785
 786        return 0;
 787}
 788
 789static void __init sve_efi_setup(void)
 790{
 791        if (!IS_ENABLED(CONFIG_EFI))
 792                return;
 793
 794        /*
 795         * alloc_percpu() warns and prints a backtrace if this goes wrong.
 796         * This is evidence of a crippled system and we are returning void,
 797         * so no attempt is made to handle this situation here.
 798         */
 799        if (!sve_vl_valid(sve_max_vl))
 800                goto fail;
 801
 802        efi_sve_state = __alloc_percpu(
 803                SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
 804        if (!efi_sve_state)
 805                goto fail;
 806
 807        return;
 808
 809fail:
 810        panic("Cannot allocate percpu memory for EFI SVE save/restore");
 811}
 812
 813/*
 814 * Enable SVE for EL1.
 815 * Intended for use by the cpufeatures code during CPU boot.
 816 */
 817void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
 818{
 819        write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
 820        isb();
 821}
 822
 823/*
 824 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 825 * vector length.
 826 *
 827 * Use only if SVE is present.
 828 * This function clobbers the SVE vector length.
 829 */
 830u64 read_zcr_features(void)
 831{
 832        u64 zcr;
 833        unsigned int vq_max;
 834
 835        /*
 836         * Set the maximum possible VL, and write zeroes to all other
 837         * bits to see if they stick.
 838         */
 839        sve_kernel_enable(NULL);
 840        write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
 841
 842        zcr = read_sysreg_s(SYS_ZCR_EL1);
 843        zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
 844        vq_max = sve_vq_from_vl(sve_get_vl());
 845        zcr |= vq_max - 1; /* set LEN field to maximum effective value */
 846
 847        return zcr;
 848}
 849
 850void __init sve_setup(void)
 851{
 852        u64 zcr;
 853        DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
 854        unsigned long b;
 855
 856        if (!system_supports_sve())
 857                return;
 858
 859        /*
 860         * The SVE architecture mandates support for 128-bit vectors,
 861         * so sve_vq_map must have at least SVE_VQ_MIN set.
 862         * If something went wrong, at least try to patch it up:
 863         */
 864        if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
 865                set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
 866
 867        zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
 868        sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
 869
 870        /*
 871         * Sanity-check that the max VL we determined through CPU features
 872         * corresponds properly to sve_vq_map.  If not, do our best:
 873         */
 874        if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
 875                sve_max_vl = find_supported_vector_length(sve_max_vl);
 876
 877        /*
 878         * For the default VL, pick the maximum supported value <= 64.
 879         * VL == 64 is guaranteed not to grow the signal frame.
 880         */
 881        set_sve_default_vl(find_supported_vector_length(64));
 882
 883        bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
 884                      SVE_VQ_MAX);
 885
 886        b = find_last_bit(tmp_map, SVE_VQ_MAX);
 887        if (b >= SVE_VQ_MAX)
 888                /* No non-virtualisable VLs found */
 889                sve_max_virtualisable_vl = SVE_VQ_MAX;
 890        else if (WARN_ON(b == SVE_VQ_MAX - 1))
 891                /* No virtualisable VLs?  This is architecturally forbidden. */
 892                sve_max_virtualisable_vl = SVE_VQ_MIN;
 893        else /* b + 1 < SVE_VQ_MAX */
 894                sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
 895
 896        if (sve_max_virtualisable_vl > sve_max_vl)
 897                sve_max_virtualisable_vl = sve_max_vl;
 898
 899        pr_info("SVE: maximum available vector length %u bytes per vector\n",
 900                sve_max_vl);
 901        pr_info("SVE: default vector length %u bytes per vector\n",
 902                get_sve_default_vl());
 903
 904        /* KVM decides whether to support mismatched systems. Just warn here: */
 905        if (sve_max_virtualisable_vl < sve_max_vl)
 906                pr_warn("SVE: unvirtualisable vector lengths present\n");
 907
 908        sve_efi_setup();
 909}
 910
 911/*
 912 * Called from the put_task_struct() path, which cannot get here
 913 * unless dead_task is really dead and not schedulable.
 914 */
 915void fpsimd_release_task(struct task_struct *dead_task)
 916{
 917        __sve_free(dead_task);
 918}
 919
 920#endif /* CONFIG_ARM64_SVE */
 921
 922/*
 923 * Trapped SVE access
 924 *
 925 * Storage is allocated for the full SVE state, the current FPSIMD
 926 * register contents are migrated across, and the access trap is
 927 * disabled.
 928 *
 929 * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
 930 * would have disabled the SVE access trap for userspace during
 931 * ret_to_user, making an SVE access trap impossible in that case.
 932 */
 933void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 934{
 935        /* Even if we chose not to use SVE, the hardware could still trap: */
 936        if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
 937                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 938                return;
 939        }
 940
 941        sve_alloc(current);
 942        if (!current->thread.sve_state) {
 943                force_sig(SIGKILL);
 944                return;
 945        }
 946
 947        get_cpu_fpsimd_context();
 948
 949        if (test_and_set_thread_flag(TIF_SVE))
 950                WARN_ON(1); /* SVE access shouldn't have trapped */
 951
 952        /*
 953         * Convert the FPSIMD state to SVE, zeroing all the state that
 954         * is not shared with FPSIMD. If (as is likely) the current
 955         * state is live in the registers then do this there and
 956         * update our metadata for the current task including
 957         * disabling the trap, otherwise update our in-memory copy.
 958         */
 959        if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
 960                unsigned long vq_minus_one =
 961                        sve_vq_from_vl(current->thread.sve_vl) - 1;
 962                sve_set_vq(vq_minus_one);
 963                sve_flush_live(vq_minus_one);
 964                fpsimd_bind_task_to_cpu();
 965        } else {
 966                fpsimd_to_sve(current);
 967        }
 968
 969        put_cpu_fpsimd_context();
 970}
 971
 972/*
 973 * Trapped FP/ASIMD access.
 974 */
 975void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
 976{
 977        /* TODO: implement lazy context saving/restoring */
 978        WARN_ON(1);
 979}
 980
 981/*
 982 * Raise a SIGFPE for the current process.
 983 */
 984void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
 985{
 986        unsigned int si_code = FPE_FLTUNK;
 987
 988        if (esr & ESR_ELx_FP_EXC_TFV) {
 989                if (esr & FPEXC_IOF)
 990                        si_code = FPE_FLTINV;
 991                else if (esr & FPEXC_DZF)
 992                        si_code = FPE_FLTDIV;
 993                else if (esr & FPEXC_OFF)
 994                        si_code = FPE_FLTOVF;
 995                else if (esr & FPEXC_UFF)
 996                        si_code = FPE_FLTUND;
 997                else if (esr & FPEXC_IXF)
 998                        si_code = FPE_FLTRES;
 999        }
1000
1001        send_sig_fault(SIGFPE, si_code,
1002                       (void __user *)instruction_pointer(regs),
1003                       current);
1004}
1005
1006void fpsimd_thread_switch(struct task_struct *next)
1007{
1008        bool wrong_task, wrong_cpu;
1009
1010        if (!system_supports_fpsimd())
1011                return;
1012
1013        __get_cpu_fpsimd_context();
1014
1015        /* Save unsaved fpsimd state, if any: */
1016        fpsimd_save();
1017
1018        /*
1019         * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
1020         * state.  For kernel threads, FPSIMD registers are never loaded
1021         * and wrong_task and wrong_cpu will always be true.
1022         */
1023        wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
1024                                        &next->thread.uw.fpsimd_state;
1025        wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();
1026
1027        update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
1028                               wrong_task || wrong_cpu);
1029
1030        __put_cpu_fpsimd_context();
1031}
1032
1033void fpsimd_flush_thread(void)
1034{
1035        int vl, supported_vl;
1036
1037        if (!system_supports_fpsimd())
1038                return;
1039
1040        get_cpu_fpsimd_context();
1041
1042        fpsimd_flush_task_state(current);
1043        memset(&current->thread.uw.fpsimd_state, 0,
1044               sizeof(current->thread.uw.fpsimd_state));
1045
1046        if (system_supports_sve()) {
1047                clear_thread_flag(TIF_SVE);
1048                sve_free(current);
1049
1050                /*
1051                 * Reset the task vector length as required.
1052                 * This is where we ensure that all user tasks have a valid
1053                 * vector length configured: no kernel task can become a user
1054                 * task without an exec and hence a call to this function.
1055                 * By the time the first call to this function is made, all
1056                 * early hardware probing is complete, so __sve_default_vl
1057                 * should be valid.
1058                 * If a bug causes this to go wrong, we make some noise and
1059                 * try to fudge thread.sve_vl to a safe value here.
1060                 */
1061                vl = current->thread.sve_vl_onexec ?
1062                        current->thread.sve_vl_onexec : get_sve_default_vl();
1063
1064                if (WARN_ON(!sve_vl_valid(vl)))
1065                        vl = SVE_VL_MIN;
1066
1067                supported_vl = find_supported_vector_length(vl);
1068                if (WARN_ON(supported_vl != vl))
1069                        vl = supported_vl;
1070
1071                current->thread.sve_vl = vl;
1072
1073                /*
1074                 * If the task is not set to inherit, ensure that the vector
1075                 * length will be reset by a subsequent exec:
1076                 */
1077                if (!test_thread_flag(TIF_SVE_VL_INHERIT))
1078                        current->thread.sve_vl_onexec = 0;
1079        }
1080
1081        put_cpu_fpsimd_context();
1082}
1083
1084/*
1085 * Save the userland FPSIMD state of 'current' to memory, but only if the state
1086 * currently held in the registers does in fact belong to 'current'
1087 */
1088void fpsimd_preserve_current_state(void)
1089{
1090        if (!system_supports_fpsimd())
1091                return;
1092
1093        get_cpu_fpsimd_context();
1094        fpsimd_save();
1095        put_cpu_fpsimd_context();
1096}
1097
1098/*
1099 * Like fpsimd_preserve_current_state(), but ensure that
1100 * current->thread.uw.fpsimd_state is updated so that it can be copied to
1101 * the signal frame.
1102 */
1103void fpsimd_signal_preserve_current_state(void)
1104{
1105        fpsimd_preserve_current_state();
1106        if (test_thread_flag(TIF_SVE))
1107                sve_to_fpsimd(current);
1108}
1109
1110/*
1111 * Associate current's FPSIMD context with this cpu
1112 * The caller must have ownership of the cpu FPSIMD context before calling
1113 * this function.
1114 */
1115static void fpsimd_bind_task_to_cpu(void)
1116{
1117        struct fpsimd_last_state_struct *last =
1118                this_cpu_ptr(&fpsimd_last_state);
1119
1120        WARN_ON(!system_supports_fpsimd());
1121        last->st = &current->thread.uw.fpsimd_state;
1122        last->sve_state = current->thread.sve_state;
1123        last->sve_vl = current->thread.sve_vl;
1124        current->thread.fpsimd_cpu = smp_processor_id();
1125
1126        if (system_supports_sve()) {
1127                /* Toggle SVE trapping for userspace if needed */
1128                if (test_thread_flag(TIF_SVE))
1129                        sve_user_enable();
1130                else
1131                        sve_user_disable();
1132
1133                /* Serialised by exception return to user */
1134        }
1135}
1136
1137void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
1138                              unsigned int sve_vl)
1139{
1140        struct fpsimd_last_state_struct *last =
1141                this_cpu_ptr(&fpsimd_last_state);
1142
1143        WARN_ON(!system_supports_fpsimd());
1144        WARN_ON(!in_softirq() && !irqs_disabled());
1145
1146        last->st = st;
1147        last->sve_state = sve_state;
1148        last->sve_vl = sve_vl;
1149}
1150
1151/*
1152 * Load the userland FPSIMD state of 'current' from memory, but only if the
1153 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
1154 * state of 'current'
1155 */
1156void fpsimd_restore_current_state(void)
1157{
1158        /*
 1159         * For tasks that were created before we detected the absence of
 1160         * FP/SIMD, TIF_FOREIGN_FPSTATE could have been set via
 1161         * fpsimd_thread_switch(), e.g. for init, and then inherited by its
 1162         * child processes. If we later detect that the system doesn't
 1163         * support FP/SIMD, we must clear the flag for all tasks to indicate
 1164         * that the FPSTATE is clean (as we can't have one) to avoid looping
 1165         * forever in do_notify_resume().
1166         */
1167        if (!system_supports_fpsimd()) {
1168                clear_thread_flag(TIF_FOREIGN_FPSTATE);
1169                return;
1170        }
1171
1172        get_cpu_fpsimd_context();
1173
1174        if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
1175                task_fpsimd_load();
1176                fpsimd_bind_task_to_cpu();
1177        }
1178
1179        put_cpu_fpsimd_context();
1180}
1181
1182/*
1183 * Load an updated userland FPSIMD state for 'current' from memory and set the
1184 * flag that indicates that the FPSIMD register contents are the most recent
1185 * FPSIMD state of 'current'
1186 */
1187void fpsimd_update_current_state(struct user_fpsimd_state const *state)
1188{
1189        if (WARN_ON(!system_supports_fpsimd()))
1190                return;
1191
1192        get_cpu_fpsimd_context();
1193
1194        current->thread.uw.fpsimd_state = *state;
1195        if (test_thread_flag(TIF_SVE))
1196                fpsimd_to_sve(current);
1197
1198        task_fpsimd_load();
1199        fpsimd_bind_task_to_cpu();
1200
1201        clear_thread_flag(TIF_FOREIGN_FPSTATE);
1202
1203        put_cpu_fpsimd_context();
1204}
1205
1206/*
1207 * Invalidate live CPU copies of task t's FPSIMD state
1208 *
1209 * This function may be called with preemption enabled.  The barrier()
1210 * ensures that the assignment to fpsimd_cpu is visible to any
1211 * preemption/softirq that could race with set_tsk_thread_flag(), so
1212 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
1213 *
1214 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
1215 * subsequent code.
1216 */
1217void fpsimd_flush_task_state(struct task_struct *t)
1218{
1219        t->thread.fpsimd_cpu = NR_CPUS;
1220        /*
1221         * If we don't support fpsimd, bail out after we have
1222         * reset the fpsimd_cpu for this task and clear the
1223         * FPSTATE.
1224         */
1225        if (!system_supports_fpsimd())
1226                return;
1227        barrier();
1228        set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
1229
1230        barrier();
1231}
1232
1233/*
1234 * Invalidate any task's FPSIMD state that is present on this cpu.
1235 * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
1236 * before calling this function.
1237 */
1238static void fpsimd_flush_cpu_state(void)
1239{
1240        WARN_ON(!system_supports_fpsimd());
1241        __this_cpu_write(fpsimd_last_state.st, NULL);
1242        set_thread_flag(TIF_FOREIGN_FPSTATE);
1243}
1244
1245/*
1246 * Save the FPSIMD state to memory and invalidate cpu view.
1247 * This function must be called with preemption disabled.
1248 */
1249void fpsimd_save_and_flush_cpu_state(void)
1250{
1251        if (!system_supports_fpsimd())
1252                return;
1253        WARN_ON(preemptible());
1254        __get_cpu_fpsimd_context();
1255        fpsimd_save();
1256        fpsimd_flush_cpu_state();
1257        __put_cpu_fpsimd_context();
1258}
1259
1260#ifdef CONFIG_KERNEL_MODE_NEON
1261
1262/*
1263 * Kernel-side NEON support functions
1264 */
1265
1266/*
1267 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
1268 * context
1269 *
1270 * Must not be called unless may_use_simd() returns true.
1271 * Task context in the FPSIMD registers is saved back to memory as necessary.
1272 *
1273 * A matching call to kernel_neon_end() must be made before returning from the
1274 * calling context.
1275 *
1276 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
1277 * called.
1278 */
1279void kernel_neon_begin(void)
1280{
1281        if (WARN_ON(!system_supports_fpsimd()))
1282                return;
1283
1284        BUG_ON(!may_use_simd());
1285
1286        get_cpu_fpsimd_context();
1287
1288        /* Save unsaved fpsimd state, if any: */
1289        fpsimd_save();
1290
1291        /* Invalidate any task state remaining in the fpsimd regs: */
1292        fpsimd_flush_cpu_state();
1293}
1294EXPORT_SYMBOL(kernel_neon_begin);
1295
1296/*
1297 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
1298 *
1299 * Must be called from a context in which kernel_neon_begin() was previously
1300 * called, with no call to kernel_neon_end() in the meantime.
1301 *
1302 * The caller must not use the FPSIMD registers after this function is called,
1303 * unless kernel_neon_begin() is called again in the meantime.
1304 */
1305void kernel_neon_end(void)
1306{
1307        if (!system_supports_fpsimd())
1308                return;
1309
1310        put_cpu_fpsimd_context();
1311}
1312EXPORT_SYMBOL(kernel_neon_end);
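/*
 * Sketch of the expected calling pattern (callers must check
 * may_use_simd() first and provide a non-NEON fallback):
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		// ... use FPSIMD/NEON registers ...
 *		kernel_neon_end();
 *	} else {
 *		// ... scalar fallback ...
 *	}
 */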
1313
1314#ifdef CONFIG_EFI
1315
1316static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
1317static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
1318static DEFINE_PER_CPU(bool, efi_sve_state_used);
1319
1320/*
1321 * EFI runtime services support functions
1322 *
1323 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
1324 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
1325 * is always used rather than being an optional accelerator.
1326 *
1327 * These functions provide the necessary support for ensuring FPSIMD
1328 * save/restore in the contexts from which EFI is used.
1329 *
1330 * Do not use them for any other purpose -- if tempted to do so, you are
1331 * either doing something wrong or you need to propose some refactoring.
1332 */
1333
1334/*
1335 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
1336 */
1337void __efi_fpsimd_begin(void)
1338{
1339        if (!system_supports_fpsimd())
1340                return;
1341
1342        WARN_ON(preemptible());
1343
1344        if (may_use_simd()) {
1345                kernel_neon_begin();
1346        } else {
1347                /*
1348                 * If !efi_sve_state, SVE can't be in use yet and doesn't need
1349                 * preserving:
1350                 */
1351                if (system_supports_sve() && likely(efi_sve_state)) {
1352                        char *sve_state = this_cpu_ptr(efi_sve_state);
1353
1354                        __this_cpu_write(efi_sve_state_used, true);
1355
1356                        sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
1357                                       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
1358                } else {
1359                        fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
1360                }
1361
1362                __this_cpu_write(efi_fpsimd_state_used, true);
1363        }
1364}
1365
1366/*
1367 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
1368 */
1369void __efi_fpsimd_end(void)
1370{
1371        if (!system_supports_fpsimd())
1372                return;
1373
1374        if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
1375                kernel_neon_end();
1376        } else {
1377                if (system_supports_sve() &&
1378                    likely(__this_cpu_read(efi_sve_state_used))) {
1379                        char const *sve_state = this_cpu_ptr(efi_sve_state);
1380
1381                        sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
1382                                       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
1383                                       sve_vq_from_vl(sve_get_vl()) - 1);
1384
1385                        __this_cpu_write(efi_sve_state_used, false);
1386                } else {
1387                        fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
1388                }
1389        }
1390}
1391
1392#endif /* CONFIG_EFI */
1393
1394#endif /* CONFIG_KERNEL_MODE_NEON */
1395
1396#ifdef CONFIG_CPU_PM
1397static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
1398                                  unsigned long cmd, void *v)
1399{
1400        switch (cmd) {
1401        case CPU_PM_ENTER:
1402                fpsimd_save_and_flush_cpu_state();
1403                break;
1404        case CPU_PM_EXIT:
1405                break;
1406        case CPU_PM_ENTER_FAILED:
1407        default:
1408                return NOTIFY_DONE;
1409        }
1410        return NOTIFY_OK;
1411}
1412
1413static struct notifier_block fpsimd_cpu_pm_notifier_block = {
1414        .notifier_call = fpsimd_cpu_pm_notifier,
1415};
1416
1417static void __init fpsimd_pm_init(void)
1418{
1419        cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
1420}
1421
1422#else
1423static inline void fpsimd_pm_init(void) { }
1424#endif /* CONFIG_CPU_PM */
1425
1426#ifdef CONFIG_HOTPLUG_CPU
1427static int fpsimd_cpu_dead(unsigned int cpu)
1428{
1429        per_cpu(fpsimd_last_state.st, cpu) = NULL;
1430        return 0;
1431}
1432
1433static inline void fpsimd_hotplug_init(void)
1434{
1435        cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
1436                                  NULL, fpsimd_cpu_dead);
1437}
1438
1439#else
1440static inline void fpsimd_hotplug_init(void) { }
1441#endif
1442
1443/*
1444 * FP/SIMD support code initialisation.
1445 */
1446static int __init fpsimd_init(void)
1447{
1448        if (cpu_have_named_feature(FP)) {
1449                fpsimd_pm_init();
1450                fpsimd_hotplug_init();
1451        } else {
1452                pr_notice("Floating-point is not implemented\n");
1453        }
1454
1455        if (!cpu_have_named_feature(ASIMD))
1456                pr_notice("Advanced SIMD is not implemented\n");
1457
1458        return sve_sysctl_init();
1459}
1460core_initcall(fpsimd_init);
1461