linux/arch/arm64/kernel/process.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Based on arch/arm/kernel/process.c
   4 *
   5 * Original Copyright (C) 1995  Linus Torvalds
   6 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
   7 * Copyright (C) 2012 ARM Ltd.
   8 */
   9#include <linux/compat.h>
  10#include <linux/efi.h>
  11#include <linux/elf.h>
  12#include <linux/export.h>
  13#include <linux/sched.h>
  14#include <linux/sched/debug.h>
  15#include <linux/sched/task.h>
  16#include <linux/sched/task_stack.h>
  17#include <linux/kernel.h>
  18#include <linux/mman.h>
  19#include <linux/mm.h>
  20#include <linux/nospec.h>
  21#include <linux/stddef.h>
  22#include <linux/sysctl.h>
  23#include <linux/unistd.h>
  24#include <linux/user.h>
  25#include <linux/delay.h>
  26#include <linux/reboot.h>
  27#include <linux/interrupt.h>
  28#include <linux/init.h>
  29#include <linux/cpu.h>
  30#include <linux/elfcore.h>
  31#include <linux/pm.h>
  32#include <linux/tick.h>
  33#include <linux/utsname.h>
  34#include <linux/uaccess.h>
  35#include <linux/random.h>
  36#include <linux/hw_breakpoint.h>
  37#include <linux/personality.h>
  38#include <linux/notifier.h>
  39#include <trace/events/power.h>
  40#include <linux/percpu.h>
  41#include <linux/thread_info.h>
  42#include <linux/prctl.h>
  43
  44#include <asm/alternative.h>
  45#include <asm/compat.h>
  46#include <asm/cpufeature.h>
  47#include <asm/cacheflush.h>
  48#include <asm/exec.h>
  49#include <asm/fpsimd.h>
  50#include <asm/mmu_context.h>
  51#include <asm/mte.h>
  52#include <asm/processor.h>
  53#include <asm/pointer_auth.h>
  54#include <asm/stacktrace.h>
  55#include <asm/switch_to.h>
  56#include <asm/system_misc.h>
  57
  58#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
  59#include <linux/stackprotector.h>
  60unsigned long __stack_chk_guard __ro_after_init;
  61EXPORT_SYMBOL(__stack_chk_guard);
  62#endif
  63
  64/*
   65 * Function pointers to optional machine-specific functions
  66 */
  67void (*pm_power_off)(void);
  68EXPORT_SYMBOL_GPL(pm_power_off);
  69
  70#ifdef CONFIG_HOTPLUG_CPU
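     /*
      * Called from the idle loop once this CPU has been marked offline: hand
      * the CPU over to the hotplug machinery via cpu_die(), which does not
      * return.
      */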
  71void arch_cpu_idle_dead(void)
  72{
  73       cpu_die();
  74}
  75#endif
  76
  77/*
  78 * Called by kexec, immediately prior to machine_kexec().
  79 *
  80 * This must completely disable all secondary CPUs; simply causing those CPUs
  81 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
  82 * kexec'd kernel to use any and all RAM as it sees fit, without having to
   83 * avoid any code or data used by any SW CPU pin loop. CPU hotplug
   84 * functionality, embodied in smp_shutdown_nonboot_cpus(), achieves this.
  85 */
  86void machine_shutdown(void)
  87{
  88        smp_shutdown_nonboot_cpus(reboot_cpu);
  89}
  90
  91/*
  92 * Halting simply requires that the secondary CPUs stop performing any
  93 * activity (executing tasks, handling interrupts). smp_send_stop()
  94 * achieves this.
  95 */
  96void machine_halt(void)
  97{
  98        local_irq_disable();
  99        smp_send_stop();
 100        while (1);
 101}
 102
 103/*
 104 * Power-off simply requires that the secondary CPUs stop performing any
 105 * activity (executing tasks, handling interrupts). smp_send_stop()
 106 * achieves this. When the system power is turned off, it will take all CPUs
 107 * with it.
 108 */
 109void machine_power_off(void)
 110{
 111        local_irq_disable();
 112        smp_send_stop();
 113        if (pm_power_off)
 114                pm_power_off();
 115}
 116
 117/*
 118 * Restart requires that the secondary CPUs stop performing any activity
 119 * while the primary CPU resets the system. Systems with multiple CPUs must
 120 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 121 * This is required so that any code running after reset on the primary CPU
 122 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 123 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 124 * to use. Implementing such co-ordination would be essentially impossible.
 125 */
 126void machine_restart(char *cmd)
 127{
 128        /* Disable interrupts first */
 129        local_irq_disable();
 130        smp_send_stop();
 131
 132        /*
 133         * UpdateCapsule() depends on the system being reset via
 134         * ResetSystem().
 135         */
 136        if (efi_enabled(EFI_RUNTIME_SERVICES))
 137                efi_reboot(reboot_mode, NULL);
 138
 139        /* Now call the architecture specific reboot code. */
 140        do_kernel_restart(cmd);
 141
 142        /*
 143         * Whoops - the architecture was unable to reboot.
 144         */
 145        printk("Reboot failed -- System halted\n");
 146        while (1);
 147}
 148
 149#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
 150static const char *const btypes[] = {
 151        bstr(NONE, "--"),
 152        bstr(  JC, "jc"),
 153        bstr(   C, "-c"),
 154        bstr(  J , "j-")
 155};
 156#undef bstr
 157
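     /*
      * Print the saved PSTATE in human-readable form, decoding the AArch32
      * (CPSR) flag layout for compat tasks and the native AArch64 layout
      * otherwise.
      */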
 158static void print_pstate(struct pt_regs *regs)
 159{
 160        u64 pstate = regs->pstate;
 161
 162        if (compat_user_mode(regs)) {
 163                printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
 164                        pstate,
 165                        pstate & PSR_AA32_N_BIT ? 'N' : 'n',
 166                        pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
 167                        pstate & PSR_AA32_C_BIT ? 'C' : 'c',
 168                        pstate & PSR_AA32_V_BIT ? 'V' : 'v',
 169                        pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
 170                        pstate & PSR_AA32_T_BIT ? "T32" : "A32",
 171                        pstate & PSR_AA32_E_BIT ? "BE" : "LE",
 172                        pstate & PSR_AA32_A_BIT ? 'A' : 'a',
 173                        pstate & PSR_AA32_I_BIT ? 'I' : 'i',
 174                        pstate & PSR_AA32_F_BIT ? 'F' : 'f',
 175                        pstate & PSR_AA32_DIT_BIT ? '+' : '-',
 176                        pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
 177        } else {
 178                const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
 179                                               PSR_BTYPE_SHIFT];
 180
 181                printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
 182                        pstate,
 183                        pstate & PSR_N_BIT ? 'N' : 'n',
 184                        pstate & PSR_Z_BIT ? 'Z' : 'z',
 185                        pstate & PSR_C_BIT ? 'C' : 'c',
 186                        pstate & PSR_V_BIT ? 'V' : 'v',
 187                        pstate & PSR_D_BIT ? 'D' : 'd',
 188                        pstate & PSR_A_BIT ? 'A' : 'a',
 189                        pstate & PSR_I_BIT ? 'I' : 'i',
 190                        pstate & PSR_F_BIT ? 'F' : 'f',
 191                        pstate & PSR_PAN_BIT ? '+' : '-',
 192                        pstate & PSR_UAO_BIT ? '+' : '-',
 193                        pstate & PSR_TCO_BIT ? '+' : '-',
 194                        pstate & PSR_DIT_BIT ? '+' : '-',
 195                        pstate & PSR_SSBS_BIT ? '+' : '-',
 196                        btype_str);
 197        }
 198}
 199
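     /*
      * Dump the saved general-purpose register state. PC and LR are printed
      * as symbols only for exceptions taken from the kernel; user-space
      * values are shown as raw hex.
      */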
 200void __show_regs(struct pt_regs *regs)
 201{
 202        int i, top_reg;
 203        u64 lr, sp;
 204
 205        if (compat_user_mode(regs)) {
 206                lr = regs->compat_lr;
 207                sp = regs->compat_sp;
 208                top_reg = 12;
 209        } else {
 210                lr = regs->regs[30];
 211                sp = regs->sp;
 212                top_reg = 29;
 213        }
 214
 215        show_regs_print_info(KERN_DEFAULT);
 216        print_pstate(regs);
 217
 218        if (!user_mode(regs)) {
 219                printk("pc : %pS\n", (void *)regs->pc);
 220                printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
 221        } else {
 222                printk("pc : %016llx\n", regs->pc);
 223                printk("lr : %016llx\n", lr);
 224        }
 225
 226        printk("sp : %016llx\n", sp);
 227
 228        if (system_uses_irq_prio_masking())
 229                printk("pmr_save: %08llx\n", regs->pmr_save);
 230
 231        i = top_reg;
 232
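             /* Print the GPRs from x<top_reg> down to x0, three per line. */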
 233        while (i >= 0) {
 234                printk("x%-2d: %016llx", i, regs->regs[i]);
 235
 236                while (i-- % 3)
 237                        pr_cont(" x%-2d: %016llx", i, regs->regs[i]);
 238
 239                pr_cont("\n");
 240        }
 241}
 242
 243void show_regs(struct pt_regs *regs)
 244{
 245        __show_regs(regs);
 246        dump_backtrace(regs, NULL, KERN_DEFAULT);
 247}
 248
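     /*
      * Reset the TLS registers on exec: tpidr_el0 is always cleared, and for
      * compat tasks the shadow tp_value and tpidrro_el0 are cleared as well.
      */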
 249static void tls_thread_flush(void)
 250{
 251        write_sysreg(0, tpidr_el0);
 252
 253        if (is_compat_task()) {
 254                current->thread.uw.tp_value = 0;
 255
 256                /*
 257                 * We need to ensure ordering between the shadow state and the
 258                 * hardware state, so that we don't corrupt the hardware state
 259                 * with a stale shadow state during context switch.
 260                 */
 261                barrier();
 262                write_sysreg(0, tpidrro_el0);
 263        }
 264}
 265
 266static void flush_tagged_addr_state(void)
 267{
 268        if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
 269                clear_thread_flag(TIF_TAGGED_ADDR);
 270}
 271
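     /* Reset the architecture-specific thread state for the exec()'ing task. */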
 272void flush_thread(void)
 273{
 274        fpsimd_flush_thread();
 275        tls_thread_flush();
 276        flush_ptrace_hw_breakpoint(current);
 277        flush_tagged_addr_state();
 278}
 279
 280void release_thread(struct task_struct *dead_task)
 281{
 282}
 283
 284void arch_release_task_struct(struct task_struct *tsk)
 285{
 286        fpsimd_release_task(tsk);
 287}
 288
 289int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 290{
 291        if (current->mm)
 292                fpsimd_preserve_current_state();
 293        *dst = *src;
 294
 295        /* We rely on the above assignment to initialize dst's thread_flags: */
 296        BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
 297
 298        /*
 299         * Detach src's sve_state (if any) from dst so that it does not
 300         * get erroneously used or freed prematurely.  dst's sve_state
 301         * will be allocated on demand later on if dst uses SVE.
 302         * For consistency, also clear TIF_SVE here: this could be done
 303         * later in copy_process(), but to avoid tripping up future
 304         * maintainers it is best not to leave TIF_SVE and sve_state in
 305         * an inconsistent state, even temporarily.
 306         */
 307        dst->thread.sve_state = NULL;
 308        clear_tsk_thread_flag(dst, TIF_SVE);
 309
 310        /* clear any pending asynchronous tag fault raised by the parent */
 311        clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
 312
 313        return 0;
 314}
 315
 316asmlinkage void ret_from_fork(void) asm("ret_from_fork");
 317
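     /*
      * Set up the new task's register and context-switch state. User threads
      * start from a copy of the parent's pt_regs with x0 = 0 (the child's
      * fork() return value); kernel threads start from ret_from_fork with the
      * thread function and its argument stashed in x19/x20.
      */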
 318int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 319                unsigned long stk_sz, struct task_struct *p, unsigned long tls)
 320{
 321        struct pt_regs *childregs = task_pt_regs(p);
 322
 323        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 324
 325        /*
 326         * In case p was allocated the same task_struct pointer as some
 327         * other recently-exited task, make sure p is disassociated from
 328         * any cpu that may have run that now-exited task recently.
 329         * Otherwise we could erroneously skip reloading the FPSIMD
 330         * registers for p.
 331         */
 332        fpsimd_flush_task_state(p);
 333
 334        ptrauth_thread_init_kernel(p);
 335
 336        if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
 337                *childregs = *current_pt_regs();
 338                childregs->regs[0] = 0;
 339
 340                /*
 341                 * Read the current TLS pointer from tpidr_el0 as it may be
 342                 * out-of-sync with the saved value.
 343                 */
 344                *task_user_tls(p) = read_sysreg(tpidr_el0);
 345
 346                if (stack_start) {
 347                        if (is_compat_thread(task_thread_info(p)))
 348                                childregs->compat_sp = stack_start;
 349                        else
 350                                childregs->sp = stack_start;
 351                }
 352
 353                /*
 354                 * If a TLS pointer was passed to clone, use it for the new
 355                 * thread.
 356                 */
 357                if (clone_flags & CLONE_SETTLS)
 358                        p->thread.uw.tp_value = tls;
 359        } else {
 360                /*
 361                 * A kthread has no context to ERET to, so ensure any buggy
 362                 * ERET is treated as an illegal exception return.
 363                 *
 364                 * When a user task is created from a kthread, childregs will
 365                 * be initialized by start_thread() or start_compat_thread().
 366                 */
 367                memset(childregs, 0, sizeof(struct pt_regs));
 368                childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
 369
 370                p->thread.cpu_context.x19 = stack_start;
 371                p->thread.cpu_context.x20 = stk_sz;
 372        }
 373        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
 374        p->thread.cpu_context.sp = (unsigned long)childregs;
 375        /*
 376         * For the benefit of the unwinder, set up childregs->stackframe
 377         * as the final frame for the new task.
 378         */
 379        p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
 380
 381        ptrace_hw_copy_thread(p);
 382
 383        return 0;
 384}
 385
 386void tls_preserve_current_state(void)
 387{
 388        *task_user_tls(current) = read_sysreg(tpidr_el0);
 389}
 390
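     /*
      * Save the outgoing task's TLS pointer and install the incoming one.
      * tpidrro_el0 carries the compat TLS value for 32-bit tasks; for native
      * tasks it is zeroed, unless KPTI is enabled, in which case the
      * exception entry trampoline manages it.
      */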
 391static void tls_thread_switch(struct task_struct *next)
 392{
 393        tls_preserve_current_state();
 394
 395        if (is_compat_thread(task_thread_info(next)))
 396                write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
 397        else if (!arm64_kernel_unmapped_at_el0())
 398                write_sysreg(0, tpidrro_el0);
 399
 400        write_sysreg(*task_user_tls(next), tpidr_el0);
 401}
 402
 403/*
 404 * Force SSBS state on context-switch, since it may be lost after migrating
 405 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 406 */
 407static void ssbs_thread_switch(struct task_struct *next)
 408{
 409        /*
 410         * Nothing to do for kernel threads, but 'regs' may be junk
 411         * (e.g. idle task) so check the flags and bail early.
 412         */
 413        if (unlikely(next->flags & PF_KTHREAD))
 414                return;
 415
 416        /*
 417         * If all CPUs implement the SSBS extension, then we just need to
 418         * context-switch the PSTATE field.
 419         */
 420        if (cpus_have_const_cap(ARM64_SSBS))
 421                return;
 422
 423        spectre_v4_enable_task_mitigation(next);
 424}
 425
 426/*
 427 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 428 * shadow copy so that we can restore this upon entry from userspace.
 429 *
 430 * This is *only* for exception entry from EL0, and is not valid until we
 431 * __switch_to() a user task.
 432 */
 433DEFINE_PER_CPU(struct task_struct *, __entry_task);
 434
 435static void entry_task_switch(struct task_struct *next)
 436{
 437        __this_cpu_write(__entry_task, next);
 438}
 439
 440/*
  441 * ARM erratum 1418040 handling, affecting the 32-bit view of CNTVCT.
  442 * Assuming the virtual counter is enabled at boot:
  443 *
  444 * - disable access when switching from a 64-bit task to a 32-bit task
  445 * - enable access when switching from a 32-bit task to a 64-bit task
 446 */
 447static void erratum_1418040_thread_switch(struct task_struct *prev,
 448                                          struct task_struct *next)
 449{
 450        bool prev32, next32;
 451        u64 val;
 452
 453        if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
 454                return;
 455
 456        prev32 = is_compat_thread(task_thread_info(prev));
 457        next32 = is_compat_thread(task_thread_info(next));
 458
 459        if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
 460                return;
 461
 462        val = read_sysreg(cntkctl_el1);
 463
 464        if (!next32)
 465                val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
 466        else
 467                val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
 468
 469        write_sysreg(val, cntkctl_el1);
 470}
 471
 472/*
 473 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
 474 * this function must be called with preemption disabled and the update to
 475 * sctlr_user must be made in the same preemption disabled block so that
 476 * __switch_to() does not see the variable update before the SCTLR_EL1 one.
 477 */
 478void update_sctlr_el1(u64 sctlr)
 479{
 480        /*
 481         * EnIA must not be cleared while in the kernel as this is necessary for
 482         * in-kernel PAC. It will be cleared on kernel exit if needed.
 483         */
 484        sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
 485
 486        /* ISB required for the kernel uaccess routines when setting TCF0. */
 487        isb();
 488}
 489
 490/*
 491 * Thread switching.
 492 */
 493__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 494                                struct task_struct *next)
 495{
 496        struct task_struct *last;
 497
 498        fpsimd_thread_switch(next);
 499        tls_thread_switch(next);
 500        hw_breakpoint_thread_switch(next);
 501        contextidr_thread_switch(next);
 502        entry_task_switch(next);
 503        ssbs_thread_switch(next);
 504        erratum_1418040_thread_switch(prev, next);
 505        ptrauth_thread_switch_user(next);
 506
 507        /*
 508         * Complete any pending TLB or cache maintenance on this CPU in case
 509         * the thread migrates to a different CPU.
 510         * This full barrier is also required by the membarrier system
 511         * call.
 512         */
 513        dsb(ish);
 514
 515        /*
 516         * MTE thread switching must happen after the DSB above to ensure that
 517         * any asynchronous tag check faults have been logged in the TFSR*_EL1
 518         * registers.
 519         */
 520        mte_thread_switch(next);
 521        /* avoid expensive SCTLR_EL1 accesses if no change */
 522        if (prev->thread.sctlr_user != next->thread.sctlr_user)
 523                update_sctlr_el1(next->thread.sctlr_user);
 524
 525        /* the actual thread switch */
 526        last = cpu_switch_to(prev, next);
 527
 528        return last;
 529}
 530
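     /*
      * Walk a sleeping task's saved call stack and return the first PC found
      * outside the scheduler (reported via /proc/<pid>/wchan), giving up
      * after a bounded number of frames.
      */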
 531unsigned long get_wchan(struct task_struct *p)
 532{
 533        struct stackframe frame;
 534        unsigned long stack_page, ret = 0;
 535        int count = 0;
 536        if (!p || p == current || task_is_running(p))
 537                return 0;
 538
 539        stack_page = (unsigned long)try_get_task_stack(p);
 540        if (!stack_page)
 541                return 0;
 542
 543        start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
 544
 545        do {
 546                if (unwind_frame(p, &frame))
 547                        goto out;
 548                if (!in_sched_functions(frame.pc)) {
 549                        ret = frame.pc;
 550                        goto out;
 551                }
 552        } while (count++ < 16);
 553
 554out:
 555        put_task_stack(p);
 556        return ret;
 557}
 558
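     /*
      * Apply a random sub-page offset to the initial stack pointer (unless
      * address-space randomization is disabled) and keep it 16-byte aligned.
      */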
 559unsigned long arch_align_stack(unsigned long sp)
 560{
 561        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
 562                sp -= get_random_int() & ~PAGE_MASK;
 563        return sp & ~0xf;
 564}
 565
 566#ifdef CONFIG_COMPAT
 567int compat_elf_check_arch(const struct elf32_hdr *hdr)
 568{
 569        if (!system_supports_32bit_el0())
 570                return false;
 571
 572        if ((hdr)->e_machine != EM_ARM)
 573                return false;
 574
 575        if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
 576                return false;
 577
 578        /*
 579         * Prevent execve() of a 32-bit program from a deadline task
 580         * if the restricted affinity mask would be inadmissible on an
 581         * asymmetric system.
 582         */
 583        return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
 584               !dl_task_check_affinity(current, system_32bit_el0_cpumask());
 585}
 586#endif
 587
 588/*
 589 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 590 */
 591void arch_setup_new_exec(void)
 592{
 593        unsigned long mmflags = 0;
 594
 595        if (is_compat_task()) {
 596                mmflags = MMCF_AARCH32;
 597
 598                /*
 599                 * Restrict the CPU affinity mask for a 32-bit task so that
 600                 * it contains only 32-bit-capable CPUs.
 601                 *
 602                 * From the perspective of the task, this looks similar to
 603                 * what would happen if the 64-bit-only CPUs were hot-unplugged
 604                 * at the point of execve(), although we try a bit harder to
 605                 * honour the cpuset hierarchy.
 606                 */
 607                if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
 608                        force_compatible_cpus_allowed_ptr(current);
 609        } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
 610                relax_compatible_cpus_allowed_ptr(current);
 611        }
 612
 613        current->mm->context.flags = mmflags;
 614        ptrauth_thread_init_user();
 615        mte_thread_init_user();
 616
 617        if (task_spec_ssb_noexec(current)) {
 618                arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
 619                                         PR_SPEC_ENABLE);
 620        }
 621}
 622
 623#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
 624/*
 625 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 626 */
 627static unsigned int tagged_addr_disabled;
 628
 629long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
 630{
 631        unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
 632        struct thread_info *ti = task_thread_info(task);
 633
 634        if (is_compat_thread(ti))
 635                return -EINVAL;
 636
 637        if (system_supports_mte())
 638                valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
 639
 640        if (arg & ~valid_mask)
 641                return -EINVAL;
 642
 643        /*
 644         * Do not allow the enabling of the tagged address ABI if globally
 645         * disabled via sysctl abi.tagged_addr_disabled.
 646         */
 647        if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
 648                return -EINVAL;
 649
 650        if (set_mte_ctrl(task, arg) != 0)
 651                return -EINVAL;
 652
 653        update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
 654
 655        return 0;
 656}
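     /*
      * Userspace reaches the function above via prctl(); a minimal sketch of
      * the opt-in (not part of this file) is:
      *
      *     prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
      */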
 657
 658long get_tagged_addr_ctrl(struct task_struct *task)
 659{
 660        long ret = 0;
 661        struct thread_info *ti = task_thread_info(task);
 662
 663        if (is_compat_thread(ti))
 664                return -EINVAL;
 665
 666        if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
 667                ret = PR_TAGGED_ADDR_ENABLE;
 668
 669        ret |= get_mte_ctrl(task);
 670
 671        return ret;
 672}
 673
 674/*
  675 * Global sysctl to disable tagged user address support. This control only
  676 * prevents the tagged address ABI from being enabled via prctl() and does
  677 * not disable it for tasks that have already opted in to the relaxed ABI.
 678 */
 679
 680static struct ctl_table tagged_addr_sysctl_table[] = {
 681        {
 682                .procname       = "tagged_addr_disabled",
 683                .mode           = 0644,
 684                .data           = &tagged_addr_disabled,
 685                .maxlen         = sizeof(int),
 686                .proc_handler   = proc_dointvec_minmax,
 687                .extra1         = SYSCTL_ZERO,
 688                .extra2         = SYSCTL_ONE,
 689        },
 690        { }
 691};
 692
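     /* Register the abi.tagged_addr_disabled sysctl at boot. */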
 693static int __init tagged_addr_init(void)
 694{
 695        if (!register_sysctl("abi", tagged_addr_sysctl_table))
 696                return -EINVAL;
 697        return 0;
 698}
 699
 700core_initcall(tagged_addr_init);
 701#endif  /* CONFIG_ARM64_TAGGED_ADDR_ABI */
 702
 703#ifdef CONFIG_BINFMT_ELF
 704int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
 705                         bool has_interp, bool is_interp)
 706{
 707        /*
 708         * For dynamically linked executables the interpreter is
 709         * responsible for setting PROT_BTI on everything except
 710         * itself.
 711         */
 712        if (is_interp != has_interp)
 713                return prot;
 714
 715        if (!(state->flags & ARM64_ELF_BTI))
 716                return prot;
 717
 718        if (prot & PROT_EXEC)
 719                prot |= PROT_BTI;
 720
 721        return prot;
 722}
 723#endif
 724