linux/arch/powerpc/include/asm/book3s/64/kup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

#define AMR_KUAP_BLOCK_READ     UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE    UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED        UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED        (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
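
/*
 * How these masks decode (a sketch, assuming the MSB-first key layout used
 * by the book3s64 pkey code, i.e. key 0 in the two most significant bits):
 * each storage key owns a two-bit field in which 0b01 disables reads (or
 * instruction fetch, for the IAMR) and 0b10 disables writes.  So the
 * 0x5455... value read-blocks and the 0xa8aa... value write-blocks every
 * key except one, whose field is left clear, presumably so the kernel's
 * own default key keeps working.
 */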

#ifdef __ASSEMBLY__

.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
        BEGIN_MMU_FTR_SECTION_NESTED(67)
        b       100f  // skip_restore_amr
        END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
        /*
         * AMR and IAMR are going to be different when
         * returning to userspace.
         */
        ld      \gpr1, STACK_REGS_AMR(r1)

        /*
         * If the kuap feature is not enabled, only do the
         * mtspr if the AMR value differs.
         */
        BEGIN_MMU_FTR_SECTION_NESTED(68)
        mfspr   \gpr2, SPRN_AMR
        cmpd    \gpr1, \gpr2
        beq     99f
        END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

        isync
        mtspr   SPRN_AMR, \gpr1
99:
        /*
         * Restore IAMR only when returning to userspace.
         */
        ld      \gpr1, STACK_REGS_IAMR(r1)

        /*
         * If the kuep feature is not enabled, only do the
         * mtspr if the IAMR value differs.
         */
        BEGIN_MMU_FTR_SECTION_NESTED(69)
        mfspr   \gpr2, SPRN_IAMR
        cmpd    \gpr1, \gpr2
        beq     100f
        END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

        isync
        mtspr   SPRN_IAMR, \gpr1

100: // skip_restore_amr
        /* No isync required, see kuap_user_restore() */
#endif
.endm
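
/*
 * A note on the BEGIN/END_MMU_FTR_SECTION_NESTED pairs above (informal,
 * from the generic powerpc feature-fixup machinery): the instructions
 * between the markers are patched to nops at boot unless the named MMU
 * feature matches the _IFSET/_IFCLR condition. So the "b 100f" above only
 * survives when MMU_FTR_PKEY is clear, in which case the whole restore
 * sequence is skipped.
 */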

.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

        BEGIN_MMU_FTR_SECTION_NESTED(67)
        /*
         * AMR is going to be mostly the same since we are
         * returning to the kernel. Compare and do a mtspr.
         */
        ld      \gpr2, STACK_REGS_AMR(r1)
        mfspr   \gpr1, SPRN_AMR
        cmpd    \gpr1, \gpr2
        beq     100f
        isync
        mtspr   SPRN_AMR, \gpr2
        /*
         * No isync required, see kuap_kernel_restore().
         * No need to restore IAMR when returning to kernel space.
         */
100:
        END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
        BEGIN_MMU_FTR_SECTION_NESTED(67)
        mfspr   \gpr1, SPRN_AMR
        /* Prevent access to userspace using any key values */
        LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:    tdne    \gpr1, \gpr2
        EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
        END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
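
/*
 * Rough reading of the debug check above: tdne traps if the live AMR
 * (\gpr1) differs from AMR_KUAP_BLOCKED (\gpr2), and the EMIT_BUG_ENTRY
 * flags turn that trap site into a one-shot warning rather than a fatal
 * BUG — i.e. roughly WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED),
 * as done in the C kuap_assert_locked() below.
 */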

/*
 *      if (pkey) {
 *              save AMR -> stack;
 *              if (kuap) {
 *                      if (AMR != BLOCKED)
 *                              KUAP_BLOCKED -> AMR;
 *              }
 *              if (from_user) {
 *                      save IAMR -> stack;
 *                      if (kuep) {
 *                              KUEP_BLOCKED -> IAMR;
 *                      }
 *              }
 *              return;
 *      }
 *
 *      if (kuap) {
 *              if (from_kernel) {
 *                      save AMR -> stack;
 *                      if (AMR != BLOCKED)
 *                              KUAP_BLOCKED -> AMR;
 *              }
 *      }
 */
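/*
 * Label map for the macro below (informal): 100f is the common
 * "skip_save_amr" exit, and 102f skips the mtspr when the AMR is
 * already AMR_KUAP_BLOCKED.
 */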
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

        /*
         * If both pkey and kuap are disabled, there is nothing to do.
         */
        BEGIN_MMU_FTR_SECTION_NESTED(68)
        b       100f  // skip_save_amr
        END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

        /*
         * If pkey is disabled and we are entering from userspace,
         * don't do anything.
         */
        BEGIN_MMU_FTR_SECTION_NESTED(67)
        .ifnb \msr_pr_cr
        /*
         * Without pkey we are not changing the AMR outside the kernel,
         * hence skip this completely.
         */
        bne     \msr_pr_cr, 100f  // from userspace
        .endif
        END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

        /*
         * pkey is enabled, or pkey is disabled but we are entering
         * from the kernel.
         */
        mfspr   \gpr1, SPRN_AMR
        std     \gpr1, STACK_REGS_AMR(r1)

        /*
         * Update the kernel AMR with AMR_KUAP_BLOCKED only
         * if the KUAP feature is enabled.
         */
        BEGIN_MMU_FTR_SECTION_NESTED(69)
        LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
        cmpd    \use_cr, \gpr1, \gpr2
        beq     \use_cr, 102f
        /*
         * We don't isync here (before the mtspr) because we very recently
         * entered via an interrupt, which is context synchronising.
         */
        mtspr   SPRN_AMR, \gpr2
        isync
102:
        END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

        /*
         * If entering from the kernel we don't need to save the IAMR.
         */
        .ifnb \msr_pr_cr
        beq     \msr_pr_cr, 100f  // from kernel space
        mfspr   \gpr1, SPRN_IAMR
        std     \gpr1, STACK_REGS_IAMR(r1)

        /*
         * Update the kernel IAMR with AMR_KUEP_BLOCKED only
         * if the KUEP feature is enabled.
         */
        BEGIN_MMU_FTR_SECTION_NESTED(70)
        LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
        mtspr   SPRN_IAMR, \gpr2
        isync
        END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
        .endif

100: // skip_save_amr
#endif
.endm
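
/*
 * A note on the macro arguments (hedged; based on how interrupt entry
 * typically invokes this): \use_cr names a scratch CR field for the AMR
 * compare, and \msr_pr_cr, when non-blank, names a CR field the caller
 * has already loaded with a test of MSR[PR], so "bne \msr_pr_cr" reads
 * "we came from userspace" and "beq" reads "we came from the kernel".
 */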

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;

#include <asm/mmu.h>
#include <asm/ptrace.h>

/*
 * Usage of kthread_use_mm() should inherit the AMR value of the operating
 * address space. But the AMR value is thread-specific, and we inherit the
 * address space rather than the thread's access restrictions. Because of
 * this, ignore the AMR value when accessing userspace via a kernel thread.
 */
static inline u64 current_thread_amr(void)
{
        if (current->thread.regs)
                return current->thread.regs->amr;
        return default_amr;
}

static inline u64 current_thread_iamr(void)
{
        if (current->thread.regs)
                return current->thread.regs->iamr;
        return default_iamr;
}
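
/*
 * Concretely (an illustrative reading, not new behaviour): a kernel
 * thread that has borrowed an mm via kthread_use_mm() has
 * current->thread.regs == NULL, so both helpers fall back to the
 * boot-time default_amr/default_iamr rather than some other thread's
 * pkey restrictions.
 */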
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static inline void kuap_user_restore(struct pt_regs *regs)
{
        bool restore_amr = false, restore_iamr = false;
        unsigned long amr, iamr;

        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
                amr = mfspr(SPRN_AMR);
                if (amr != regs->amr)
                        restore_amr = true;
        } else {
                restore_amr = true;
        }

        if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
                iamr = mfspr(SPRN_IAMR);
                if (iamr != regs->iamr)
                        restore_iamr = true;
        } else {
                restore_iamr = true;
        }

        if (restore_amr || restore_iamr) {
                isync();
                if (restore_amr)
                        mtspr(SPRN_AMR, regs->amr);
                if (restore_iamr)
                        mtspr(SPRN_IAMR, regs->iamr);
        }
        /*
         * No isync required here because we are about to rfi
         * back to previous context before any user accesses
         * would be made, which is a CSI.
         */
}

static inline void kuap_kernel_restore(struct pt_regs *regs,
                                       unsigned long amr)
{
        if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
                if (unlikely(regs->amr != amr)) {
                        isync();
                        mtspr(SPRN_AMR, regs->amr);
                        /*
                         * No isync required here because we are about to rfi
                         * back to previous context before any user accesses
                         * would be made, which is a CSI.
                         */
                }
        }
        /*
         * No need to restore IAMR when returning to kernel space.
         */
}

static inline unsigned long kuap_get_and_assert_locked(void)
{
        if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
                unsigned long amr = mfspr(SPRN_AMR);

                if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
                        WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
                return amr;
        }
        return 0;
}
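
/*
 * A typical pairing (sketch): an interrupt path grabs the locked AMR
 * with kuap_get_and_assert_locked() on entry and later passes that value
 * back as the amr argument of kuap_kernel_restore() on exit, so the
 * mtspr is only done when the saved regs->amr actually differs.
 */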

static inline void kuap_assert_locked(void)
{
        if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
                WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}

/*
 * We support individually allowing read or write, but we don't support
 * nesting because that would require an expensive read/modify/write of
 * the AMR.
 */

static inline unsigned long get_kuap(void)
{
        /*
         * We return AMR_KUAP_BLOCKED when we don't support KUAP because
         * prevent_user_access_return() needs to return AMR_KUAP_BLOCKED
         * to cause restore_user_access() to do a flush.
         *
         * This has no effect in terms of actually blocking things on hash,
         * so it doesn't break anything.
         */
        if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
                return AMR_KUAP_BLOCKED;

        return mfspr(SPRN_AMR);
}

static inline void set_kuap(unsigned long value)
{
        if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
                return;

        /*
         * ISA v3.0B says we need a CSI (Context Synchronising Instruction)
         * both before and after the move to AMR. See table 6 on page 1134.
         */
        isync();
        mtspr(SPRN_AMR, value);
        isync();
}

static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
                                  bool is_write)
{
        if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
                return false;
        /*
         * For radix this will be a storage protection fault (DSISR_PROTFAULT).
         * For hash this will be a key fault (DSISR_KEYFAULT).
         *
         * We do have an exception table entry, but accessing userspace
         * resulted in a fault anyway. That could be because we didn't
         * unlock the AMR, or because access is denied by a userspace key
         * value that blocks it. We are only interested in catching the
         * case of accessing without unlocking the AMR, hence check for
         * BLOCK_WRITE/BLOCK_READ against the AMR.
         */
        if (is_write)
                return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
        return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}
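
/*
 * For orientation (hedged; the caller lives outside this header): the
 * page fault path consults bad_kuap_fault() when a kernel access to a
 * user address faults, and a true return is treated as a blocked-by-AMR
 * bug rather than a fixable fault.
 */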

static __always_inline void allow_user_access(void __user *to, const void __user *from,
                                              unsigned long size, unsigned long dir)
{
        unsigned long thread_amr = 0;

        // This is written so we can resolve to a single case at build time
        BUILD_BUG_ON(!__builtin_constant_p(dir));

        if (mmu_has_feature(MMU_FTR_PKEY))
                thread_amr = current_thread_amr();

        if (dir == KUAP_READ)
                set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
        else if (dir == KUAP_WRITE)
                set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
        else if (dir == KUAP_READ_WRITE)
                set_kuap(thread_amr);
        else
                BUILD_BUG();
}
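
/*
 * A minimal usage sketch (hypothetical caller; the real wrappers live in
 * the generic asm/kup.h): a copy-out style helper would bracket its user
 * stores like
 *
 *	allow_user_access(to, NULL, size, KUAP_WRITE);
 *	... perform the userspace stores ...
 *	prevent_user_access(KUAP_WRITE);
 *
 * Note the AMR logic is inverted: to *allow* writes we keep only
 * AMR_KUAP_BLOCK_READ set (plus any pkey bits from current_thread_amr()).
 */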

#else /* CONFIG_PPC_KUAP */

static inline unsigned long get_kuap(void)
{
        return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
                                              unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */

static inline void prevent_user_access(unsigned long dir)
{
        set_kuap(AMR_KUAP_BLOCKED);
        if (static_branch_unlikely(&uaccess_flush_key))
                do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
        unsigned long flags = get_kuap();

        set_kuap(AMR_KUAP_BLOCKED);
        if (static_branch_unlikely(&uaccess_flush_key))
                do_uaccess_flush();

        return flags;
}

static inline void restore_user_access(unsigned long flags)
{
        set_kuap(flags);
        if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
                do_uaccess_flush();
}
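
/*
 * Save/restore sketch for the pair above (illustrative): code that must
 * temporarily forbid user accesses in the middle of a uaccess region does
 *
 *	unsigned long flags = prevent_user_access_return();
 *	... no user accesses allowed here ...
 *	restore_user_access(flags);
 *
 * If the region was already locked, flags comes back as AMR_KUAP_BLOCKED
 * (see get_kuap()), and restore_user_access() then repeats the uaccess
 * flush, since restoring a blocked state is itself a lock operation.
 */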
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */