linux/arch/powerpc/include/asm/book3s/32/kup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/synch.h>

#ifndef __ASSEMBLY__

#include <linux/jump_label.h>

extern struct static_key_false disable_kuap_key;
extern struct static_key_false disable_kuep_key;

static __always_inline bool kuap_is_disabled(void)
{
        return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
}

static __always_inline bool kuep_is_disabled(void)
{
        return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
}

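/*
 * KUEP (Kernel Userspace Execution Prevention): setting SR_NX in every
 * user segment register marks user pages no-execute while the kernel
 * runs; kuep_unlock() clears the bit again before returning to
 * userspace.
 */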
static inline void kuep_lock(void)
{
        if (kuep_is_disabled())
                return;

        update_user_segments(mfsr(0) | SR_NX);
        /*
         * This isync() shouldn't be necessary, as the kernel is not expected
         * to execute any userspace instruction soon after the segment update,
         * but hash-based cores (at least the G3) exhibit erratic behaviour
         * when the isync is missing. 603 cores don't show this behaviour, so
         * the isync is skipped there to save several CPU cycles.
         */
        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
                isync();        /* Context sync required after mtsr() */
}

static inline void kuep_unlock(void)
{
        if (kuep_is_disabled())
                return;

        update_user_segments(mfsr(0) & ~SR_NX);
        /*
         * This isync() shouldn't be necessary, as an 'rfi' will soon be
         * executed to return to userspace, but hash-based cores (at least the
         * G3) exhibit erratic behaviour when the isync is missing. 603 cores
         * don't show this behaviour, so the isync is skipped there to save
         * several CPU cycles.
         */
        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
                isync();        /* Context sync required after mtsr() */
}

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>

#define KUAP_NONE       (~0UL)
#define KUAP_ALL        (~1UL)
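
/*
 * current->thread.kuap encodes the KUAP state:
 *   KUAP_NONE - every user segment is locked (no kernel writes to userspace)
 *   KUAP_ALL  - every user segment is unlocked
 *   otherwise - a user address whose 256MB segment is unlocked
 */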

static inline void kuap_lock_one(unsigned long addr)
{
        mtsr(mfsr(addr) | SR_KS, addr);
        isync();        /* Context sync required after mtsr() */
}

static inline void kuap_unlock_one(unsigned long addr)
{
        mtsr(mfsr(addr) & ~SR_KS, addr);
        isync();        /* Context sync required after mtsr() */
}

static inline void kuap_lock_all(void)
{
        update_user_segments(mfsr(0) | SR_KS);
        isync();        /* Context sync required after mtsr() */
}

static inline void kuap_unlock_all(void)
{
        update_user_segments(mfsr(0) & ~SR_KS);
        isync();        /* Context sync required after mtsr() */
}

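/*
 * Out-of-line variants of the lock/unlock-all paths: update_user_segments()
 * expands to a sizeable inline sequence, so frequent callers pass ool=true
 * below and branch to these non-inline copies instead.
 */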
void kuap_lock_all_ool(void);
void kuap_unlock_all_ool(void);

static inline void kuap_lock(unsigned long addr, bool ool)
{
        if (likely(addr != KUAP_ALL))
                kuap_lock_one(addr);
        else if (!ool)
                kuap_lock_all();
        else
                kuap_lock_all_ool();
}

static inline void kuap_unlock(unsigned long addr, bool ool)
{
        if (likely(addr != KUAP_ALL))
                kuap_unlock_one(addr);
        else if (!ool)
                kuap_unlock_all();
        else
                kuap_unlock_all_ool();
}

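/*
 * Interrupt entry/exit: kuap_save_and_lock() stashes the current KUAP
 * state in the saved regs and relocks userspace; kuap_kernel_restore()
 * reverses this when returning to a kernel context.
 */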
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
        unsigned long kuap = current->thread.kuap;

        if (kuap_is_disabled())
                return;

        regs->kuap = kuap;
        if (unlikely(kuap == KUAP_NONE))
                return;

        current->thread.kuap = KUAP_NONE;
        kuap_lock(kuap, false);
}

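/*
 * Nothing to do when returning to userspace: the Ks (supervisor) key
 * only affects kernel-mode accesses, so no segment state needs to be
 * rebuilt here.
 */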
static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
        if (kuap_is_disabled())
                return;

        if (unlikely(kuap != KUAP_NONE)) {
                current->thread.kuap = KUAP_NONE;
                kuap_lock(kuap, false);
        }

        if (likely(regs->kuap == KUAP_NONE))
                return;

        current->thread.kuap = regs->kuap;

        kuap_unlock(regs->kuap, false);
}

static inline unsigned long kuap_get_and_assert_locked(void)
{
        unsigned long kuap = current->thread.kuap;

        if (kuap_is_disabled())
                return KUAP_NONE;

        WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);

        return kuap;
}

static inline void kuap_assert_locked(void)
{
        kuap_get_and_assert_locked();
}

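/*
 * Due to the page protection capabilities of the hash MMU, KUAP on
 * book3s/32 only guards against writes: reads from userspace always
 * remain possible, which is why read-only directions return early
 * below without touching any segment register.
 */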
static __always_inline void allow_user_access(void __user *to, const void __user *from,
                                              u32 size, unsigned long dir)
{
        if (kuap_is_disabled())
                return;

        BUILD_BUG_ON(!__builtin_constant_p(dir));

        if (!(dir & KUAP_WRITE))
                return;

        current->thread.kuap = (__force u32)to;
        kuap_unlock_one((__force u32)to);
}

static __always_inline void prevent_user_access(unsigned long dir)
{
        u32 kuap = current->thread.kuap;

        if (kuap_is_disabled())
                return;

        BUILD_BUG_ON(!__builtin_constant_p(dir));

        if (!(dir & KUAP_WRITE))
                return;

        current->thread.kuap = KUAP_NONE;
        kuap_lock(kuap, true);
}

static inline unsigned long prevent_user_access_return(void)
{
        unsigned long flags = current->thread.kuap;

        if (kuap_is_disabled())
                return KUAP_NONE;

        if (flags != KUAP_NONE) {
                current->thread.kuap = KUAP_NONE;
                kuap_lock(flags, true);
        }

        return flags;
}

static inline void restore_user_access(unsigned long flags)
{
        if (kuap_is_disabled())
                return;

        if (flags != KUAP_NONE) {
                current->thread.kuap = flags;
                kuap_unlock(flags, true);
        }
}
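
/*
 * Typical usage (illustrative sketch, not a caller from this file):
 * generic uaccess helpers bracket a userspace write with an
 * allow/prevent pair, e.g.
 *
 *      allow_user_access(to, NULL, size, KUAP_WRITE);
 *      ... perform the stores to the user buffer 'to' ...
 *      prevent_user_access(KUAP_WRITE);
 *
 * prevent_user_access_return()/restore_user_access() provide the same
 * bracketing for code that must save the previous state and
 * re-establish it afterwards.
 */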

static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
        unsigned long kuap = regs->kuap;

        if (kuap_is_disabled())
                return false;

        if (!is_write || kuap == KUAP_ALL)
                return false;
        if (kuap == KUAP_NONE)
                return true;
        /*
         * If the faulting address is in a different 256MB segment than
         * the one that was unlocked (e.g. a user copy crossing a segment
         * boundary), flag all segments for unlocking on exception exit
         * so the access can be retried; this is not a KUAP violation.
         */
        if ((kuap ^ address) & 0xf0000000)
                regs->kuap = KUAP_ALL;

        return false;
}
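
/*
 * Illustrative caller (sketch): the page fault path uses this helper to
 * tell a genuine KUAP violation apart from a legitimate access, roughly:
 *
 *      if (!user_mode(regs) && bad_kuap_fault(regs, address, is_write))
 *              ... treat as a fatal kernel access to userspace ...
 */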

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */