linux/arch/powerpc/include/asm/book3s/32/kup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H

#include <asm/book3s/32/mmu-hash.h>

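/*
 * Kernel Userspace Execution Prevention (KUEP) and Kernel Userspace Access
 * Protection (KUAP) are implemented on book3s/32 by toggling bits in the
 * user segment registers: SR_NX (No execute) stops the kernel from fetching
 * instructions from user segments, and SR_KS (Kernel key) write-protects
 * user segments against supervisor accesses.  Ks does not block kernel
 * reads, which is why only the write direction is handled by the KUAP
 * helpers below.
 */
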
#ifdef __ASSEMBLY__

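/*
 * Rewrite consecutive user segment registers with the value in gpr1, stepping
 * to the next segment on each iteration: the VSID is advanced by 0x111 (then
 * masked so a carry cannot spill out of the VSID field) and the effective
 * address in gpr2 by 0x10000000 (one 256MB segment).  The caller preloads CTR
 * with the number of segments to update and gpr1 with the first segment
 * register value.
 */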
.macro kuep_update_sr	gpr1, gpr2		/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	bdnz	101b
	isync
.endm

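/*
 * kuep_lock sets Nx in all NUM_USER_SEGMENTS user segment registers (starting
 * at effective address 0) so that instructions cannot be fetched from user
 * memory while in the kernel; kuep_unlock clears Nx again.  They are meant to
 * be used on kernel entry and exit respectively.
 */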
.macro kuep_lock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0
	mtctr	\gpr1
	mfsrin	\gpr1, \gpr2
	oris	\gpr1, \gpr1, SR_NX@h		/* set Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm

.macro kuep_unlock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0
	mtctr	\gpr1
	mfsrin	\gpr1, \gpr2
	rlwinm	\gpr1, \gpr1, 0, ~SR_NX		/* Clear Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm

#ifdef CONFIG_PPC_KUAP

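/*
 * Same walk as kuep_update_sr above, except that the loop is bounded by an
 * end address in gpr3 rather than by CTR, since KUAP only opens or closes
 * the segments actually covered by the user access.
 */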
.macro kuap_update_sr	gpr1, gpr2, gpr3	/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	cmplw	\gpr2, \gpr3
	blt-	101b
	isync
.endm

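/*
 * kuap_save_and_lock saves the current thread.kuap value into the exception
 * frame and, if a user write window was open, clears thread.kuap and sets Ks
 * again over that range.  kuap_restore does the reverse on exception exit.
 * The "rlwinm." rotates the saved value left by 28 bits and masks the top
 * nibble, which both recovers the end address of the window (see the encoding
 * used by allow_user_access() below) and sets CR0 so the beq+ can skip the
 * segment register update when no window was open.
 */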
.macro kuap_save_and_lock	sp, thread, gpr1, gpr2, gpr3
	lwz	\gpr2, KUAP(\thread)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
	stw	\gpr2, STACK_REGS_KUAP(\sp)
	beq+	102f
	li	\gpr1, 0
	stw	\gpr1, KUAP(\thread)
	mfsrin	\gpr1, \gpr2
	oris	\gpr1, \gpr1, SR_KS@h	/* set Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm

.macro kuap_restore	sp, current, gpr1, gpr2, gpr3
	lwz	\gpr2, STACK_REGS_KUAP(\sp)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
	stw	\gpr2, THREAD + KUAP(\current)
	beq+	102f
	mfsrin	\gpr1, \gpr2
	rlwinm	\gpr1, \gpr1, 0, ~SR_KS	/* Clear Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm

.macro kuap_check	current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
	lwz	\gpr, THREAD + KUAP(\current)
999:	twnei	\gpr, 0
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
.endm

#endif /* CONFIG_PPC_KUAP */

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>

static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
	addr &= 0xf0000000;	/* align addr to start of segment */
	barrier();	/* make sure thread.kuap is updated before playing with SRs */
	while (addr < end) {
		mtsrin(sr, addr);
		sr += 0x111;		/* next VSID */
		sr &= 0xf0ffffff;	/* clear VSID overflow */
		addr += 0x10000000;	/* address of next segment */
	}
	isync();	/* Context sync required after mtsrin() */
}

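/*
 * An open user write window is recorded in current->thread.kuap as
 * (start & 0xf0000000) | (last segment index + 1): the top nibble is the base
 * address of the first affected segment and the low nibble lets the end
 * address be recovered as (kuap << 28).  For example, opening a window over
 * 0x20001000..0x2fffffff yields kuap = 0x20000003, and 0x20000003 << 28 gives
 * back the end address 0x30000000.  A value of zero means no window is open.
 */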
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      u32 size, unsigned long dir)
{
	u32 addr, end;

	BUILD_BUG_ON(!__builtin_constant_p(dir));
	BUILD_BUG_ON(dir == KUAP_CURRENT);

	if (!(dir & KUAP_WRITE))
		return;

	addr = (__force u32)to;

	if (unlikely(addr >= TASK_SIZE || !size))
		return;

	end = min(addr + size, TASK_SIZE);

	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
}

static __always_inline void prevent_user_access(void __user *to, const void __user *from,
						u32 size, unsigned long dir)
{
	u32 addr, end;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (dir == KUAP_CURRENT) {
		u32 kuap = current->thread.kuap;

		if (unlikely(!kuap))
			return;

		addr = kuap & 0xf0000000;
		end = kuap << 28;
	} else if (dir & KUAP_WRITE) {
		addr = (__force u32)to;
		end = min(addr + size, TASK_SIZE);

		if (unlikely(addr >= TASK_SIZE || !size))
			return;
	} else {
		return;
	}

	current->thread.kuap = 0;
	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
}

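/*
 * prevent_user_access_return() closes any open window and hands back the
 * previous thread.kuap value; restore_user_access() re-opens the same window
 * from that value.  This lets a caller temporarily drop user access and later
 * put it back exactly as it was.
 */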
static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = current->thread.kuap;
	unsigned long addr = flags & 0xf0000000;
	unsigned long end = flags << 28;
	void __user *to = (__force void __user *)addr;

	if (flags)
		prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	unsigned long addr = flags & 0xf0000000;
	unsigned long end = flags << 28;
	void __user *to = (__force void __user *)addr;

	if (flags)
		allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
}

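/*
 * Called from the fault handler: a write fault whose address lies outside the
 * window recorded in regs->kuap was blocked by Ks, so warn about it and report
 * the fault as a KUAP violation.  Read faults are never KUAP faults on
 * book3s/32 since Ks does not block kernel reads.
 */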
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	unsigned long begin = regs->kuap & 0xf0000000;
	unsigned long end = regs->kuap << 28;

	if (!is_write)
		return false;

	return WARN(address < begin || address >= end,
		    "Bug: write fault blocked by segment registers !");
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */