linux/arch/arm64/include/asm/uaccess.h
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/compiler.h>
#include <asm/extable.h>

#define KERNEL_DS       (-1UL)
#define get_ds()        (KERNEL_DS)

#define USER_DS         TASK_SIZE_64
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;

        /*
         * Enable/disable UAO so that copy_to_user() etc can access
         * kernel memory with the unprivileged instructions.
         */
        if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
        else
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
                                CONFIG_ARM64_UAO));
}
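
/*
 * Illustrative only, not part of this header: the traditional pattern for
 * kernel code that needs the uaccess helpers to accept kernel pointers.
 * The helper and buffer names below are hypothetical; only get_fs(),
 * set_fs() and KERNEL_DS come from this file.
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ret = some_read_helper((char __user *)kbuf, len);
 *      set_fs(old_fs);
 */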

#define segment_eq(a, b)        ((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)                                          \
({                                                                      \
        unsigned long __addr = (unsigned long)(addr);                   \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"         \
                : "=&r" (flag), "=&r" (roksum)                          \
                : "1" (__addr), "Ir" (size),                            \
                  "r" (current_thread_info()->addr_limit)               \
                : "cc");                                                \
        flag;                                                           \
})
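
/*
 * A rough C-level sketch of the check implemented above (illustrative
 * only; the real macro does it branchlessly in three instructions and
 * models the 65-bit sum via the carry flag):
 *
 *      unsigned long sum = __addr + (unsigned long)(size);
 *      int ok = (sum >= __addr) &&     // the addition did not wrap
 *               (sum <= current_thread_info()->addr_limit);
 */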

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)             sign_extend64(addr, 55)
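
/*
 * Illustrative only: with top-byte-ignore, userspace may hand the kernel a
 * pointer carrying a tag in bits 63:56. A fault handler holding such an
 * address (the names below are hypothetical) would strip the tag before
 * doing the range check:
 *
 *      unsigned long addr = untagged_addr(tagged_addr);
 *
 *      if (!access_ok(VERIFY_READ, (void __user *)addr, size))
 *              return -EFAULT;
 */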

#define access_ok(type, addr, size)     __range_ok(addr, size)
#define user_addr_max                   get_fs

#define _ASM_EXTABLE(from, to)                                          \
        "       .pushsection    __ex_table, \"a\"\n"                    \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
        unsigned long ttbr;

        /* reserved_ttbr0 placed at the end of swapper_pg_dir */
        ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
        write_sysreg(ttbr, ttbr0_el1);
        isb();
}

static inline void __uaccess_ttbr0_enable(void)
{
        unsigned long flags;

        /*
         * Disable interrupts to avoid preemption between reading the 'ttbr0'
         * variable and the MSR. A context switch could trigger an ASID
         * roll-over and an update of 'ttbr0'.
         */
        local_irq_save(flags);
        write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
        isb();
        local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_disable();
        return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_enable();
        return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
        return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
        return false;
}
#endif

#define __uaccess_disable(alt)                                          \
do {                                                                    \
        if (!uaccess_ttbr0_disable())                                   \
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,          \
                                CONFIG_ARM64_PAN));                     \
} while (0)

#define __uaccess_enable(alt)                                           \
do {                                                                    \
        if (!uaccess_ttbr0_enable())                                    \
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,          \
                                CONFIG_ARM64_PAN));                     \
} while (0)

static inline void uaccess_disable(void)
{
        __uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
        __uaccess_enable(ARM64_HAS_PAN);
}
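
/*
 * Illustrative pattern (hypothetical caller, not from this file): arch code
 * that performs its own loads/stores to user memory brackets the access with
 * uaccess_enable()/uaccess_disable(), so PAN (or the TTBR0 switch) is only
 * lifted for the duration of the access:
 *
 *      uaccess_enable();
 *      ret = probe_user_word(&val, uaddr);     // hypothetical helper
 *      uaccess_disable();
 */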

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
        __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
        __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)    \
        asm volatile(                                                   \
        "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
                        alt_instr " " reg "1, [%2]\n", feature)         \
        "2:\n"                                                          \
        "       .section .fixup, \"ax\"\n"                              \
        "       .align  2\n"                                            \
        "3:     mov     %w0, %3\n"                                      \
        "       mov     %1, #0\n"                                       \
        "       b       2b\n"                                           \
        "       .previous\n"                                            \
        _ASM_EXTABLE(1b, 3b)                                            \
        : "+r" (err), "=&r" (x)                                         \
        : "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)                                     \
do {                                                                    \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        uaccess_enable_not_uao();                                       \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm("ldr", "ldtr", "%x",  __gu_val, (ptr),   \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        uaccess_disable_not_uao();                                      \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
} while (0)

#define __get_user(x, ptr)                                              \
({                                                                      \
        int __gu_err = 0;                                               \
        __get_user_err((x), (ptr), __gu_err);                           \
        __gu_err;                                                       \
})

#define __get_user_error(x, ptr, err)                                   \
({                                                                      \
        __get_user_err((x), (ptr), (err));                              \
        (void)0;                                                        \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        access_ok(VERIFY_READ, __p, sizeof(*__p)) ?                     \
                __get_user((x), __p) :                                  \
                ((x) = 0, -EFAULT);                                     \
})
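
/*
 * Illustrative usage (hypothetical driver code; 'arg' is a made-up name):
 *
 *      u32 val;
 *
 *      if (get_user(val, (u32 __user *)arg))   // does access_ok() itself
 *              return -EFAULT;
 *
 * __get_user() skips the access_ok() check and is only safe once the caller
 * has already validated the pointer and size.
 */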

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)    \
        asm volatile(                                                   \
        "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
                        alt_instr " " reg "1, [%2]\n", feature)         \
        "2:\n"                                                          \
        "       .section .fixup,\"ax\"\n"                               \
        "       .align  2\n"                                            \
        "3:     mov     %w0, %3\n"                                      \
        "       b       2b\n"                                           \
        "       .previous\n"                                            \
        _ASM_EXTABLE(1b, 3b)                                            \
        : "+r" (err)                                                    \
        : "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)                                     \
do {                                                                    \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        uaccess_enable_not_uao();                                       \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm("str", "sttr", "%w", __pu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm("str", "sttr", "%x", __pu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        uaccess_disable_not_uao();                                      \
} while (0)

#define __put_user(x, ptr)                                              \
({                                                                      \
        int __pu_err = 0;                                               \
        __put_user_err((x), (ptr), __pu_err);                           \
        __pu_err;                                                       \
})

#define __put_user_error(x, ptr, err)                                   \
({                                                                      \
        __put_user_err((x), (ptr), (err));                              \
        (void)0;                                                        \
})

#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?                    \
                __put_user((x), __p) :                                  \
                -EFAULT;                                                \
})
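
/*
 * Illustrative usage (hypothetical ioctl handler; 'arg' and 'result' are
 * made-up names):
 *
 *      if (put_user(result, (u32 __user *)arg))
 *              return -EFAULT;
 *
 * As with get_user(), the __put_user() form omits the access_ok() check and
 * must only follow an explicit validation of the destination.
 */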
 329
 330extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 331#define raw_copy_from_user __arch_copy_from_user
 332extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 333#define raw_copy_to_user __arch_copy_to_user
 334extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 335extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 336#define INLINE_COPY_TO_USER
 337#define INLINE_COPY_FROM_USER
 338
 339static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 340{
 341        if (access_ok(VERIFY_WRITE, to, n))
 342                n = __clear_user(to, n);
 343        return n;
 344}
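
/*
 * Illustrative usage (hypothetical read() path; every name other than
 * copy_to_user() and clear_user() is an assumption). Both helpers return
 * the number of bytes that could NOT be transferred, so non-zero means at
 * least a partial fault:
 *
 *      if (copy_to_user(ubuf, kbuf, len))
 *              return -EFAULT;
 *      if (clear_user(ubuf + len, pad))        // zero any padding tail
 *              return -EFAULT;
 */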

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* __ASM_UACCESS_H */