linux/mm/usercopy.c
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <asm/sections.h>

/*
 * Checks whether a given pointer and length are contained by the
 * current stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when frame checking is not available)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}
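
/*
 * A worked example of the checks above (a sketch with made-up numbers,
 * not part of the kernel build). Assume an 8kB stack:
 *
 *	stack = 0x2000, stackend = 0x4000
 *
 *	obj = 0x1000, len = 0x100: obj + len <= stack  -> NOT_STACK
 *	obj = 0x3f00, len = 0x200: straddles stackend  -> BAD_STACK
 *	obj = 0x2100, len = 0x100: fully contained     -> GOOD_STACK
 *	    (or GOOD_FRAME, if the architecture implements
 *	    arch_within_stack_frames() and the object sits in one frame)
 */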

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
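
/*
 * Example of adjusting a whitelist (a sketch, not part of this file):
 * creating a cache where only one field of each object may be copied
 * to/from userspace. "struct foo" and its fields are hypothetical;
 * kmem_cache_create_usercopy() is the real API named above.
 *
 *	struct foo {
 *		u32 kernel_only;
 *		char user_buf[64];
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_PANIC,
 *				offsetof(struct foo, user_buf),
 *				sizeof(((struct foo *)0)->user_buf),
 *				NULL);
 *
 * With this, a usercopy touching kernel_only (or running past the end
 * of user_buf) would trip the reports above.
 */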

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
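
/*
 * A quick worked example of the half-open interval semantics above
 * (hypothetical values):
 *
 *	overlaps(0x1000, 0x100, 0x1100, 0x2000) == false
 *		(the object ends exactly where [low,high) begins)
 *	overlaps(0x10ff, 0x2, 0x1100, 0x2000) == true
 *		(the object's last byte, at 0x1100, is inside [low,high))
 */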

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the reverse of __va(). This
	 * can be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}
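
/*
 * For reference, unless an architecture overrides it, lm_alias() is
 * defined in <linux/mm.h> as:
 *
 *	#define lm_alias(x)	__va(__pa_symbol(x))
 *
 * i.e. it yields the linear-map alias of a kernel-image address. On
 * arm64, for example, the kernel image and the linear map live at
 * different virtual addresses, so both ranges must be checked.
 */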

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + n < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
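
/*
 * Worked example of the wrap check above, assuming a 64-bit unsigned
 * long (hypothetical values):
 *
 *	ptr = 0xfffffffffffffff0, n = 0x20
 *	ptr + n overflows to 0x10, which is < ptr, so the object would
 *	wrap past the top of the address space and the copy is aborted.
 */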

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Allow if the range is entirely Reserved (i.e. special or
	 * device memory) or entirely CMA. Otherwise, reject, since the
	 * object spans several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}
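
/*
 * A sketch of an allocation that legitimately crosses page boundaries
 * and passes the compound-page test above (variable names are
 * hypothetical):
 *
 *	buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP, 2);
 *
 * All four pages share one head page, so for any offset within the
 * allocation, virt_to_head_page() returns that same head page and the
 * span check succeeds.
 */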

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	page = virt_to_head_page(ptr);

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}
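
/*
 * The slab half of this check lives in the allocator (mm/slab.c or
 * mm/slub.c). Roughly, as a simplified sketch rather than the exact
 * code, __check_heap_object() finds the offset of the pointer within
 * its object and requires the copy to stay inside the whitelisted
 * window:
 *
 *	offset = ptr - start_of_object;
 *	if (offset < s->useroffset ||
 *	    offset - s->useroffset + n > s->usersize)
 *		usercopy_abort(s->name, NULL, to_user, offset, n);
 */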

/*
 * Validates that the given object is:
 * - not a bogus address
 * - a known-safe heap or stack object
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking is not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
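
/*
 * For context: this function is reached from the uaccess routines via
 * check_object_size() in <linux/thread_info.h>, which skips the checks
 * when the copy size is a compile-time constant. At the time of this
 * code, that wrapper is essentially:
 *
 *	static __always_inline void check_object_size(const void *ptr,
 *						      unsigned long n,
 *						      bool to_user)
 *	{
 *		if (!__builtin_constant_p(n))
 *			__check_object_size(ptr, n, to_user);
 *	}
 */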