linux/drivers/misc/lkdtm/usercopy.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and by making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst = 0;
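/*
 * Note: cache_size is also the object size of the kmem cache created in
 * lkdtm_usercopy_init(); the whitelist window tested below is derived
 * from it.
 */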
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

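	/*
	 * Deliberately leak a pointer into this frame: after we return it
	 * refers to a dead stack frame, which is exactly what the
	 * USERCOPY_STACK_FRAME_* tests want to feed to copy_*_user().
	 */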
	return trick_compiler(buf);
}

static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
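		/*
		 * A sizeof(good_stack) copy starting here runs off the end
		 * of the thread stack: the USERCOPY_STACK_BEYOND case.
		 */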
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;
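	/*
	 * Starting 16 bytes into "one" means the full-size copies below
	 * spill 16 bytes past the end of the object; "two" is allocated
	 * alongside so the overrun most likely lands in a mapped neighbor,
	 * leaving hardened usercopy as the only thing that should object.
	 */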

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_heap_size() tests will pass too.
 */
static void do_usercopy_heap_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window inside it.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;
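	/*
	 * So the whitelisted window is [cache_size / 4, cache_size / 4 +
	 * cache_size / 16) within the object; the "bad" copies below start
	 * one byte before it, which is enough to trip the whitelist check.
	 */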

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
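/*
 * These are expected to be wired into LKDTM's crashtype table; a typical
 * way to fire one at runtime (assuming debugfs is mounted) is:
 *
 *   echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 */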
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
	do_usercopy_heap_whitelist(true);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
	do_usercopy_heap_whitelist(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata\n");
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

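	/*
	 * vm_mmap is used here simply as a convenient kernel-text address;
	 * hardened usercopy should refuse to expose kernel text to
	 * userspace.
	 */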
	pr_info("attempting bad copy_to_user from kernel text\n");
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
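	/*
	 * Arguments: size = cache_size, align = 0, flags = 0,
	 * useroffset = cache_size / 4, usersize = cache_size / 16,
	 * ctor = NULL -- the same window do_usercopy_heap_whitelist()
	 * probes.
	 */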
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}