linux/drivers/misc/lkdtm/bugs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

struct lkdtm_list {
        struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
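
/*
 * Worked example (illustrative numbers, not asserted by this file): with
 * THREAD_SIZE == 16 KiB and CONFIG_FRAME_WARN == 2048, each frame uses
 * REC_STACK_SIZE == 1024 bytes (half the warning limit, so no warning),
 * and REC_NUM_DEFAULT == (16384 / 1024) * 2 == 32 recursions, twice as
 * many frames as the stack can hold.
 */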

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
        volatile char buf[REC_STACK_SIZE];

        memset((void *)buf, remaining & 0xFF, sizeof(buf));
        pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
                recur_count);
        if (!remaining)
                return 0;
        else
                return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default; otherwise update the default. */
void __init lkdtm_bugs_init(int *recur_param)
{
        if (*recur_param < 0)
                *recur_param = recur_count;
        else
                recur_count = *recur_param;
}

void lkdtm_PANIC(void)
{
        panic("dumptest");
}
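
/*
 * Usage sketch (assumes LKDTM is built in or loaded, and debugfs is
 * mounted at /sys/kernel/debug): each lkdtm_* handler in this file is
 * reached by writing the test name, minus the lkdtm_ prefix, to the
 * DIRECT trigger, e.g.:
 *
 *   echo PANIC > /sys/kernel/debug/provoke-crash/DIRECT
 */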

void lkdtm_BUG(void)
{
        BUG();
}

static int warn_counter;

void lkdtm_WARNING(void)
{
        WARN(1, "Warning message trigger count: %d\n", warn_counter++);
}

void lkdtm_EXCEPTION(void)
{
        *((volatile int *) 0) = 0;
}

void lkdtm_LOOP(void)
{
        for (;;)
                ;
}

void lkdtm_EXHAUST_STACK(void)
{
        pr_info("Calling function with %lu frame size to depth %d ...\n",
                REC_STACK_SIZE, recur_count);
        recursive_loop(recur_count);
        pr_info("FAIL: survived without exhausting stack?!\n");
}
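
/*
 * Expected outcome (a sketch, not guaranteed on every config): the
 * recursion above consumes roughly 2 * THREAD_SIZE of stack. With
 * CONFIG_VMAP_STACK the overflow lands in the guard page and faults
 * immediately; without it, adjacent memory is silently corrupted and
 * the FAIL message above may actually print.
 */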

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
        memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
        /* Use default char array length that triggers stack protection. */
        char data[8] __aligned(sizeof(void *));

        __lkdtm_CORRUPT_STACK(&data);

        pr_info("Corrupted stack containing char array ...\n");
}
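
/*
 * On a stack-protector build the expected result is a canary-check panic
 * on function return (the stock message is along the lines of
 * "stack-protector: Kernel stack is corrupted in: ..."); without a canary,
 * the memset() above smashes the saved frame undetected.
 */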

/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
        union {
                unsigned short shorts[4];
                unsigned long *ptr;
        } data __aligned(sizeof(void *));

        __lkdtm_CORRUPT_STACK(&data);

        pr_info("Corrupted stack containing union ...\n");
}
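
/*
 * Why the union matters: plain -fstack-protector only instruments frames
 * containing char arrays of at least the protector threshold (8 bytes by
 * default), while -fstack-protector-strong also covers locals like this
 * union that merely contain an array or have their address taken.
 */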

void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
        static u8 data[5] __aligned(4) = {1, 2, 3, 4, 5};
        u32 *p;
        u32 val = 0x12345678;

        p = (u32 *)(data + 1);
        if (*p == 0)
                val = 0x87654321;
        *p = val;
}
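
/*
 * Note (behavior varies by architecture): "data" is 4-byte aligned, so
 * data + 1 yields a u32 pointer with address % 4 == 1. Architectures
 * without hardware unaligned access (or with alignment faults enabled)
 * trap on the load/store; x86, for example, just performs it.
 */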

void lkdtm_SOFTLOCKUP(void)
{
        preempt_disable();
        for (;;)
                cpu_relax();
}

void lkdtm_HARDLOCKUP(void)
{
        local_irq_disable();
        for (;;)
                cpu_relax();
}
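
/*
 * Detection sketch: the preempt-disabled spin should be reported by the
 * soft lockup watchdog (CONFIG_SOFTLOCKUP_DETECTOR), while the
 * IRQ-disabled spin also starves the timer tick and should be caught by
 * the NMI-based hard lockup detector (CONFIG_HARDLOCKUP_DETECTOR) where
 * available.
 */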

void lkdtm_SPINLOCKUP(void)
{
        /* Must be called twice to trigger. */
        spin_lock(&lock_me_up);
        /* Let sparse know we intended to exit holding the lock. */
        __release(&lock_me_up);
}
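
/*
 * Trigger sketch: the first write takes lock_me_up and returns with it
 * still held; the second self-deadlocks on the already-held lock (which
 * CONFIG_DEBUG_SPINLOCK, where enabled, can diagnose):
 *
 *   echo SPINLOCKUP > /sys/kernel/debug/provoke-crash/DIRECT
 *   echo SPINLOCKUP > /sys/kernel/debug/provoke-crash/DIRECT
 */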

void lkdtm_HUNG_TASK(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule();
}
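
/*
 * With CONFIG_DETECT_HUNG_TASK, khungtaskd should flag this task once it
 * has sat in uninterruptible sleep for hung_task_timeout_secs (120s by
 * default).
 */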

void lkdtm_CORRUPT_LIST_ADD(void)
{
        /*
         * Initially, an empty list via LIST_HEAD:
         *      test_head.next = &test_head
         *      test_head.prev = &test_head
         */
        LIST_HEAD(test_head);
        struct lkdtm_list good, bad;
        void *target[2] = { };
        void *redirection = &target;

        pr_info("attempting good list addition\n");

        /*
         * Adding to the list performs these actions:
         *      test_head.next->prev = &good.node
         *      good.node.next = test_head.next
         *      good.node.prev = &test_head
         *      test_head.next = &good.node
         */
        list_add(&good.node, &test_head);

        pr_info("attempting corrupted list addition\n");
        /*
         * In simulating this "write what where" primitive, the "what" is
         * the value &bad.node, and the "where" is the address held by
         * "redirection".
         */
        test_head.next = redirection;
        list_add(&bad.node, &test_head);

        if (target[0] == NULL && target[1] == NULL)
                pr_err("Overwrite did not happen, but no BUG?!\n");
        else
                pr_err("list_add() corruption not detected!\n");
}
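
/*
 * With CONFIG_DEBUG_LIST, __list_add_valid() should notice that
 * test_head.next->prev no longer points back at test_head, reject the
 * insertion, and warn (or BUG with CONFIG_BUG_ON_DATA_CORRUPTION) before
 * the overwrite lands; that is the detection this test probes for.
 */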

void lkdtm_CORRUPT_LIST_DEL(void)
{
        LIST_HEAD(test_head);
        struct lkdtm_list item;
        void *target[2] = { };
        void *redirection = &target;

        list_add(&item.node, &test_head);

        pr_info("attempting good list removal\n");
        list_del(&item.node);

        pr_info("attempting corrupted list removal\n");
        list_add(&item.node, &test_head);

        /* As with the list_add() test above, this corrupts "next". */
        item.node.next = redirection;
        list_del(&item.node);

        if (target[0] == NULL && target[1] == NULL)
                pr_err("Overwrite did not happen, but no BUG?!\n");
        else
                pr_err("list_del() corruption not detected!\n");
}
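
/*
 * The deletion counterpart of the check above: __list_del_entry_valid()
 * (again under CONFIG_DEBUG_LIST) should catch the poisoned next pointer
 * before list_del() writes through it.
 */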

/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
        pr_info("setting bad task size limit\n");
        set_fs(KERNEL_DS);

        /* Make sure we do not keep running with a KERNEL_DS! */
        force_sig(SIGKILL);
}
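
/*
 * Detection here is architecture-dependent: kernels that verify the
 * address limit on the return-to-userspace path (e.g. via
 * addr_limit_user_check()) should complain about the lingering KERNEL_DS;
 * the force_sig(SIGKILL) is only a backstop so the task cannot keep
 * running with an expanded address limit if nothing notices.
 */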

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
        const unsigned char *stack = task_stack_page(current);
        const unsigned char *ptr = stack - 1;
        volatile unsigned char byte;

        pr_info("attempting bad read from page below current stack\n");

        byte = *ptr;

        pr_err("FAIL: accessed page before stack!\n");
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
        const unsigned char *stack = task_stack_page(current);
        const unsigned char *ptr = stack + THREAD_SIZE;
        volatile unsigned char byte;

        pr_info("attempting bad read from page above current stack\n");

        byte = *ptr;

        pr_err("FAIL: accessed page after stack!\n");
}
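
/*
 * Both guard-page tests rely on CONFIG_VMAP_STACK placing the stack in
 * vmalloc space, where the surrounding guard pages are unmapped: the
 * one-byte reads just outside [stack, stack + THREAD_SIZE) should fault.
 * On non-VMAP_STACK kernels the reads may hit ordinary adjacent memory
 * and the FAIL messages will print.
 */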

void lkdtm_UNSET_SMEP(void)
{
#ifdef CONFIG_X86_64
#define MOV_CR4_DEPTH   64
        void (*direct_write_cr4)(unsigned long val);
        unsigned char *insn;
        unsigned long cr4;
        int i;

        cr4 = native_read_cr4();

        if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
                pr_err("FAIL: SMEP not in use\n");
                return;
        }
        cr4 &= ~(X86_CR4_SMEP);

        pr_info("trying to clear SMEP normally\n");
        native_write_cr4(cr4);
        if (cr4 == native_read_cr4()) {
                pr_err("FAIL: pinning SMEP failed!\n");
                cr4 |= X86_CR4_SMEP;
                pr_info("restoring SMEP\n");
                native_write_cr4(cr4);
                return;
        }
        pr_info("ok: SMEP did not get cleared\n");

        /*
         * To test the post-write pinning verification we need to call
         * directly into the middle of native_write_cr4() where the
         * cr4 write happens, skipping any pinning. This searches for
         * the cr4 writing instruction.
         */
        insn = (unsigned char *)native_write_cr4;
        for (i = 0; i < MOV_CR4_DEPTH; i++) {
                /* mov %rdi, %cr4 */
                if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
                        break;
                /* mov %rdi,%rax; mov %rax, %cr4 */
                if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
                    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
                    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
                        break;
        }
        if (i >= MOV_CR4_DEPTH) {
                pr_info("ok: cannot locate cr4 writing call gadget\n");
                return;
        }
        direct_write_cr4 = (void *)(insn + i);

        pr_info("trying to clear SMEP with call gadget\n");
        direct_write_cr4(cr4);
        if (native_read_cr4() & X86_CR4_SMEP) {
                pr_info("ok: SMEP removal was reverted\n");
        } else {
                pr_err("FAIL: cleared SMEP not detected!\n");
                cr4 |= X86_CR4_SMEP;
                pr_info("restoring SMEP\n");
                native_write_cr4(cr4);
        }
#else
        pr_err("FAIL: this test is x86_64-only\n");
#endif
}
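
/*
 * Background (as of the CR4 pinning this test targets): native_write_cr4()
 * ORs security-sensitive bits such as SMEP back in if a caller tries to
 * clear them, and warns about the attempt. The byte scan above deliberately
 * bypasses that wrapper by jumping straight at the "mov ... %cr4"
 * instruction, imitating the call gadget an attacker might use.
 */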