linux/mm/kfence/kfence_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * through which KFENCE's reports are obtained is the console, that is the
 * output we verify: each test case checks for the presence (or absence) of
 * generated reports. Relies on the 'console' tracepoint to capture reports as
 * they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

#define KFENCE_TEST_REQUIRES(test, cond) do {                   \
        if (!(cond))                                            \
                kunit_skip((test), "Test requires: " #cond);    \
} while (0)

/* Report as observed from console. */
static struct {
        spinlock_t lock;
        int nlines;
        char lines[2][256];
} observed = {
        .lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
        unsigned long flags;
        int nlines;

        spin_lock_irqsave(&observed.lock, flags);
        nlines = observed.nlines;

        if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
                /*
                 * KFENCE report and related to the test.
                 *
                 * The provided @buf is not NUL-terminated; copy no more than
                 * @len bytes and let strscpy() add the missing NUL-terminator.
                 */
                strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
                nlines = 1;
        } else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
                strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
        }

        WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
        spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
        return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
        enum kfence_error_type type; /* The type of error. */
        void *fn; /* Function pointer to expected function where access occurred. */
        char *addr; /* Address at which the bad access occurred. */
        bool is_write; /* Is access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
        return r->is_write ? "write" : "read";
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
        unsigned long addr = (unsigned long)r->addr;
        bool ret = false;
        unsigned long flags;
        typeof(observed.lines) expect;
        const char *end;
        char *cur;

        /* Double-checked locking. */
        if (!report_available())
                return false;

        /* Generate expected report contents. */

        /* Title */
        cur = expect[0];
        end = &expect[0][sizeof(expect[0]) - 1];
        switch (r->type) {
        case KFENCE_ERROR_OOB:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
                                 get_access_type(r));
                break;
        case KFENCE_ERROR_UAF:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
                                 get_access_type(r));
                break;
        case KFENCE_ERROR_CORRUPTION:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
                break;
        case KFENCE_ERROR_INVALID:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
                                 get_access_type(r));
                break;
        case KFENCE_ERROR_INVALID_FREE:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
                break;
        }

        scnprintf(cur, end - cur, " in %pS", r->fn);
        /* The exact offset won't match, remove it; also strip module name. */
        cur = strchr(expect[0], '+');
        if (cur)
                *cur = '\0';

        /* Access information */
        cur = expect[1];
        end = &expect[1][sizeof(expect[1]) - 1];

        switch (r->type) {
        case KFENCE_ERROR_OOB:
                cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
                addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_UAF:
                cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
                addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_CORRUPTION:
                cur += scnprintf(cur, end - cur, "Corrupted memory at");
                break;
        case KFENCE_ERROR_INVALID:
                cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
                addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_INVALID_FREE:
                cur += scnprintf(cur, end - cur, "Invalid free of");
                break;
        }

        cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

        spin_lock_irqsave(&observed.lock, flags);
        if (!report_available())
                goto out; /* A new report is being captured. */

        /* Finally match expected output to what we actually observed. */
        ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
        spin_unlock_irqrestore(&observed.lock, flags);
        return ret;
}

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

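/* Set up test_cache if the test wants a dedicated cache (TEST_PRIV_WANT_MEMCACHE); returns the object size. */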
static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
                               void (*ctor)(void *))
{
        if (test->priv != TEST_PRIV_WANT_MEMCACHE)
                return size;

        kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

        /*
         * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
         * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
         * allocate via memcg, if enabled.
         */
        flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
        test_cache = kmem_cache_create("test", size, 1, flags, ctor);
        KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

        return size;
}

static void test_cache_destroy(void)
{
        if (!test_cache)
                return;

        kmem_cache_destroy(test_cache);
        test_cache = NULL;
}

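/* Alignment of the kmalloc cache that serves an allocation of @size bytes. */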
static inline size_t kmalloc_cache_alignment(size_t size)
{
        return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
        if (test_cache)
                kmem_cache_free(test_cache, ptr);
        else
                kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and if so, on which side of the
 * allocation the closest guard page should be.
 */
enum allocation_policy {
        ALLOCATE_ANY, /* KFENCE, any side. */
        ALLOCATE_LEFT, /* KFENCE, left side of page. */
        ALLOCATE_RIGHT, /* KFENCE, right side of page. */
        ALLOCATE_NONE, /* No KFENCE allocation. */
};

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
        void *alloc;
        unsigned long timeout, resched_after;
        const char *policy_name;

        switch (policy) {
        case ALLOCATE_ANY:
                policy_name = "any";
                break;
        case ALLOCATE_LEFT:
                policy_name = "left";
                break;
        case ALLOCATE_RIGHT:
                policy_name = "right";
                break;
        case ALLOCATE_NONE:
                policy_name = "none";
                break;
        }

        kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
                   policy_name, !!test_cache);

        /*
         * 100x the sample interval should be more than enough to ensure we get
         * a KFENCE allocation eventually.
         */
        timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
        /*
         * Especially for non-preemption kernels, ensure the allocation-gate
         * timer can catch up: after @resched_after, every failed allocation
         * attempt yields, to ensure the allocation-gate timer is scheduled.
         */
        resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
        do {
                if (test_cache)
                        alloc = kmem_cache_alloc(test_cache, gfp);
                else
                        alloc = kmalloc(size, gfp);

                if (is_kfence_address(alloc)) {
                        struct slab *slab = virt_to_slab(alloc);
                        struct kmem_cache *s = test_cache ?:
                                        kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];

                        /*
                         * Verify that various helpers return the right values
                         * even for KFENCE objects; these are required so that
                         * memcg accounting works correctly.
                         */
                        KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
                        KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

                        if (policy == ALLOCATE_ANY)
                                return alloc;
                        if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
                                return alloc;
                        if (policy == ALLOCATE_RIGHT &&
                            !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
                                return alloc;
                } else if (policy == ALLOCATE_NONE)
                        return alloc;

                test_free(alloc);

                if (time_after(jiffies, resched_after))
                        cond_resched();
        } while (time_before(jiffies, timeout));

        KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
        return NULL; /* Unreachable. */
}

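/* Test out-of-bounds reads on both sides of a guarded allocation. */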
static void test_out_of_bounds_read(struct kunit *test)
{
        size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_OOB,
                .fn = test_out_of_bounds_read,
                .is_write = false,
        };
        char *buf;

        setup_test_cache(test, size, 0, NULL);

        /*
         * If we don't have our own cache, adjust based on alignment, so that we
         * actually access guard pages on either side.
         */
        if (!test_cache)
                size = kmalloc_cache_alignment(size);

        /* Test both sides. */

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
        expect.addr = buf - 1;
        READ_ONCE(*expect.addr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        test_free(buf);

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
        expect.addr = buf + size;
        READ_ONCE(*expect.addr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        test_free(buf);
}

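/* Test an out-of-bounds write one byte before a left-aligned allocation. */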
static void test_out_of_bounds_write(struct kunit *test)
{
        size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_OOB,
                .fn = test_out_of_bounds_write,
                .is_write = true,
        };
        char *buf;

        setup_test_cache(test, size, 0, NULL);
        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
        expect.addr = buf - 1;
        WRITE_ONCE(*expect.addr, 42);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        test_free(buf);
}

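/* Test that reading from a freed object reports a use-after-free. */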
static void test_use_after_free_read(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_UAF,
                .fn = test_use_after_free_read,
                .is_write = false,
        };

        setup_test_cache(test, size, 0, NULL);
        expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        test_free(expect.addr);
        READ_ONCE(*expect.addr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

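/* Test that double-freeing an object reports an invalid free. */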
static void test_double_free(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_INVALID_FREE,
                .fn = test_double_free,
        };

        setup_test_cache(test, size, 0, NULL);
        expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        test_free(expect.addr);
        test_free(expect.addr); /* Double-free. */
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

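/* Test that freeing an address that is not the start of an object reports an invalid free. */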
static void test_invalid_addr_free(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_INVALID_FREE,
                .fn = test_invalid_addr_free,
        };
        char *buf;

        setup_test_cache(test, size, 0, NULL);
        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        expect.addr = buf + 1; /* Free on invalid address. */
        test_free(expect.addr); /* Invalid address free. */
        test_free(buf); /* No error. */
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

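/* Test that out-of-bounds writes on either side are reported as memory corruption when the object is freed. */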
static void test_corruption(struct kunit *test)
{
        size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_CORRUPTION,
                .fn = test_corruption,
        };
        char *buf;

        setup_test_cache(test, size, 0, NULL);

        /* Test both sides. */

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
        expect.addr = buf + size;
        WRITE_ONCE(*expect.addr, 42);
        test_free(buf);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
        expect.addr = buf - 1;
        WRITE_ONCE(*expect.addr, 42);
        test_free(buf);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
        const size_t size = 73;
        const size_t align = kmalloc_cache_alignment(size);
        struct expect_report expect = {
                .type = KFENCE_ERROR_OOB,
                .fn = test_kmalloc_aligned_oob_read,
                .is_write = false,
        };
        char *buf;

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

        /*
         * The object is offset to the right, so there won't be an OOB to the
         * left of it.
         */
        READ_ONCE(*(buf - 1));
        KUNIT_EXPECT_FALSE(test, report_available());

        /*
         * @buf must be aligned on @align, therefore buf + size belongs to the
         * same page -> no OOB.
         */
        READ_ONCE(*(buf + size));
        KUNIT_EXPECT_FALSE(test, report_available());

        /* Overflowing by @align bytes will result in an OOB. */
        expect.addr = buf + size + align;
        READ_ONCE(*expect.addr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));

        test_free(buf);
}

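/* Like test_kmalloc_aligned_oob_read, but the write into the alignment gap is only detected as memory corruption once the object is freed. */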
static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
        const size_t size = 73;
        struct expect_report expect = {
                .type = KFENCE_ERROR_CORRUPTION,
                .fn = test_kmalloc_aligned_oob_write,
        };
        char *buf;

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
        /*
         * The object is offset to the right, so we won't get a page
         * fault immediately after it.
         */
        expect.addr = buf + size;
        WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
        KUNIT_EXPECT_FALSE(test, report_available());
        test_free(buf);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
        const size_t size = 32;
        void *buf;

        setup_test_cache(test, size, 0, NULL);
        KUNIT_EXPECT_TRUE(test, test_cache);
        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        kmem_cache_shrink(test_cache);
        test_free(buf);

        KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
        /* Every object has at least 8 bytes. */
        memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
        int iter;

        for (iter = 0; iter < 5; iter++) {
                const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
                                                     (iter & 1) ? ctor_set_x : NULL);
                void *objects[] = {
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
                };

                kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
                KUNIT_ASSERT_FALSE(test, report_available());
                test_cache_destroy();
        }
}

/* Test init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_UAF,
                .fn = test_init_on_free,
                .is_write = false,
        };
        int i;

        KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
        /* Assume it hasn't been disabled on command line. */

        setup_test_cache(test, size, 0, NULL);
        expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        for (i = 0; i < size; i++)
                expect.addr[i] = i + 1;
        test_free(expect.addr);

        for (i = 0; i < size; i++) {
                /*
                 * This may fail if the page was recycled by KFENCE and then
                 * written to again -- this, however, is near impossible with a
                 * default config.
                 */
                KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

                if (!i) /* Only check first access to not fail test if page is ever re-protected. */
                        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        }
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
        const size_t size = 32;
        char *buf;
        int i;

        setup_test_cache(test, size, 0, ctor_set_x);
        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

        for (i = 0; i < 8; i++)
                KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

        test_free(buf);

        KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
        const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
        char *buf1, *buf2;
        int i;

        /* Skip if we think it'd take too long. */
        KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);

        setup_test_cache(test, size, 0, NULL);
        buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        for (i = 0; i < size; i++)
                buf1[i] = i + 1;
        test_free(buf1);

        /* Try to get same address again -- this can take a while. */
        for (i = 0;; i++) {
                buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
                if (buf1 == buf2)
                        break;
                test_free(buf2);

                if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
                        kunit_warn(test, "giving up ... cannot get same object back\n");
                        return;
                }
                cond_resched();
        }

        for (i = 0; i < size; i++)
                KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

        test_free(buf2);

        KUNIT_EXPECT_FALSE(test, report_available());
}

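/* Test that an access to the KFENCE pool that maps to no object is reported as an invalid access. */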
static void test_invalid_access(struct kunit *test)
{
        const struct expect_report expect = {
                .type = KFENCE_ERROR_INVALID,
                .fn = test_invalid_access,
                .addr = &__kfence_pool[10],
                .is_write = false,
        };

        READ_ONCE(__kfence_pool[10]);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_UAF,
                .fn = test_memcache_typesafe_by_rcu,
                .is_write = false,
        };

        setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
        KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

        expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        *expect.addr = 42;

        rcu_read_lock();
        test_free(expect.addr);
        KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
        /*
         * Up to this point, memory should not have been freed yet, and
         * therefore there should be no KFENCE report from the above access.
         */
        rcu_read_unlock();

        /* Above access to @expect.addr should not have generated a report! */
        KUNIT_EXPECT_FALSE(test, report_available());

        /* Only after rcu_barrier() is the memory guaranteed to be freed. */
        rcu_barrier();

        /* Expect use-after-free. */
        KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
        const size_t size = 32;
        const struct expect_report expect = {
                .type = KFENCE_ERROR_UAF,
                .fn = test_krealloc,
                .addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
                .is_write = false,
        };
        char *buf = expect.addr;
        int i;

        KUNIT_EXPECT_FALSE(test, test_cache);
        KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
        for (i = 0; i < size; i++)
                buf[i] = i + 1;

        /* Check that we successfully change the size. */
        buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
        /* Note: Might no longer be a KFENCE alloc. */
        KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
        for (i = 0; i < size; i++)
                KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
        for (; i < size * 3; i++) /* Fill the extra bytes. */
                buf[i] = i + 1;

        buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
        KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
        for (i = 0; i < size * 2; i++)
                KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

        buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
        KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
        KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

        READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
        KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
        const size_t size = 32;
        bool pass = false;
        unsigned long timeout;

        setup_test_cache(test, size, 0, NULL);
        KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
        /*
         * 100x the sample interval should be more than enough to ensure we get
         * a KFENCE allocation eventually.
         */
        timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
        do {
                void *objects[100];
                int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
                                                   objects);
                if (!num)
                        continue;
                for (i = 0; i < ARRAY_SIZE(objects); i++) {
                        if (is_kfence_address(objects[i])) {
                                pass = true;
                                break;
                        }
                }
                kmem_cache_free_bulk(test_cache, num, objects);
                /*
                 * kmem_cache_alloc_bulk() disables interrupts, and calling it
                 * in a tight loop may not give KFENCE a chance to switch the
                 * static branch. Call cond_resched() to let KFENCE chime in.
                 */
                cond_resched();
        } while (!pass && time_before(jiffies, timeout));

        KUNIT_EXPECT_TRUE(test, pass);
        KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the name. Set up 2 tests per test case: one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)                                            \
        { .run_case = test_name, .name = #test_name },                          \
        { .run_case = test_name, .name = #test_name "-memcache" }

static struct kunit_case kfence_test_cases[] = {
        KFENCE_KUNIT_CASE(test_out_of_bounds_read),
        KFENCE_KUNIT_CASE(test_out_of_bounds_write),
        KFENCE_KUNIT_CASE(test_use_after_free_read),
        KFENCE_KUNIT_CASE(test_double_free),
        KFENCE_KUNIT_CASE(test_invalid_addr_free),
        KFENCE_KUNIT_CASE(test_corruption),
        KFENCE_KUNIT_CASE(test_free_bulk),
        KFENCE_KUNIT_CASE(test_init_on_free),
        KUNIT_CASE(test_kmalloc_aligned_oob_read),
        KUNIT_CASE(test_kmalloc_aligned_oob_write),
        KUNIT_CASE(test_shrink_memcache),
        KUNIT_CASE(test_memcache_ctor),
        KUNIT_CASE(test_invalid_access),
        KUNIT_CASE(test_gfpzero),
        KUNIT_CASE(test_memcache_typesafe_by_rcu),
        KUNIT_CASE(test_krealloc),
        KUNIT_CASE(test_memcache_alloc_bulk),
        {},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
        unsigned long flags;
        int i;

        if (!__kfence_pool)
                return -EINVAL;

        spin_lock_irqsave(&observed.lock, flags);
        for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
                observed.lines[i][0] = '\0';
        observed.nlines = 0;
        spin_unlock_irqrestore(&observed.lock, flags);

        /* Any test with 'memcache' in its name will want a memcache. */
        if (strstr(test->name, "memcache"))
                test->priv = TEST_PRIV_WANT_MEMCACHE;
        else
                test->priv = NULL;

        return 0;
}

static void test_exit(struct kunit *test)
{
        test_cache_destroy();
}

static struct kunit_suite kfence_test_suite = {
        .name = "kfence",
        .test_cases = kfence_test_cases,
        .init = test_init,
        .exit = test_exit,
};
static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL };

static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
        check_trace_callback_type_console(probe_console);
        if (!strcmp(tp->name, "console"))
                WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
        if (!strcmp(tp->name, "console"))
                tracepoint_probe_unregister(tp, probe_console, NULL);
}

/*
 * We only want to do the tracepoint setup and teardown once, therefore we have
 * to customize the init and exit functions and cannot rely on
 * kunit_test_suite().
 */
static int __init kfence_test_init(void)
{
        /*
         * Because we want to be able to build the test as a module, we need to
         * iterate through all known tracepoints, since the static registration
         * won't work here.
         */
        for_each_kernel_tracepoint(register_tracepoints, NULL);
        return __kunit_test_suites_init(kfence_test_suites);
}

static void kfence_test_exit(void)
{
        __kunit_test_suites_exit(kfence_test_suites);
        for_each_kernel_tracepoint(unregister_tracepoints, NULL);
        tracepoint_synchronize_unregister();
}

late_initcall_sync(kfence_test_init);
module_exit(kfence_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");
