linux/fs/btrfs/tests/extent-io-tests.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"
#include "../btrfs_inode.h"

#define PROCESS_UNLOCK          (1 << 0)
#define PROCESS_RELEASE         (1 << 1)
#define PROCESS_TEST_LOCKED     (1 << 2)

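/*
 * Walk the pagecache pages covering [start, end] and, depending on @flags,
 * check that each page is locked, unlock it and/or drop an extra page
 * reference.  Returns the number of pages that failed the
 * PROCESS_TEST_LOCKED check, so 0 means every page in the range was locked.
 */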
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
                                       unsigned long flags)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_SHIFT;
        unsigned long end_index = end >> PAGE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int count = 0;
        int loops = 0;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long, nr_pages,
                                     ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {
                        if (flags & PROCESS_TEST_LOCKED &&
                            !PageLocked(pages[i]))
                                count++;
                        if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
                                unlock_page(pages[i]);
                        put_page(pages[i]);
                        if (flags & PROCESS_RELEASE)
                                put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
                loops++;
                if (loops > 100000) {
                        printk(KERN_ERR
                "stuck in a loop, start %llu, end %llu, nr_pages %lu, ret %d\n",
                                start, end, nr_pages, ret);
                        break;
                }
        }
        return count;
}

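/*
 * Dirty a range covering two maximum-sized extents on a dummy inode, then
 * mark various sub-ranges as delalloc in its io_tree and check that
 * find_lock_delalloc_range() returns the expected start/end and leaves the
 * pages of the found range locked.
 */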
static int test_find_delalloc(u32 sectorsize)
{
        struct inode *inode;
        struct extent_io_tree *tmp;
        struct page *page;
        struct page *locked_page = NULL;
        unsigned long index = 0;
        /* In this test we need at least 2 file extents, each at its maximum size */
        u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
        u64 total_dirty = 2 * max_bytes;
        u64 start, end, test_start;
        bool found;
        int ret = -EINVAL;

        test_msg("running find delalloc tests");

        inode = btrfs_new_test_inode();
        if (!inode) {
                test_std_err(TEST_ALLOC_INODE);
                return -ENOMEM;
        }
        tmp = &BTRFS_I(inode)->io_tree;

        /*
         * Pass NULL for fs_info as we don't have one; tracepoints are not
         * used at this point anyway.
         */
        extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST, NULL);

        /*
         * First go through and create and mark all of our pages dirty; we pin
         * everything to make sure our pages don't get evicted and screw up
         * our test.
         */
        for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
                page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
                if (!page) {
                        test_err("failed to allocate test page");
                        ret = -ENOMEM;
                        goto out;
                }
                SetPageDirty(page);
                if (index) {
                        unlock_page(page);
                } else {
                        get_page(page);
                        locked_page = page;
                }
        }

        /*
         * Test this scenario
         * |--- delalloc ---|
         * |---  search  ---|
         */
        set_extent_delalloc(tmp, 0, sectorsize - 1, 0, NULL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, locked_page, &start,
                                         &end);
        if (!found) {
                test_err("should have found at least one delalloc");
                goto out_bits;
        }
        if (start != 0 || end != (sectorsize - 1)) {
                test_err("expected start 0 end %u, got start %llu end %llu",
                        sectorsize - 1, start, end);
                goto out_bits;
        }
        unlock_extent(tmp, start, end);
        unlock_page(locked_page);
        put_page(locked_page);

        /*
         * Test this scenario
         *
         * |--- delalloc ---|
         *           |--- search ---|
         */
        test_start = SZ_64M;
        locked_page = find_lock_page(inode->i_mapping,
                                     test_start >> PAGE_SHIFT);
        if (!locked_page) {
                test_err("couldn't find the locked page");
                goto out_bits;
        }
        set_extent_delalloc(tmp, sectorsize, max_bytes - 1, 0, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, locked_page, &start,
                                         &end);
        if (!found) {
                test_err("couldn't find delalloc in our range");
                goto out_bits;
        }
        if (start != test_start || end != max_bytes - 1) {
                test_err("expected start %llu end %llu, got start %llu, end %llu",
                                test_start, max_bytes - 1, start, end);
                goto out_bits;
        }
        if (process_page_range(inode, start, end,
                               PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
                test_err("there were unlocked pages in the range");
                goto out_bits;
        }
        unlock_extent(tmp, start, end);
        /* locked_page was unlocked above */
        put_page(locked_page);

        /*
         * Test this scenario
         * |--- delalloc ---|
         *                    |--- search ---|
         */
        test_start = max_bytes + sectorsize;
        locked_page = find_lock_page(inode->i_mapping, test_start >>
                                     PAGE_SHIFT);
        if (!locked_page) {
                test_err("couldn't find the locked page");
                goto out_bits;
        }
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, locked_page, &start,
                                         &end);
        if (found) {
                test_err("found range when we shouldn't have");
                goto out_bits;
        }
        if (end != (u64)-1) {
                test_err("did not return the proper end offset");
                goto out_bits;
        }

        /*
         * Test this scenario
         * [------- delalloc -------|
         * [max_bytes]|-- search--|
         *
         * We are re-using our test_start from above since it works out well.
         */
        set_extent_delalloc(tmp, max_bytes, total_dirty - 1, 0, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, locked_page, &start,
                                         &end);
        if (!found) {
                test_err("didn't find our range");
                goto out_bits;
        }
        if (start != test_start || end != total_dirty - 1) {
                test_err("expected start %llu end %llu, got start %llu end %llu",
                         test_start, total_dirty - 1, start, end);
                goto out_bits;
        }
        if (process_page_range(inode, start, end,
                               PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
                test_err("pages in range were not all locked");
                goto out_bits;
        }
        unlock_extent(tmp, start, end);

        /*
         * Now to test where we run into a page that is no longer dirty in the
         * range we want to find.
         */
        page = find_get_page(inode->i_mapping,
                             (max_bytes + SZ_1M) >> PAGE_SHIFT);
        if (!page) {
                test_err("couldn't find our page");
                goto out_bits;
        }
        ClearPageDirty(page);
        put_page(page);

        /* We unlocked it in the previous test */
        lock_page(locked_page);
        start = test_start;
        end = 0;
        /*
         * Currently if we fail to find dirty pages in the delalloc range we
         * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
         * this changes at any point in the future we will need to fix this
         * test's expected behavior.
         */
        found = find_lock_delalloc_range(inode, locked_page, &start,
                                         &end);
        if (!found) {
                test_err("didn't find our range");
                goto out_bits;
        }
        if (start != test_start || end != test_start + PAGE_SIZE - 1) {
                test_err("expected start %llu end %llu, got start %llu end %llu",
                         test_start, test_start + PAGE_SIZE - 1, start, end);
                goto out_bits;
        }
        if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
                               PROCESS_UNLOCK)) {
                test_err("pages in range were not all locked");
                goto out_bits;
        }
        ret = 0;
out_bits:
        clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
out:
        if (locked_page)
                put_page(locked_page);
        process_page_range(inode, 0, total_dirty - 1,
                           PROCESS_UNLOCK | PROCESS_RELEASE);
        iput(inode);
        return ret;
}

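/*
 * Compare every bit of @bitmap against the extent buffer, once with a zero
 * byte offset and once with the offset split into byte and bit parts, so
 * both addressing modes of extent_buffer_test_bit() are exercised.
 */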
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
                           unsigned long len)
{
        unsigned long i;

        for (i = 0; i < len * BITS_PER_BYTE; i++) {
                int bit, bit1;

                bit = !!test_bit(i, bitmap);
                bit1 = !!extent_buffer_test_bit(eb, 0, i);
                if (bit1 != bit) {
                        test_err("bits do not match");
                        return -EINVAL;
                }

                bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
                                                i % BITS_PER_BYTE);
                if (bit1 != bit) {
                        test_err("offset bits do not match");
                        return -EINVAL;
                }
        }
        return 0;
}

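/*
 * Apply the same bitmap operations (set all, clear all, set/clear a run that
 * straddles a page boundary, pseudo-random pattern) to both the plain bitmap
 * and the extent buffer, checking after each step that they still agree.
 */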
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
                             unsigned long len)
{
        unsigned long i, j;
        u32 x;
        int ret;

        memset(bitmap, 0, len);
        memzero_extent_buffer(eb, 0, len);
        if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
                test_err("bitmap was not zeroed");
                return -EINVAL;
        }

        bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
        extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
        ret = check_eb_bitmap(bitmap, eb, len);
        if (ret) {
                test_err("setting all bits failed");
                return ret;
        }

        bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
        extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
        ret = check_eb_bitmap(bitmap, eb, len);
        if (ret) {
                test_err("clearing all bits failed");
                return ret;
        }

        /* Straddling pages test */
        if (len > PAGE_SIZE) {
                bitmap_set(bitmap,
                        (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
                        sizeof(long) * BITS_PER_BYTE);
                extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
                                        sizeof(long) * BITS_PER_BYTE);
                ret = check_eb_bitmap(bitmap, eb, len);
                if (ret) {
                        test_err("setting straddling pages failed");
                        return ret;
                }

                bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
                bitmap_clear(bitmap,
                        (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
                        sizeof(long) * BITS_PER_BYTE);
                extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
                extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
                                        sizeof(long) * BITS_PER_BYTE);
                ret = check_eb_bitmap(bitmap, eb, len);
                if (ret) {
                        test_err("clearing straddling pages failed");
                        return ret;
                }
        }

        /*
         * Generate a wonky pseudo-random bit pattern for the sake of not using
         * something repetitive that could miss some hypothetical off-by-n bug.
         */
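        /* 32-bit LCG step: x = 1664525 * x + 1013904223 (mod 2^32) */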
        x = 0;
        bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
        extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
        for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
                x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
                for (j = 0; j < 32; j++) {
                        if (x & (1U << j)) {
                                bitmap_set(bitmap, i * 32 + j, 1);
                                extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
                        }
                }
        }

        ret = check_eb_bitmap(bitmap, eb, len);
        if (ret) {
                test_err("random bit pattern failed");
                return ret;
        }

        return 0;
}

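/*
 * Run the bitmap consistency checks on a dummy extent buffer, first at
 * offset 0 and then again at an offset of nodesize / 2.
 */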
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
        struct btrfs_fs_info *fs_info;
        unsigned long len;
        unsigned long *bitmap = NULL;
        struct extent_buffer *eb = NULL;
        int ret;

        test_msg("running extent buffer bitmap tests");

        /*
         * On ppc64 the sectorsize can be 64K, in which case 4 * 64K would be
         * larger than BTRFS_MAX_METADATA_BLOCKSIZE.
         */
        len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
                ? sectorsize * 4 : sectorsize;

        fs_info = btrfs_alloc_dummy_fs_info(len, len);
        if (!fs_info) {
                test_std_err(TEST_ALLOC_FS_INFO);
                return -ENOMEM;
        }

        bitmap = kmalloc(len, GFP_KERNEL);
        if (!bitmap) {
                test_err("couldn't allocate test bitmap");
                ret = -ENOMEM;
                goto out;
        }

        eb = __alloc_dummy_extent_buffer(fs_info, 0, len);
        if (!eb) {
                test_std_err(TEST_ALLOC_ROOT);
                ret = -ENOMEM;
                goto out;
        }

        ret = __test_eb_bitmaps(bitmap, eb, len);
        if (ret)
                goto out;

        /* Do it over again with an extent buffer which isn't page-aligned. */
        free_extent_buffer(eb);
        eb = __alloc_dummy_extent_buffer(fs_info, nodesize / 2, len);
        if (!eb) {
                test_std_err(TEST_ALLOC_ROOT);
                ret = -ENOMEM;
                goto out;
        }

        ret = __test_eb_bitmaps(bitmap, eb, len);
out:
        free_extent_buffer(eb);
        kfree(bitmap);
        btrfs_free_dummy_fs_info(fs_info);
        return ret;
}

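/*
 * Mark ranges of a private io_tree as CHUNK_TRIMMED/CHUNK_ALLOCATED and
 * check that find_first_clear_extent_bit() reports the expected holes when
 * the search starts before, inside and beyond the marked ranges.
 */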
static int test_find_first_clear_extent_bit(void)
{
        struct extent_io_tree tree;
        u64 start, end;

        test_msg("running find_first_clear_extent_bit test");
        extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST, NULL);

        /*
         * Set 1M-4M alloc/discard; 32M-64M is added further below, leaving a
         * hole between 4M and 32M.
         */
        set_extent_bits(&tree, SZ_1M, SZ_4M - 1,
                        CHUNK_TRIMMED | CHUNK_ALLOCATED);

        find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
                                    CHUNK_TRIMMED | CHUNK_ALLOCATED);

        if (start != 0 || end != SZ_1M - 1)
                test_err("error finding beginning range: start %llu end %llu",
                         start, end);

        /* Now add 32M-64M so that we have a hole between 4M-32M */
        set_extent_bits(&tree, SZ_32M, SZ_64M - 1,
                        CHUNK_TRIMMED | CHUNK_ALLOCATED);

        /*
         * Request first hole starting at 12M, we should get 4M-32M
         */
        find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
                                    CHUNK_TRIMMED | CHUNK_ALLOCATED);

        if (start != SZ_4M || end != SZ_32M - 1)
                test_err("error finding trimmed range: start %llu end %llu",
                         start, end);

        /*
         * Search in the middle of allocated range, should get the next one
         * available, which happens to be unallocated -> 4M-32M
         */
        find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
                                    CHUNK_TRIMMED | CHUNK_ALLOCATED);

        if (start != SZ_4M || end != SZ_32M - 1)
                test_err("error finding next unalloc range: start %llu end %llu",
                         start, end);

        /*
         * Set 64M-72M with the CHUNK_ALLOCATED flag, then search for the
         * CHUNK_TRIMMED flag being unset in this range; we should get the
         * entry in range 64M-72M.
         */
        set_extent_bits(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED);
        find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
                                    CHUNK_TRIMMED);

        if (start != SZ_64M || end != SZ_64M + SZ_8M - 1)
                test_err("error finding exact range: start %llu end %llu",
                         start, end);

        find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
                                    CHUNK_TRIMMED);

        /*
         * Search starting in the middle of a set range whose immediate
         * neighbour doesn't have the bits set, so that neighbour must be
         * returned
         */
        if (start != SZ_64M || end != SZ_64M + SZ_8M - 1)
                test_err("error finding next alloc range: start %llu end %llu",
                         start, end);

        /*
         * Search beyond any known range, shall return after last known range
         * and end should be -1
         */
        find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
        if (start != SZ_64M + SZ_8M || end != (u64)-1)
                test_err(
        "error handling beyond end of range search: start %llu end %llu",
                        start, end);

        return 0;
}

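/*
 * Entry point for the extent I/O selftests, called with the sectorsize and
 * nodesize combination under test.
 */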
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
        int ret;

        test_msg("running extent I/O tests");

        ret = test_find_delalloc(sectorsize);
        if (ret)
                goto out;

        ret = test_find_first_clear_extent_bit();
        if (ret)
                goto out;

        ret = test_eb_bitmaps(sectorsize, nodesize);
out:
        return ret;
}