linux/drivers/gpu/drm/selftests/test-drm_mm.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Test cases for the drm_mm range manager
   4 */
   5
   6#define pr_fmt(fmt) "drm_mm: " fmt
   7
   8#include <linux/module.h>
   9#include <linux/prime_numbers.h>
  10#include <linux/slab.h>
  11#include <linux/random.h>
  12#include <linux/vmalloc.h>
  13#include <linux/ktime.h>
  14
  15#include <drm/drm_mm.h>
  16
  17#include "../lib/drm_random.h"
  18
  19#define TESTS "drm_mm_selftests.h"
  20#include "drm_selftest.h"
  21
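/* Tunables for the random stress tests below.  In the complete selftest
 * module these are expected to be exposed as module parameters (the
 * module_param() declarations live at the end of the file, outside this
 * excerpt), so a run might look roughly like:
 *
 *   modprobe test-drm_mm random_seed=0x12345678 max_iterations=1024
 *
 * (hypothetical invocation, shown only to illustrate how the knobs are used)
 */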
  22static unsigned int random_seed;
  23static unsigned int max_iterations = 8192;
  24static unsigned int max_prime = 128;
  25
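/* The allocation strategies each test is driven through: insert_modes[]
 * covers the four insertion strategies exercised here, while evict_modes[]
 * limits the eviction tests to the two directional (bottom-up / top-down)
 * searches.
 */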
  26enum {
  27        BEST,
  28        BOTTOMUP,
  29        TOPDOWN,
  30        EVICT,
  31};
  32
  33static const struct insert_mode {
  34        const char *name;
  35        enum drm_mm_insert_mode mode;
  36} insert_modes[] = {
  37        [BEST] = { "best", DRM_MM_INSERT_BEST },
  38        [BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
  39        [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
  40        [EVICT] = { "evict", DRM_MM_INSERT_EVICT },
  41        {}
  42}, evict_modes[] = {
  43        { "bottom-up", DRM_MM_INSERT_LOW },
  44        { "top-down", DRM_MM_INSERT_HIGH },
  45        {}
  46};
  47
  48static int igt_sanitycheck(void *ignored)
  49{
  50        pr_info("%s - ok!\n", __func__);
  51        return 0;
  52}
  53
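/* Check that the drm_mm contains no holes at all, i.e. that the whole
 * managed range is covered by allocated nodes.
 */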
  54static bool assert_no_holes(const struct drm_mm *mm)
  55{
  56        struct drm_mm_node *hole;
  57        u64 hole_start, hole_end;
  58        unsigned long count;
  59
  60        count = 0;
  61        drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
  62                count++;
  63        if (count) {
  64                pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
  65                return false;
  66        }
  67
  68        drm_mm_for_each_node(hole, mm) {
  69                if (drm_mm_hole_follows(hole)) {
  70                        pr_err("Hole follows node, expected none!\n");
  71                        return false;
  72                }
  73        }
  74
  75        return true;
  76}
  77
  78static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
  79{
  80        struct drm_mm_node *hole;
  81        u64 hole_start, hole_end;
  82        unsigned long count;
  83        bool ok = true;
  84
  85        if (end <= start)
  86                return true;
  87
  88        count = 0;
  89        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
  90                if (start != hole_start || end != hole_end) {
  91                        if (ok)
  92                                pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
  93                                       hole_start, hole_end,
  94                                       start, end);
  95                        ok = false;
  96                }
  97                count++;
  98        }
  99        if (count != 1) {
 100                pr_err("Expected to find one hole, found %lu instead\n", count);
 101                ok = false;
 102        }
 103
 104        return ok;
 105}
 106
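/* Check that the drm_mm is completely filled, starting at address 0, with
 * equally sized nodes laid out back-to-back in address order, and that a
 * range lookup over each slot finds exactly that node.
 */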
 107static bool assert_continuous(const struct drm_mm *mm, u64 size)
 108{
 109        struct drm_mm_node *node, *check, *found;
 110        unsigned long n;
 111        u64 addr;
 112
 113        if (!assert_no_holes(mm))
 114                return false;
 115
 116        n = 0;
 117        addr = 0;
 118        drm_mm_for_each_node(node, mm) {
 119                if (node->start != addr) {
 120                        pr_err("node[%ld] list out of order, expected %llx found %llx\n",
 121                               n, addr, node->start);
 122                        return false;
 123                }
 124
 125                if (node->size != size) {
 126                        pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
 127                               n, size, node->size);
 128                        return false;
 129                }
 130
 131                if (drm_mm_hole_follows(node)) {
 132                        pr_err("node[%ld] is followed by a hole!\n", n);
 133                        return false;
 134                }
 135
 136                found = NULL;
 137                drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
 138                        if (node != check) {
 139                                pr_err("lookup returned wrong node, expected start %llx, found %llx\n",
 140                                       node->start, check->start);
 141                                return false;
 142                        }
 143                        found = check;
 144                }
 145                if (!found) {
 146                        pr_err("lookup failed for node %llx + %llx\n",
 147                               addr, size);
 148                        return false;
 149                }
 150
 151                addr += size;
 152                n++;
 153        }
 154
 155        return true;
 156}
 157
 158static u64 misalignment(struct drm_mm_node *node, u64 alignment)
 159{
 160        u64 rem;
 161
 162        if (!alignment)
 163                return 0;
 164
 165        div64_u64_rem(node->start, alignment, &rem);
 166        return rem;
 167}
 168
 169static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
 170                        u64 size, u64 alignment, unsigned long color)
 171{
 172        bool ok = true;
 173
 174        if (!drm_mm_node_allocated(node) || node->mm != mm) {
 175                pr_err("node not allocated\n");
 176                ok = false;
 177        }
 178
 179        if (node->size != size) {
 180                pr_err("node has wrong size, found %llu, expected %llu\n",
 181                       node->size, size);
 182                ok = false;
 183        }
 184
 185        if (misalignment(node, alignment)) {
 186                pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
 187                       node->start, misalignment(node, alignment), alignment);
 188                ok = false;
 189        }
 190
 191        if (node->color != color) {
 192                pr_err("node has wrong color, found %lu, expected %lu\n",
 193                       node->color, color);
 194                ok = false;
 195        }
 196
 197        return ok;
 198}
 199
 200#define show_mm(mm) do { \
 201        struct drm_printer __p = drm_debug_printer(__func__); \
 202        drm_mm_print((mm), &__p); } while (0)
 203
 204static int igt_init(void *ignored)
 205{
 206        const unsigned int size = 4096;
 207        struct drm_mm mm;
 208        struct drm_mm_node tmp;
 209        int ret = -EINVAL;
 210
 211        /* Start with some simple checks on initialising the struct drm_mm */
 212        memset(&mm, 0, sizeof(mm));
 213        if (drm_mm_initialized(&mm)) {
 214                pr_err("zeroed mm claims to be initialized\n");
 215                return ret;
 216        }
 217
 218        memset(&mm, 0xff, sizeof(mm));
 219        drm_mm_init(&mm, 0, size);
 220        if (!drm_mm_initialized(&mm)) {
 221                pr_err("mm claims not to be initialized\n");
 222                goto out;
 223        }
 224
 225        if (!drm_mm_clean(&mm)) {
 226                pr_err("mm not empty on creation\n");
 227                goto out;
 228        }
 229
 230        /* After creation, it should all be one massive hole */
 231        if (!assert_one_hole(&mm, 0, size)) {
 232                ret = -EINVAL;
 233                goto out;
 234        }
 235
 236        memset(&tmp, 0, sizeof(tmp));
 237        tmp.start = 0;
 238        tmp.size = size;
 239        ret = drm_mm_reserve_node(&mm, &tmp);
 240        if (ret) {
 241                pr_err("failed to reserve whole drm_mm\n");
 242                goto out;
 243        }
 244
 245        /* After filling the range entirely, there should be no holes */
 246        if (!assert_no_holes(&mm)) {
 247                ret = -EINVAL;
 248                goto out;
 249        }
 250
 251        /* And then after emptying it again, the massive hole should be back */
 252        drm_mm_remove_node(&tmp);
 253        if (!assert_one_hole(&mm, 0, size)) {
 254                ret = -EINVAL;
 255                goto out;
 256        }
 257
 258out:
 259        if (ret)
 260                show_mm(&mm);
 261        drm_mm_takedown(&mm);
 262        return ret;
 263}
 264
 265static int igt_debug(void *ignored)
 266{
 267        struct drm_mm mm;
 268        struct drm_mm_node nodes[2];
 269        int ret;
 270
 271        /* Create a small drm_mm with a couple of nodes and a few holes, and
 272         * check that the debug iterator doesn't explode over a trivial drm_mm.
 273         */
 274
 275        drm_mm_init(&mm, 0, 4096);
 276
 277        memset(nodes, 0, sizeof(nodes));
 278        nodes[0].start = 512;
 279        nodes[0].size = 1024;
 280        ret = drm_mm_reserve_node(&mm, &nodes[0]);
 281        if (ret) {
 282                pr_err("failed to reserve node[0] {start=%lld, size=%lld}\n",
 283                       nodes[0].start, nodes[0].size);
 284                return ret;
 285        }
 286
 287        nodes[1].size = 1024;
 288        nodes[1].start = 4096 - 512 - nodes[1].size;
 289        ret = drm_mm_reserve_node(&mm, &nodes[1]);
 290        if (ret) {
 291                pr_err("failed to reserve node[1] {start=%lld, size=%lld}\n",
 292                       nodes[1].start, nodes[1].size);
 293                return ret;
 294        }
 295
 296        show_mm(&mm);
 297        return 0;
 298}
 299
 300static struct drm_mm_node *set_node(struct drm_mm_node *node,
 301                                    u64 start, u64 size)
 302{
 303        node->start = start;
 304        node->size = size;
 305        return node;
 306}
 307
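/* Try a reservation that must be rejected: anything other than -ENOSPC
 * (including an unexpected success) is reported as a test failure.
 */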
 308static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
 309{
 310        int err;
 311
 312        err = drm_mm_reserve_node(mm, node);
 313        if (likely(err == -ENOSPC))
 314                return true;
 315
 316        if (!err) {
 317                pr_err("impossible reserve succeeded, node %llu + %llu\n",
 318                       node->start, node->size);
 319                drm_mm_remove_node(node);
 320        } else {
 321                pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
 322                       err, -ENOSPC, node->start, node->size);
 323        }
 324        return false;
 325}
 326
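/* Probe degenerate reservations (zero-sized, wrapping, or lying outside or
 * straddling the managed range [0, count * size)); drm_mm_reserve_node()
 * must reject every one of them.
 */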
 327static bool check_reserve_boundaries(struct drm_mm *mm,
 328                                     unsigned int count,
 329                                     u64 size)
 330{
 331        const struct boundary {
 332                u64 start, size;
 333                const char *name;
 334        } boundaries[] = {
 335#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
 336                B(0, 0),
 337                B(-size, 0),
 338                B(size, 0),
 339                B(size * count, 0),
 340                B(-size, size),
 341                B(-size, -size),
 342                B(-size, 2*size),
 343                B(0, -size),
 344                B(size, -size),
 345                B(count*size, size),
 346                B(count*size, -size),
 347                B(count*size, count*size),
 348                B(count*size, -count*size),
 349                B(count*size, -(count+1)*size),
 350                B((count+1)*size, size),
 351                B((count+1)*size, -size),
 352                B((count+1)*size, -2*size),
 353#undef B
 354        };
 355        struct drm_mm_node tmp = {};
 356        int n;
 357
 358        for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
 359                if (!expect_reserve_fail(mm,
 360                                         set_node(&tmp,
 361                                                  boundaries[n].start,
 362                                                  boundaries[n].size))) {
 363                        pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
 364                               n, boundaries[n].name, count, size);
 365                        return false;
 366                }
 367        }
 368
 369        return true;
 370}
 371
 372static int __igt_reserve(unsigned int count, u64 size)
 373{
 374        DRM_RND_STATE(prng, random_seed);
 375        struct drm_mm mm;
 376        struct drm_mm_node tmp, *nodes, *node, *next;
 377        unsigned int *order, n, m, o = 0;
 378        int ret, err;
 379
 380        /* For exercising drm_mm_reserve_node(), we want to check that
 381         * reservations outside of the drm_mm range are rejected, as are
 382         * those overlapping or otherwise already occupied ranges.
 383         * Afterwards, the tree and nodes should be intact.
 384         */
 385
 386        DRM_MM_BUG_ON(!count);
 387        DRM_MM_BUG_ON(!size);
 388
 389        ret = -ENOMEM;
 390        order = drm_random_order(count, &prng);
 391        if (!order)
 392                goto err;
 393
 394        nodes = vzalloc(array_size(count, sizeof(*nodes)));
 395        if (!nodes)
 396                goto err_order;
 397
 398        ret = -EINVAL;
 399        drm_mm_init(&mm, 0, count * size);
 400
 401        if (!check_reserve_boundaries(&mm, count, size))
 402                goto out;
 403
 404        for (n = 0; n < count; n++) {
 405                nodes[n].start = order[n] * size;
 406                nodes[n].size = size;
 407
 408                err = drm_mm_reserve_node(&mm, &nodes[n]);
 409                if (err) {
 410                        pr_err("reserve failed, step %d, start %llu\n",
 411                               n, nodes[n].start);
 412                        ret = err;
 413                        goto out;
 414                }
 415
 416                if (!drm_mm_node_allocated(&nodes[n])) {
 417                        pr_err("reserved node not allocated! step %d, start %llu\n",
 418                               n, nodes[n].start);
 419                        goto out;
 420                }
 421
 422                if (!expect_reserve_fail(&mm, &nodes[n]))
 423                        goto out;
 424        }
 425
 426        /* After random insertion the nodes should be in order */
 427        if (!assert_continuous(&mm, size))
 428                goto out;
 429
 430        /* Repeated use should then fail */
 431        drm_random_reorder(order, count, &prng);
 432        for (n = 0; n < count; n++) {
 433                if (!expect_reserve_fail(&mm,
 434                                         set_node(&tmp, order[n] * size, 1)))
 435                        goto out;
 436
 437                /* Remove and reinsert should work */
 438                drm_mm_remove_node(&nodes[order[n]]);
 439                err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
 440                if (err) {
 441                        pr_err("reserve failed, step %d, start %llu\n",
 442                               n, nodes[order[n]].start);
 443                        ret = err;
 444                        goto out;
 445                }
 446        }
 447
 448        if (!assert_continuous(&mm, size))
 449                goto out;
 450
 451        /* Overlapping use should then fail */
 452        for (n = 0; n < count; n++) {
 453                if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
 454                        goto out;
 455        }
 456        for (n = 0; n < count; n++) {
 457                if (!expect_reserve_fail(&mm,
 458                                         set_node(&tmp,
 459                                                  size * n,
 460                                                  size * (count - n))))
 461                        goto out;
 462        }
 463
 464        /* Remove several, reinsert, check full */
 465        for_each_prime_number(n, min(max_prime, count)) {
 466                for (m = 0; m < n; m++) {
 467                        node = &nodes[order[(o + m) % count]];
 468                        drm_mm_remove_node(node);
 469                }
 470
 471                for (m = 0; m < n; m++) {
 472                        node = &nodes[order[(o + m) % count]];
 473                        err = drm_mm_reserve_node(&mm, node);
 474                        if (err) {
 475                                pr_err("reserve failed, step %d/%d, start %llu\n",
 476                                       m, n, node->start);
 477                                ret = err;
 478                                goto out;
 479                        }
 480                }
 481
 482                o += n;
 483
 484                if (!assert_continuous(&mm, size))
 485                        goto out;
 486        }
 487
 488        ret = 0;
 489out:
 490        drm_mm_for_each_node_safe(node, next, &mm)
 491                drm_mm_remove_node(node);
 492        drm_mm_takedown(&mm);
 493        vfree(nodes);
 494err_order:
 495        kfree(order);
 496err:
 497        return ret;
 498}
 499
 500static int igt_reserve(void *ignored)
 501{
 502        const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
 503        int n, ret;
 504
 505        for_each_prime_number_from(n, 1, 54) {
 506                u64 size = BIT_ULL(n);
 507
 508                ret = __igt_reserve(count, size - 1);
 509                if (ret)
 510                        return ret;
 511
 512                ret = __igt_reserve(count, size);
 513                if (ret)
 514                        return ret;
 515
 516                ret = __igt_reserve(count, size + 1);
 517                if (ret)
 518                        return ret;
 519
 520                cond_resched();
 521        }
 522
 523        return 0;
 524}
 525
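/* Insert a node with the given size/alignment/color using the requested
 * insertion mode and verify the result via assert_node(); if the inserted
 * node does not match, it is removed again and failure reported.
 */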
 526static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
 527                          u64 size, u64 alignment, unsigned long color,
 528                          const struct insert_mode *mode)
 529{
 530        int err;
 531
 532        err = drm_mm_insert_node_generic(mm, node,
 533                                         size, alignment, color,
 534                                         mode->mode);
 535        if (err) {
 536                pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
 537                       size, alignment, color, mode->name, err);
 538                return false;
 539        }
 540
 541        if (!assert_node(node, mm, size, alignment, color)) {
 542                drm_mm_remove_node(node);
 543                return false;
 544        }
 545
 546        return true;
 547}
 548
 549static bool expect_insert_fail(struct drm_mm *mm, u64 size)
 550{
 551        struct drm_mm_node tmp = {};
 552        int err;
 553
 554        err = drm_mm_insert_node(mm, &tmp, size);
 555        if (likely(err == -ENOSPC))
 556                return true;
 557
 558        if (!err) {
 559                pr_err("impossible insert succeeded, node %llu + %llu\n",
 560                       tmp.start, tmp.size);
 561                drm_mm_remove_node(&tmp);
 562        } else {
 563                pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
 564                       err, -ENOSPC, size);
 565        }
 566        return false;
 567}
 568
 569static int __igt_insert(unsigned int count, u64 size, bool replace)
 570{
 571        DRM_RND_STATE(prng, random_seed);
 572        const struct insert_mode *mode;
 573        struct drm_mm mm;
 574        struct drm_mm_node *nodes, *node, *next;
 575        unsigned int *order, n, m, o = 0;
 576        int ret;
 577
 578        /* Fill a range with lots of nodes, check it doesn't fail too early */
 579
 580        DRM_MM_BUG_ON(!count);
 581        DRM_MM_BUG_ON(!size);
 582
 583        ret = -ENOMEM;
 584        nodes = vmalloc(array_size(count, sizeof(*nodes)));
 585        if (!nodes)
 586                goto err;
 587
 588        order = drm_random_order(count, &prng);
 589        if (!order)
 590                goto err_nodes;
 591
 592        ret = -EINVAL;
 593        drm_mm_init(&mm, 0, count * size);
 594
 595        for (mode = insert_modes; mode->name; mode++) {
 596                for (n = 0; n < count; n++) {
 597                        struct drm_mm_node tmp;
 598
 599                        node = replace ? &tmp : &nodes[n];
 600                        memset(node, 0, sizeof(*node));
 601                        if (!expect_insert(&mm, node, size, 0, n, mode)) {
 602                                pr_err("%s insert failed, size %llu step %d\n",
 603                                       mode->name, size, n);
 604                                goto out;
 605                        }
 606
 607                        if (replace) {
 608                                drm_mm_replace_node(&tmp, &nodes[n]);
 609                                if (drm_mm_node_allocated(&tmp)) {
 610                                        pr_err("replaced old-node still allocated! step %d\n",
 611                                               n);
 612                                        goto out;
 613                                }
 614
 615                                if (!assert_node(&nodes[n], &mm, size, 0, n)) {
 616                                        pr_err("replaced node did not inherit parameters, size %llu step %d\n",
 617                                               size, n);
 618                                        goto out;
 619                                }
 620
 621                                if (tmp.start != nodes[n].start) {
 622                                        pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
 623                                               tmp.start, size,
 624                                               nodes[n].start, nodes[n].size);
 625                                        goto out;
 626                                }
 627                        }
 628                }
 629
 630                /* After random insertion the nodes should be in order */
 631                if (!assert_continuous(&mm, size))
 632                        goto out;
 633
 634                /* Repeated use should then fail */
 635                if (!expect_insert_fail(&mm, size))
 636                        goto out;
 637
 638                /* Remove one node and reinsert it; as the only hole, it should refill itself */
 639                for (n = 0; n < count; n++) {
 640                        u64 addr = nodes[n].start;
 641
 642                        drm_mm_remove_node(&nodes[n]);
 643                        if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
 644                                pr_err("%s reinsert failed, size %llu step %d\n",
 645                                       mode->name, size, n);
 646                                goto out;
 647                        }
 648
 649                        if (nodes[n].start != addr) {
 650                                pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
 651                                       mode->name, n, addr, nodes[n].start);
 652                                goto out;
 653                        }
 654
 655                        if (!assert_continuous(&mm, size))
 656                                goto out;
 657                }
 658
 659                /* Remove several, reinsert, check full */
 660                for_each_prime_number(n, min(max_prime, count)) {
 661                        for (m = 0; m < n; m++) {
 662                                node = &nodes[order[(o + m) % count]];
 663                                drm_mm_remove_node(node);
 664                        }
 665
 666                        for (m = 0; m < n; m++) {
 667                                node = &nodes[order[(o + m) % count]];
 668                                if (!expect_insert(&mm, node, size, 0, n, mode)) {
 669                                        pr_err("%s multiple reinsert failed, size %llu step %d\n",
 670                                               mode->name, size, n);
 671                                        goto out;
 672                                }
 673                        }
 674
 675                        o += n;
 676
 677                        if (!assert_continuous(&mm, size))
 678                                goto out;
 679
 680                        if (!expect_insert_fail(&mm, size))
 681                                goto out;
 682                }
 683
 684                drm_mm_for_each_node_safe(node, next, &mm)
 685                        drm_mm_remove_node(node);
 686                DRM_MM_BUG_ON(!drm_mm_clean(&mm));
 687
 688                cond_resched();
 689        }
 690
 691        ret = 0;
 692out:
 693        drm_mm_for_each_node_safe(node, next, &mm)
 694                drm_mm_remove_node(node);
 695        drm_mm_takedown(&mm);
 696        kfree(order);
 697err_nodes:
 698        vfree(nodes);
 699err:
 700        return ret;
 701}
 702
 703static int igt_insert(void *ignored)
 704{
 705        const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
 706        unsigned int n;
 707        int ret;
 708
 709        for_each_prime_number_from(n, 1, 54) {
 710                u64 size = BIT_ULL(n);
 711
 712                ret = __igt_insert(count, size - 1, false);
 713                if (ret)
 714                        return ret;
 715
 716                ret = __igt_insert(count, size, false);
 717                if (ret)
 718                        return ret;
 719
 720                ret = __igt_insert(count, size + 1, false);
 721                if (ret)
 722                        return ret;
 723
 724                cond_resched();
 725        }
 726
 727        return 0;
 728}
 729
 730static int igt_replace(void *ignored)
 731{
 732        const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
 733        unsigned int n;
 734        int ret;
 735
 736        /* Reuse igt_insert to exercise replacement by inserting a dummy node,
 737         * then replacing it with the intended node. We want to check that
 738         * the tree is intact and all the information we need is carried
 739         * across to the target node.
 740         */
 741
 742        for_each_prime_number_from(n, 1, 54) {
 743                u64 size = BIT_ULL(n);
 744
 745                ret = __igt_insert(count, size - 1, true);
 746                if (ret)
 747                        return ret;
 748
 749                ret = __igt_insert(count, size, true);
 750                if (ret)
 751                        return ret;
 752
 753                ret = __igt_insert(count, size + 1, true);
 754                if (ret)
 755                        return ret;
 756
 757                cond_resched();
 758        }
 759
 760        return 0;
 761}
 762
 763static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
 764                                   u64 size, u64 alignment, unsigned long color,
 765                                   u64 range_start, u64 range_end,
 766                                   const struct insert_mode *mode)
 767{
 768        int err;
 769
 770        err = drm_mm_insert_node_in_range(mm, node,
 771                                          size, alignment, color,
 772                                          range_start, range_end,
 773                                          mode->mode);
 774        if (err) {
 775                pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) into range [%llx, %llx] failed with err=%d\n",
 776                       size, alignment, color, mode->name,
 777                       range_start, range_end, err);
 778                return false;
 779        }
 780
 781        if (!assert_node(node, mm, size, alignment, color)) {
 782                drm_mm_remove_node(node);
 783                return false;
 784        }
 785
 786        return true;
 787}
 788
 789static bool expect_insert_in_range_fail(struct drm_mm *mm,
 790                                        u64 size,
 791                                        u64 range_start,
 792                                        u64 range_end)
 793{
 794        struct drm_mm_node tmp = {};
 795        int err;
 796
 797        err = drm_mm_insert_node_in_range(mm, &tmp,
 798                                          size, 0, 0,
 799                                          range_start, range_end,
 800                                          0);
 801        if (likely(err == -ENOSPC))
 802                return true;
 803
 804        if (!err) {
 805                pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
 806                       tmp.start, tmp.size, range_start, range_end);
 807                drm_mm_remove_node(&tmp);
 808        } else {
 809                pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
 810                       err, -ENOSPC, size, range_start, range_end);
 811        }
 812
 813        return false;
 814}
 815
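/* Check that [start, end) has been completely filled with equally sized,
 * size-aligned nodes and that no allocated node leaks outside that range.
 */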
 816static bool assert_contiguous_in_range(struct drm_mm *mm,
 817                                       u64 size,
 818                                       u64 start,
 819                                       u64 end)
 820{
 821        struct drm_mm_node *node;
 822        unsigned int n;
 823
 824        if (!expect_insert_in_range_fail(mm, size, start, end))
 825                return false;
 826
 827        n = div64_u64(start + size - 1, size);
 828        drm_mm_for_each_node(node, mm) {
 829                if (node->start < start || node->start + node->size > end) {
 830                        pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
 831                               n, node->start, node->start + node->size, start, end);
 832                        return false;
 833                }
 834
 835                if (node->start != n * size) {
 836                        pr_err("node %d out of order, expected start %llx, found %llx\n",
 837                               n, n * size, node->start);
 838                        return false;
 839                }
 840
 841                if (node->size != size) {
 842                        pr_err("node %d has wrong size, expected size %llx, found %llx\n",
 843                               n, size, node->size);
 844                        return false;
 845                }
 846
 847                if (drm_mm_hole_follows(node) &&
 848                    drm_mm_hole_node_end(node) < end) {
 849                        pr_err("node %d is followed by a hole!\n", n);
 850                        return false;
 851                }
 852
 853                n++;
 854        }
 855
 856        if (start > 0) {
 857                node = __drm_mm_interval_first(mm, 0, start - 1);
 858                if (drm_mm_node_allocated(node)) {
 859                        pr_err("node before start: node=%llx+%llu, start=%llx\n",
 860                               node->start, node->size, start);
 861                        return false;
 862                }
 863        }
 864
 865        if (end < U64_MAX) {
 866                node = __drm_mm_interval_first(mm, end, U64_MAX);
 867                if (drm_mm_node_allocated(node)) {
 868                        pr_err("node after end: node=%llx+%llu, end=%llx\n",
 869                               node->start, node->size, end);
 870                        return false;
 871                }
 872        }
 873
 874        return true;
 875}
 876
 877static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
 878{
 879        const struct insert_mode *mode;
 880        struct drm_mm mm;
 881        struct drm_mm_node *nodes, *node, *next;
 882        unsigned int n, start_n, end_n;
 883        int ret;
 884
 885        DRM_MM_BUG_ON(!count);
 886        DRM_MM_BUG_ON(!size);
 887        DRM_MM_BUG_ON(end <= start);
 888
 889        /* Very similar to __igt_insert(), but now instead of populating the
 890         * full range of the drm_mm, we try to fill a small portion of it.
 891         */
 892
 893        ret = -ENOMEM;
 894        nodes = vzalloc(array_size(count, sizeof(*nodes)));
 895        if (!nodes)
 896                goto err;
 897
 898        ret = -EINVAL;
 899        drm_mm_init(&mm, 0, count * size);
 900
 901        start_n = div64_u64(start + size - 1, size);
 902        end_n = div64_u64(end - size, size);
 903
 904        for (mode = insert_modes; mode->name; mode++) {
 905                for (n = start_n; n <= end_n; n++) {
 906                        if (!expect_insert_in_range(&mm, &nodes[n],
 907                                                    size, size, n,
 908                                                    start, end, mode)) {
 909                                pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
 910                                       mode->name, size, n,
 911                                       start_n, end_n,
 912                                       start, end);
 913                                goto out;
 914                        }
 915                }
 916
 917                if (!assert_contiguous_in_range(&mm, size, start, end)) {
 918                        pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
 919                               mode->name, start, end, size);
 920                        goto out;
 921                }
 922
 923                /* Remove one and reinsert, it should refill itself */
 924                for (n = start_n; n <= end_n; n++) {
 925                        u64 addr = nodes[n].start;
 926
 927                        drm_mm_remove_node(&nodes[n]);
 928                        if (!expect_insert_in_range(&mm, &nodes[n],
 929                                                    size, size, n,
 930                                                    start, end, mode)) {
 931                                pr_err("%s reinsert failed, step %d\n", mode->name, n);
 932                                goto out;
 933                        }
 934
 935                        if (nodes[n].start != addr) {
 936                                pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
 937                                       mode->name, n, addr, nodes[n].start);
 938                                goto out;
 939                        }
 940                }
 941
 942                if (!assert_contiguous_in_range(&mm, size, start, end)) {
 943                        pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
 944                               mode->name, start, end, size);
 945                        goto out;
 946                }
 947
 948                drm_mm_for_each_node_safe(node, next, &mm)
 949                        drm_mm_remove_node(node);
 950                DRM_MM_BUG_ON(!drm_mm_clean(&mm));
 951
 952                cond_resched();
 953        }
 954
 955        ret = 0;
 956out:
 957        drm_mm_for_each_node_safe(node, next, &mm)
 958                drm_mm_remove_node(node);
 959        drm_mm_takedown(&mm);
 960        vfree(nodes);
 961err:
 962        return ret;
 963}
 964
 965static int insert_outside_range(void)
 966{
 967        struct drm_mm mm;
 968        const unsigned int start = 1024;
 969        const unsigned int end = 2048;
 970        const unsigned int size = end - start;
 971
 972        drm_mm_init(&mm, start, size);
 973
 974        if (!expect_insert_in_range_fail(&mm, 1, 0, start))
 975                return -EINVAL;
 976
 977        if (!expect_insert_in_range_fail(&mm, size,
 978                                         start - size/2, start + (size+1)/2))
 979                return -EINVAL;
 980
 981        if (!expect_insert_in_range_fail(&mm, size,
 982                                         end - (size+1)/2, end + size/2))
 983                return -EINVAL;
 984
 985        if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
 986                return -EINVAL;
 987
 988        drm_mm_takedown(&mm);
 989        return 0;
 990}
 991
 992static int igt_insert_range(void *ignored)
 993{
 994        const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
 995        unsigned int n;
 996        int ret;
 997
 998        /* Check that requests outside the bounds of drm_mm are rejected. */
 999        ret = insert_outside_range();
1000        if (ret)
1001                return ret;
1002
1003        for_each_prime_number_from(n, 1, 50) {
1004                const u64 size = BIT_ULL(n);
1005                const u64 max = count * size;
1006
1007                ret = __igt_insert_range(count, size, 0, max);
1008                if (ret)
1009                        return ret;
1010
1011                ret = __igt_insert_range(count, size, 1, max);
1012                if (ret)
1013                        return ret;
1014
1015                ret = __igt_insert_range(count, size, 0, max - 1);
1016                if (ret)
1017                        return ret;
1018
1019                ret = __igt_insert_range(count, size, 0, max/2);
1020                if (ret)
1021                        return ret;
1022
1023                ret = __igt_insert_range(count, size, max/2, max);
1024                if (ret)
1025                        return ret;
1026
1027                ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1028                if (ret)
1029                        return ret;
1030
1031                cond_resched();
1032        }
1033
1034        return 0;
1035}
1036
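/* Fill the mm with num_insert 4 KiB nodes and then free every other one,
 * leaving behind a badly fragmented address space for the timing runs.
 */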
1037static int prepare_igt_frag(struct drm_mm *mm,
1038                            struct drm_mm_node *nodes,
1039                            unsigned int num_insert,
1040                            const struct insert_mode *mode)
1041{
1042        unsigned int size = 4096;
1043        unsigned int i;
1044
1045        for (i = 0; i < num_insert; i++) {
1046                if (!expect_insert(mm, &nodes[i], size, 0, i,
1047                                   mode)) {
1048                        pr_err("%s insert failed\n", mode->name);
1049                        return -EINVAL;
1050                }
1051        }
1052
1053        /* introduce fragmentation by freeing every other node */
1054        for (i = 0; i < num_insert; i++) {
1055                if (i % 2 == 0)
1056                        drm_mm_remove_node(&nodes[i]);
1057        }
1058
1059        return 0;
1060
1061}
1062
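/* Time num_insert insertions of 8 KiB nodes into the (fragmented) mm;
 * returns the elapsed time in nanoseconds, or 0 on failure.
 */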
1063static u64 get_insert_time(struct drm_mm *mm,
1064                           unsigned int num_insert,
1065                           struct drm_mm_node *nodes,
1066                           const struct insert_mode *mode)
1067{
1068        unsigned int size = 8192;
1069        ktime_t start;
1070        unsigned int i;
1071
1072        start = ktime_get();
1073        for (i = 0; i < num_insert; i++) {
1074                if (!expect_insert(mm, &nodes[i], size, 0, i, mode)) {
1075                        pr_err("%s insert failed\n", mode->name);
1076                        return 0;
1077                }
1078        }
1079
1080        return ktime_to_ns(ktime_sub(ktime_get(), start));
1081}
1082
1083static int igt_frag(void *ignored)
1084{
1085        struct drm_mm mm;
1086        const struct insert_mode *mode;
1087        struct drm_mm_node *nodes, *node, *next;
1088        unsigned int insert_size = 10000;
1089        unsigned int scale_factor = 4;
1090        int ret = -EINVAL;
1091
1092        /* We need 4 * insert_size nodes to hold the intermediately
1093         * allocated drm_mm nodes:
1094         * 1 * insert_size for prepare_igt_frag(),
1095         * 1 * insert_size for the first get_insert_time() pass,
1096         * 2 * insert_size for the second get_insert_time() pass.
1097         */
1098        nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
1099        if (!nodes)
1100                return -ENOMEM;
1101
1102        /* For BOTTOMUP and TOPDOWN, we first fragment the address space
1103         * using prepare_igt_frag() and then verify that insertion times
1104         * scale no worse than quadratically from 10k to 20k insertions.
1105         */
1106        drm_mm_init(&mm, 1, U64_MAX - 2);
1107        for (mode = insert_modes; mode->name; mode++) {
1108                u64 insert_time1, insert_time2;
1109
1110                if (mode->mode != DRM_MM_INSERT_LOW &&
1111                    mode->mode != DRM_MM_INSERT_HIGH)
1112                        continue;
1113
1114                ret = prepare_igt_frag(&mm, nodes, insert_size, mode);
1115                if (ret)
1116                        goto err;
1117
1118                insert_time1 = get_insert_time(&mm, insert_size,
1119                                               nodes + insert_size, mode);
1120                if (insert_time1 == 0)
1121                        goto err;
1122
1123                insert_time2 = get_insert_time(&mm, (insert_size * 2),
1124                                               nodes + insert_size * 2, mode);
1125                if (insert_time2 == 0)
1126                        goto err;
1127
1128                pr_info("%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
1129                        mode->name, insert_size, insert_size * 2,
1130                        insert_time1, insert_time2);
1131
1132                if (insert_time2 > (scale_factor * insert_time1)) {
1133                        pr_err("%s fragmented insert took %llu nsecs more\n",
1134                               mode->name,
1135                               insert_time2 - (scale_factor * insert_time1));
1136                        goto err;
1137                }
1138
1139                drm_mm_for_each_node_safe(node, next, &mm)
1140                        drm_mm_remove_node(node);
1141        }
1142
1143        ret = 0;
1144err:
1145        drm_mm_for_each_node_safe(node, next, &mm)
1146                drm_mm_remove_node(node);
1147        drm_mm_takedown(&mm);
1148        vfree(nodes);
1149
1150        return ret;
1151}
1152
1153static int igt_align(void *ignored)
1154{
1155        const struct insert_mode *mode;
1156        const unsigned int max_count = min(8192u, max_prime);
1157        struct drm_mm mm;
1158        struct drm_mm_node *nodes, *node, *next;
1159        unsigned int prime;
1160        int ret = -EINVAL;
1161
1162        /* For each of the possible insertion modes, we pick a few
1163         * arbitrary alignments and check that the inserted node
1164         * meets our requirements.
1165         */
1166
1167        nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
1168        if (!nodes)
1169                goto err;
1170
1171        drm_mm_init(&mm, 1, U64_MAX - 2);
1172
1173        for (mode = insert_modes; mode->name; mode++) {
1174                unsigned int i = 0;
1175
1176                for_each_prime_number_from(prime, 1, max_count) {
1177                        u64 size = next_prime_number(prime);
1178
1179                        if (!expect_insert(&mm, &nodes[i],
1180                                           size, prime, i,
1181                                           mode)) {
1182                                pr_err("%s insert failed with alignment=%d\n",
1183                                       mode->name, prime);
1184                                goto out;
1185                        }
1186
1187                        i++;
1188                }
1189
1190                drm_mm_for_each_node_safe(node, next, &mm)
1191                        drm_mm_remove_node(node);
1192                DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1193
1194                cond_resched();
1195        }
1196
1197        ret = 0;
1198out:
1199        drm_mm_for_each_node_safe(node, next, &mm)
1200                drm_mm_remove_node(node);
1201        drm_mm_takedown(&mm);
1202        vfree(nodes);
1203err:
1204        return ret;
1205}
1206
1207static int igt_align_pot(int max)
1208{
1209        struct drm_mm mm;
1210        struct drm_mm_node *node, *next;
1211        int bit;
1212        int ret = -EINVAL;
1213
1214        /* Check that we can align to the full u64 address space */
1215
1216        drm_mm_init(&mm, 1, U64_MAX - 2);
1217
1218        for (bit = max - 1; bit; bit--) {
1219                u64 align, size;
1220
1221                node = kzalloc(sizeof(*node), GFP_KERNEL);
1222                if (!node) {
1223                        ret = -ENOMEM;
1224                        goto out;
1225                }
1226
1227                align = BIT_ULL(bit);
1228                size = BIT_ULL(bit-1) + 1;
1229                if (!expect_insert(&mm, node,
1230                                   size, align, bit,
1231                                   &insert_modes[0])) {
1232                        pr_err("insert failed with alignment=%llx [%d]\n",
1233                               align, bit);
1234                        goto out;
1235                }
1236
1237                cond_resched();
1238        }
1239
1240        ret = 0;
1241out:
1242        drm_mm_for_each_node_safe(node, next, &mm) {
1243                drm_mm_remove_node(node);
1244                kfree(node);
1245        }
1246        drm_mm_takedown(&mm);
1247        return ret;
1248}
1249
1250static int igt_align32(void *ignored)
1251{
1252        return igt_align_pot(32);
1253}
1254
1255static int igt_align64(void *ignored)
1256{
1257        return igt_align_pot(64);
1258}
1259
1260static void show_scan(const struct drm_mm_scan *scan)
1261{
1262        pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
1263                scan->hit_start, scan->hit_end,
1264                scan->size, scan->alignment, scan->color);
1265}
1266
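/* Dump up to count holes, together with their neighbouring nodes, to help
 * diagnose why an eviction scan failed to produce a usable hole.
 */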
1267static void show_holes(const struct drm_mm *mm, int count)
1268{
1269        u64 hole_start, hole_end;
1270        struct drm_mm_node *hole;
1271
1272        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
1273                struct drm_mm_node *next = list_next_entry(hole, node_list);
1274                const char *node1 = NULL, *node2 = NULL;
1275
1276                if (drm_mm_node_allocated(hole))
1277                        node1 = kasprintf(GFP_KERNEL,
1278                                          "[%llx + %lld, color=%ld], ",
1279                                          hole->start, hole->size, hole->color);
1280
1281                if (drm_mm_node_allocated(next))
1282                        node2 = kasprintf(GFP_KERNEL,
1283                                          ", [%llx + %lld, color=%ld]",
1284                                          next->start, next->size, next->color);
1285
1286                pr_info("%sHole [%llx - %llx, size %lld]%s\n",
1287                        node1,
1288                        hole_start, hole_end, hole_end - hole_start,
1289                        node2);
1290
1291                kfree(node2);
1292                kfree(node1);
1293
1294                if (!--count)
1295                        break;
1296        }
1297}
1298
1299struct evict_node {
1300        struct drm_mm_node node;
1301        struct list_head link;
1302};
1303
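/* Feed the candidate nodes to the eviction scanner in the given order, back
 * out the ones the scanner does not need, then remove the selected nodes
 * from the mm (collecting any additional colour-adjusted evictions when
 * use_color is set).
 */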
1304static bool evict_nodes(struct drm_mm_scan *scan,
1305                        struct evict_node *nodes,
1306                        unsigned int *order,
1307                        unsigned int count,
1308                        bool use_color,
1309                        struct list_head *evict_list)
1310{
1311        struct evict_node *e, *en;
1312        unsigned int i;
1313
1314        for (i = 0; i < count; i++) {
1315                e = &nodes[order ? order[i] : i];
1316                list_add(&e->link, evict_list);
1317                if (drm_mm_scan_add_block(scan, &e->node))
1318                        break;
1319        }
1320        list_for_each_entry_safe(e, en, evict_list, link) {
1321                if (!drm_mm_scan_remove_block(scan, &e->node))
1322                        list_del(&e->link);
1323        }
1324        if (list_empty(evict_list)) {
1325                pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
1326                       scan->size, count, scan->alignment, scan->color);
1327                return false;
1328        }
1329
1330        list_for_each_entry(e, evict_list, link)
1331                drm_mm_remove_node(&e->node);
1332
1333        if (use_color) {
1334                struct drm_mm_node *node;
1335
1336                while ((node = drm_mm_scan_color_evict(scan))) {
1337                        e = container_of(node, typeof(*e), node);
1338                        drm_mm_remove_node(&e->node);
1339                        list_add(&e->link, evict_list);
1340                }
1341        } else {
1342                if (drm_mm_scan_color_evict(scan)) {
1343                        pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
1344                        return false;
1345                }
1346        }
1347
1348        return true;
1349}
1350
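/* Run a full scan over every node but then abort it without evicting
 * anything; the mm must be left exactly as it was.
 */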
1351static bool evict_nothing(struct drm_mm *mm,
1352                          unsigned int total_size,
1353                          struct evict_node *nodes)
1354{
1355        struct drm_mm_scan scan;
1356        LIST_HEAD(evict_list);
1357        struct evict_node *e;
1358        struct drm_mm_node *node;
1359        unsigned int n;
1360
1361        drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
1362        for (n = 0; n < total_size; n++) {
1363                e = &nodes[n];
1364                list_add(&e->link, &evict_list);
1365                drm_mm_scan_add_block(&scan, &e->node);
1366        }
1367        list_for_each_entry(e, &evict_list, link)
1368                drm_mm_scan_remove_block(&scan, &e->node);
1369
1370        for (n = 0; n < total_size; n++) {
1371                e = &nodes[n];
1372
1373                if (!drm_mm_node_allocated(&e->node)) {
1374                        pr_err("node[%d] no longer allocated!\n", n);
1375                        return false;
1376                }
1377
1378                e->link.next = NULL;
1379        }
1380
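        /* Every node still reachable from the mm marks itself by pointing
         * link.next back at its own link; any node left with a NULL
         * link.next below has fallen off the list.
         */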
1381        drm_mm_for_each_node(node, mm) {
1382                e = container_of(node, typeof(*e), node);
1383                e->link.next = &e->link;
1384        }
1385
1386        for (n = 0; n < total_size; n++) {
1387                e = &nodes[n];
1388
1389                if (!e->link.next) {
1390                        pr_err("node[%d] no longer connected!\n", n);
1391                        return false;
1392                }
1393        }
1394
1395        return assert_continuous(mm, nodes[0].node.size);
1396}
1397
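/* Scan and evict every node in the mm, check that this leaves one big hole,
 * then reinsert all the evicted nodes and make sure the mm is fully
 * repopulated again.
 */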
1398static bool evict_everything(struct drm_mm *mm,
1399                             unsigned int total_size,
1400                             struct evict_node *nodes)
1401{
1402        struct drm_mm_scan scan;
1403        LIST_HEAD(evict_list);
1404        struct evict_node *e;
1405        unsigned int n;
1406        int err;
1407
1408        drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
1409        for (n = 0; n < total_size; n++) {
1410                e = &nodes[n];
1411                list_add(&e->link, &evict_list);
1412                if (drm_mm_scan_add_block(&scan, &e->node))
1413                        break;
1414        }
1415
1416        err = 0;
1417        list_for_each_entry(e, &evict_list, link) {
1418                if (!drm_mm_scan_remove_block(&scan, &e->node)) {
1419                        if (!err) {
1420                                pr_err("Node %lld not marked for eviction!\n",
1421                                       e->node.start);
1422                                err = -EINVAL;
1423                        }
1424                }
1425        }
1426        if (err)
1427                return false;
1428
1429        list_for_each_entry(e, &evict_list, link)
1430                drm_mm_remove_node(&e->node);
1431
1432        if (!assert_one_hole(mm, 0, total_size))
1433                return false;
1434
1435        list_for_each_entry(e, &evict_list, link) {
1436                err = drm_mm_reserve_node(mm, &e->node);
1437                if (err) {
1438                        pr_err("Failed to reinsert node after eviction: start=%llx\n",
1439                               e->node.start);
1440                        return false;
1441                }
1442        }
1443
1444        return assert_continuous(mm, nodes[0].node.size);
1445}
1446
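/* Evict just enough nodes (in the caller-supplied order) to make room for a
 * single size/alignment allocation within [range_start, range_end), insert
 * into the freed hole, verify the result, and finally restore the evicted
 * nodes.
 */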
1447static int evict_something(struct drm_mm *mm,
1448                           u64 range_start, u64 range_end,
1449                           struct evict_node *nodes,
1450                           unsigned int *order,
1451                           unsigned int count,
1452                           unsigned int size,
1453                           unsigned int alignment,
1454                           const struct insert_mode *mode)
1455{
1456        struct drm_mm_scan scan;
1457        LIST_HEAD(evict_list);
1458        struct evict_node *e;
1459        struct drm_mm_node tmp;
1460        int err;
1461
1462        drm_mm_scan_init_with_range(&scan, mm,
1463                                    size, alignment, 0,
1464                                    range_start, range_end,
1465                                    mode->mode);
1466        if (!evict_nodes(&scan,
1467                         nodes, order, count, false,
1468                         &evict_list))
1469                return -EINVAL;
1470
1471        memset(&tmp, 0, sizeof(tmp));
1472        err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
1473                                         DRM_MM_INSERT_EVICT);
1474        if (err) {
1475                pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
1476                       size, alignment);
1477                show_scan(&scan);
1478                show_holes(mm, 3);
1479                return err;
1480        }
1481
1482        if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
1483                pr_err("Inserted [address=%llu + %llu] did not fit into the requested range [%llu, %llu]\n",
1484                       tmp.start, tmp.size, range_start, range_end);
1485                err = -EINVAL;
1486        }
1487
1488        if (!assert_node(&tmp, mm, size, alignment, 0) ||
1489            drm_mm_hole_follows(&tmp)) {
1490                pr_err("Inserted node did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
1491                       tmp.size, size,
1492                       alignment, misalignment(&tmp, alignment),
1493                       tmp.start, drm_mm_hole_follows(&tmp));
1494                err = -EINVAL;
1495        }
1496
1497        drm_mm_remove_node(&tmp);
1498        if (err)
1499                return err;
1500
1501        list_for_each_entry(e, &evict_list, link) {
1502                err = drm_mm_reserve_node(mm, &e->node);
1503                if (err) {
1504                        pr_err("Failed to reinsert node after eviction: start=%llx\n",
1505                               e->node.start);
1506                        return err;
1507                }
1508        }
1509
1510        if (!assert_continuous(mm, nodes[0].node.size)) {
1511                pr_err("range is no longer continuous\n");
1512                return -EINVAL;
1513        }
1514
1515        return 0;
1516}
1517
1518static int igt_evict(void *ignored)
1519{
1520        DRM_RND_STATE(prng, random_seed);
1521        const unsigned int size = 8192;
1522        const struct insert_mode *mode;
1523        struct drm_mm mm;
1524        struct evict_node *nodes;
1525        struct drm_mm_node *node, *next;
1526        unsigned int *order, n;
1527        int ret, err;
1528
1529        /* Here we populate a full drm_mm and then try and insert a new node
1530         * by evicting other nodes in a random order. The drm_mm_scan should
1531         * pick the first matching hole it finds from the random list. We
1532         * repeat that for different allocation strategies, alignments and
1533         * sizes to try and stress the hole finder.
1534         */
1535
1536        ret = -ENOMEM;
1537        nodes = vzalloc(array_size(size, sizeof(*nodes)));
1538        if (!nodes)
1539                goto err;
1540
1541        order = drm_random_order(size, &prng);
1542        if (!order)
1543                goto err_nodes;
1544
1545        ret = -EINVAL;
1546        drm_mm_init(&mm, 0, size);
1547        for (n = 0; n < size; n++) {
1548                err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
1549                if (err) {
1550                        pr_err("insert failed, step %d\n", n);
1551                        ret = err;
1552                        goto out;
1553                }
1554        }
1555
1556        /* First check that using the scanner doesn't break the mm */
1557        if (!evict_nothing(&mm, size, nodes)) {
1558                pr_err("evict_nothing() failed\n");
1559                goto out;
1560        }
1561        if (!evict_everything(&mm, size, nodes)) {
1562                pr_err("evict_everything() failed\n");
1563                goto out;
1564        }
1565
1566        for (mode = evict_modes; mode->name; mode++) {
1567                for (n = 1; n <= size; n <<= 1) {
1568                        drm_random_reorder(order, size, &prng);
1569                        err = evict_something(&mm, 0, U64_MAX,
1570                                              nodes, order, size,
1571                                              n, 1,
1572                                              mode);
1573                        if (err) {
1574                                pr_err("%s evict_something(size=%u) failed\n",
1575                                       mode->name, n);
1576                                ret = err;
1577                                goto out;
1578                        }
1579                }
1580
1581                for (n = 1; n < size; n <<= 1) {
1582                        drm_random_reorder(order, size, &prng);
1583                        err = evict_something(&mm, 0, U64_MAX,
1584                                              nodes, order, size,
1585                                              size/2, n,
1586                                              mode);
1587                        if (err) {
1588                                pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
1589                                       mode->name, size/2, n);
1590                                ret = err;
1591                                goto out;
1592                        }
1593                }
1594
1595                for_each_prime_number_from(n, 1, min(size, max_prime)) {
1596                        unsigned int nsize = (size - n + 1) / 2;
1597
1598                        DRM_MM_BUG_ON(!nsize);
1599
1600                        drm_random_reorder(order, size, &prng);
1601                        err = evict_something(&mm, 0, U64_MAX,
1602                                              nodes, order, size,
1603                                              nsize, n,
1604                                              mode);
1605                        if (err) {
1606                                pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
1607                                       mode->name, nsize, n);
1608                                ret = err;
1609                                goto out;
1610                        }
1611                }
1612
1613                cond_resched();
1614        }
1615
1616        ret = 0;
1617out:
1618        drm_mm_for_each_node_safe(node, next, &mm)
1619                drm_mm_remove_node(node);
1620        drm_mm_takedown(&mm);
1621        kfree(order);
1622err_nodes:
1623        vfree(nodes);
1624err:
1625        return ret;
1626}
1627
1628static int igt_evict_range(void *ignored)
1629{
1630        DRM_RND_STATE(prng, random_seed);
1631        const unsigned int size = 8192;
1632        const unsigned int range_size = size / 2;
1633        const unsigned int range_start = size / 4;
1634        const unsigned int range_end = range_start + range_size;
1635        const struct insert_mode *mode;
1636        struct drm_mm mm;
1637        struct evict_node *nodes;
1638        struct drm_mm_node *node, *next;
1639        unsigned int *order, n;
1640        int ret, err;
1641
1642        /* Like igt_evict() but now we are limiting the search to a
1643         * small portion of the full drm_mm.
1644         */
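        /*
         * The restriction comes from evict_something() initialising the
         * scanner with drm_mm_scan_init_with_range(), so the node inserted
         * into the eviction hole must lie entirely within
         * [range_start, range_end).
         */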
1645
1646        ret = -ENOMEM;
1647        nodes = vzalloc(array_size(size, sizeof(*nodes)));
1648        if (!nodes)
1649                goto err;
1650
1651        order = drm_random_order(size, &prng);
1652        if (!order)
1653                goto err_nodes;
1654
1655        ret = -EINVAL;
1656        drm_mm_init(&mm, 0, size);
1657        for (n = 0; n < size; n++) {
1658                err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
1659                if (err) {
1660                        pr_err("insert failed, step %d\n", n);
1661                        ret = err;
1662                        goto out;
1663                }
1664        }
1665
1666        for (mode = evict_modes; mode->name; mode++) {
1667                for (n = 1; n <= range_size; n <<= 1) {
1668                        drm_random_reorder(order, size, &prng);
1669                        err = evict_something(&mm, range_start, range_end,
1670                                              nodes, order, size,
1671                                              n, 1,
1672                                              mode);
1673                        if (err) {
1674                                pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1675                                       mode->name, n, range_start, range_end);
1676                                goto out;
1677                        }
1678                }
1679
1680                for (n = 1; n <= range_size; n <<= 1) {
1681                        drm_random_reorder(order, size, &prng);
1682                        err = evict_something(&mm, range_start, range_end,
1683                                              nodes, order, size,
1684                                              range_size/2, n,
1685                                              mode);
1686                        if (err) {
1687                                pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1688                                       mode->name, range_size/2, n, range_start, range_end);
1689                                goto out;
1690                        }
1691                }
1692
1693                for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1694                        unsigned int nsize = (range_size - n + 1) / 2;
1695
1696                        DRM_MM_BUG_ON(!nsize);
1697
1698                        drm_random_reorder(order, size, &prng);
1699                        err = evict_something(&mm, range_start, range_end,
1700                                              nodes, order, size,
1701                                              nsize, n,
1702                                              mode);
1703                        if (err) {
1704                                pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1705                                       mode->name, nsize, n, range_start, range_end);
1706                                goto out;
1707                        }
1708                }
1709
1710                cond_resched();
1711        }
1712
1713        ret = 0;
1714out:
1715        drm_mm_for_each_node_safe(node, next, &mm)
1716                drm_mm_remove_node(node);
1717        drm_mm_takedown(&mm);
1718        kfree(order);
1719err_nodes:
1720        vfree(nodes);
1721err:
1722        return ret;
1723}
1724
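/* All nodes in the topdown/bottomup tests below share the same size, so the
 * slot index of a node is simply start / size.
 */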
1725static unsigned int node_index(const struct drm_mm_node *node)
1726{
1727        return div64_u64(node->start, node->size);
1728}
1729
1730static int igt_topdown(void *ignored)
1731{
1732        const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1733        DRM_RND_STATE(prng, random_seed);
1734        const unsigned int count = 8192;
1735        unsigned int size;
1736        unsigned long *bitmap;
1737        struct drm_mm mm;
1738        struct drm_mm_node *nodes, *node, *next;
1739        unsigned int *order, n, m, o = 0;
1740        int ret;
1741
1742        /* When allocating top-down, we expect to be returned a node
1743         * from a suitable hole at the top of the drm_mm. We check that
1744         * the returned node does match the highest available slot.
1745         */
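        /*
         * Each expect_insert() below should therefore behave like, e.g.
         * (illustrative only):
         *
         *      drm_mm_insert_node_generic(&mm, &nodes[n], size, 0, n,
         *                                 DRM_MM_INSERT_HIGH);
         *
         * i.e. the node lands at the top end of the topmost suitable hole,
         * so no hole may follow the freshly inserted node.
         */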
1746
1747        ret = -ENOMEM;
1748        nodes = vzalloc(array_size(count, sizeof(*nodes)));
1749        if (!nodes)
1750                goto err;
1751
1752        bitmap = bitmap_zalloc(count, GFP_KERNEL);
1753        if (!bitmap)
1754                goto err_nodes;
1755
1756        order = drm_random_order(count, &prng);
1757        if (!order)
1758                goto err_bitmap;
1759
1760        ret = -EINVAL;
1761        for (size = 1; size <= 64; size <<= 1) {
1762                drm_mm_init(&mm, 0, size*count);
1763                for (n = 0; n < count; n++) {
1764                        if (!expect_insert(&mm, &nodes[n],
1765                                           size, 0, n,
1766                                           topdown)) {
1767                                pr_err("insert failed, size %u step %d\n", size, n);
1768                                goto out;
1769                        }
1770
1771                        if (drm_mm_hole_follows(&nodes[n])) {
1772                                pr_err("hole after topdown insert %d, start=%llx, size=%u\n",
1773                                       n, nodes[n].start, size);
1774                                goto out;
1775                        }
1776
1777                        if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1778                                goto out;
1779                }
1780
1781                if (!assert_continuous(&mm, size))
1782                        goto out;
1783
1784                drm_random_reorder(order, count, &prng);
1785                for_each_prime_number_from(n, 1, min(count, max_prime)) {
1786                        for (m = 0; m < n; m++) {
1787                                node = &nodes[order[(o + m) % count]];
1788                                drm_mm_remove_node(node);
1789                                __set_bit(node_index(node), bitmap);
1790                        }
1791
1792                        for (m = 0; m < n; m++) {
1793                                unsigned int last;
1794
1795                                node = &nodes[order[(o + m) % count]];
1796                                if (!expect_insert(&mm, node,
1797                                                   size, 0, 0,
1798                                                   topdown)) {
1799                                        pr_err("insert failed, step %d/%d\n", m, n);
1800                                        goto out;
1801                                }
1802
1803                                if (drm_mm_hole_follows(node)) {
1804                                        pr_err("hole after topdown insert %d/%d, start=%llx\n",
1805                                               m, n, node->start);
1806                                        goto out;
1807                                }
1808
1809                                last = find_last_bit(bitmap, count);
1810                                if (node_index(node) != last) {
1811                                        pr_err("node %d/%d, size %d, not inserted into topmost hole, expected %d, found %d\n",
1812                                               m, n, size, last, node_index(node));
1813                                        goto out;
1814                                }
1815
1816                                __clear_bit(last, bitmap);
1817                        }
1818
1819                        DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1820
1821                        o += n;
1822                }
1823
1824                drm_mm_for_each_node_safe(node, next, &mm)
1825                        drm_mm_remove_node(node);
1826                DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1827                cond_resched();
1828        }
1829
1830        ret = 0;
1831out:
1832        drm_mm_for_each_node_safe(node, next, &mm)
1833                drm_mm_remove_node(node);
1834        drm_mm_takedown(&mm);
1835        kfree(order);
1836err_bitmap:
1837        bitmap_free(bitmap);
1838err_nodes:
1839        vfree(nodes);
1840err:
1841        return ret;
1842}
1843
1844static int igt_bottomup(void *ignored)
1845{
1846        const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
1847        DRM_RND_STATE(prng, random_seed);
1848        const unsigned int count = 8192;
1849        unsigned int size;
1850        unsigned long *bitmap;
1851        struct drm_mm mm;
1852        struct drm_mm_node *nodes, *node, *next;
1853        unsigned int *order, n, m, o = 0;
1854        int ret;
1855
1856        /* Like igt_topdown, but instead of searching for the last hole,
1857         * we search for the first.
1858         */
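        /*
         * DRM_MM_INSERT_LOW is the mirror image: every insertion should land
         * at the bottom end of the lowest suitable hole, which is what the
         * find_first_bit() check below verifies.
         */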
1859
1860        ret = -ENOMEM;
1861        nodes = vzalloc(array_size(count, sizeof(*nodes)));
1862        if (!nodes)
1863                goto err;
1864
1865        bitmap = bitmap_zalloc(count, GFP_KERNEL);
1866        if (!bitmap)
1867                goto err_nodes;
1868
1869        order = drm_random_order(count, &prng);
1870        if (!order)
1871                goto err_bitmap;
1872
1873        ret = -EINVAL;
1874        for (size = 1; size <= 64; size <<= 1) {
1875                drm_mm_init(&mm, 0, size*count);
1876                for (n = 0; n < count; n++) {
1877                        if (!expect_insert(&mm, &nodes[n],
1878                                           size, 0, n,
1879                                           bottomup)) {
1880                                pr_err("bottomup insert failed, size %u step %d\n", size, n);
1881                                goto out;
1882                        }
1883
1884                        if (!assert_one_hole(&mm, size*(n + 1), size*count))
1885                                goto out;
1886                }
1887
1888                if (!assert_continuous(&mm, size))
1889                        goto out;
1890
1891                drm_random_reorder(order, count, &prng);
1892                for_each_prime_number_from(n, 1, min(count, max_prime)) {
1893                        for (m = 0; m < n; m++) {
1894                                node = &nodes[order[(o + m) % count]];
1895                                drm_mm_remove_node(node);
1896                                __set_bit(node_index(node), bitmap);
1897                        }
1898
1899                        for (m = 0; m < n; m++) {
1900                                unsigned int first;
1901
1902                                node = &nodes[order[(o + m) % count]];
1903                                if (!expect_insert(&mm, node,
1904                                                   size, 0, 0,
1905                                                   bottomup)) {
1906                                        pr_err("insert failed, step %d/%d\n", m, n);
1907                                        goto out;
1908                                }
1909
1910                                first = find_first_bit(bitmap, count);
1911                                if (node_index(node) != first) {
1912                                        pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
1913                                               m, n, first, node_index(node));
1914                                        goto out;
1915                                }
1916                                __clear_bit(first, bitmap);
1917                        }
1918
1919                        DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1920
1921                        o += n;
1922                }
1923
1924                drm_mm_for_each_node_safe(node, next, &mm)
1925                        drm_mm_remove_node(node);
1926                DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1927                cond_resched();
1928        }
1929
1930        ret = 0;
1931out:
1932        drm_mm_for_each_node_safe(node, next, &mm)
1933                drm_mm_remove_node(node);
1934        drm_mm_takedown(&mm);
1935        kfree(order);
1936err_bitmap:
1937        bitmap_free(bitmap);
1938err_nodes:
1939        vfree(nodes);
1940err:
1941        return ret;
1942}
1943
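/*
 * __igt_once: reserve two single-unit nodes (at 1 and 5) in a tiny mm spanning
 * [0, 7), then check that a two-unit node can still be inserted into one of
 * the remaining holes with the requested insertion mode (see igt_lowest() and
 * igt_highest() below).
 */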
1944static int __igt_once(unsigned int mode)
1945{
1946        struct drm_mm mm;
1947        struct drm_mm_node rsvd_lo, rsvd_hi, node;
1948        int err;
1949
1950        drm_mm_init(&mm, 0, 7);
1951
1952        memset(&rsvd_lo, 0, sizeof(rsvd_lo));
1953        rsvd_lo.start = 1;
1954        rsvd_lo.size = 1;
1955        err = drm_mm_reserve_node(&mm, &rsvd_lo);
1956        if (err) {
1957                pr_err("Could not reserve low node\n");
1958                goto err;
1959        }
1960
1961        memset(&rsvd_hi, 0, sizeof(rsvd_hi));
1962        rsvd_hi.start = 5;
1963        rsvd_hi.size = 1;
1964        err = drm_mm_reserve_node(&mm, &rsvd_hi);
1965        if (err) {
1966                pr_err("Could not reserve high node\n");
1967                goto err_lo;
1968        }
1969
1970        if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
1971                pr_err("Expected a hole after both the low and high nodes!\n");
1972                err = -EINVAL;
1973                goto err_hi;
1974        }
1975
1976        memset(&node, 0, sizeof(node));
1977        err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
1978        if (err) {
1979                pr_err("Could not insert the node into the available hole!\n");
1980                err = -EINVAL;
1981                goto err_hi;
1982        }
1983
1984        drm_mm_remove_node(&node);
1985err_hi:
1986        drm_mm_remove_node(&rsvd_hi);
1987err_lo:
1988        drm_mm_remove_node(&rsvd_lo);
1989err:
1990        drm_mm_takedown(&mm);
1991        return err;
1992}
1993
1994static int igt_lowest(void *ignored)
1995{
1996        return __igt_once(DRM_MM_INSERT_LOW);
1997}
1998
1999static int igt_highest(void *ignored)
2000{
2001        return __igt_once(DRM_MM_INSERT_HIGH);
2002}
2003
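/*
 * color_adjust callback used by the colored tests: drm_mm calls it for the
 * node immediately preceding a candidate hole and lets it shrink the hole
 * [*start, *end). Here we require a one-unit gap on either side whenever the
 * neighbouring node has a different color.
 */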
2004static void separate_adjacent_colors(const struct drm_mm_node *node,
2005                                     unsigned long color,
2006                                     u64 *start,
2007                                     u64 *end)
2008{
2009        if (drm_mm_node_allocated(node) && node->color != color)
2010                ++*start;
2011
2012        node = list_next_entry(node, node_list);
2013        if (drm_mm_node_allocated(node) && node->color != color)
2014                --*end;
2015}
2016
2017static bool colors_abutt(const struct drm_mm_node *node)
2018{
2019        if (!drm_mm_hole_follows(node) &&
2020            drm_mm_node_allocated(list_next_entry(node, node_list))) {
2021                pr_err("colors abut; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
2022                       node->color, node->start, node->size,
2023                       list_next_entry(node, node_list)->color,
2024                       list_next_entry(node, node_list)->start,
2025                       list_next_entry(node, node_list)->size);
2026                return true;
2027        }
2028
2029        return false;
2030}
2031
2032static int igt_color(void *ignored)
2033{
2034        const unsigned int count = min(4096u, max_iterations);
2035        const struct insert_mode *mode;
2036        struct drm_mm mm;
2037        struct drm_mm_node *node, *nn;
2038        unsigned int n;
2039        int ret = -EINVAL, err;
2040
2041        /* Color adjustment complicates everything. First we just check
2042         * that when we insert a node we apply any color_adjust callback.
2043         * The callback we use should ensure that there is a gap between
2044         * any two nodes, and so after each insertion we check that those
2045         * gaps are created and that they are preserved.
2046         */
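        /*
         * The second half of the test then installs the callback, roughly:
         *
         *      mm.color_adjust = separate_adjacent_colors;
         *
         * after which differently colored neighbors must always be separated
         * by a hole (see colors_abutt()).
         */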
2047
2048        drm_mm_init(&mm, 0, U64_MAX);
2049
2050        for (n = 1; n <= count; n++) {
2051                node = kzalloc(sizeof(*node), GFP_KERNEL);
2052                if (!node) {
2053                        ret = -ENOMEM;
2054                        goto out;
2055                }
2056
2057                if (!expect_insert(&mm, node,
2058                                   n, 0, n,
2059                                   &insert_modes[0])) {
2060                        pr_err("insert failed, step %d\n", n);
2061                        kfree(node);
2062                        goto out;
2063                }
2064        }
2065
2066        drm_mm_for_each_node_safe(node, nn, &mm) {
2067                if (node->color != node->size) {
2068                        pr_err("invalid color stored: expected %lld, found %ld\n",
2069                               node->size, node->color);
2070
2071                        goto out;
2072                }
2073
2074                drm_mm_remove_node(node);
2075                kfree(node);
2076        }
2077
2078        /* Now, let's start experimenting with applying a color callback */
2079        mm.color_adjust = separate_adjacent_colors;
2080        for (mode = insert_modes; mode->name; mode++) {
2081                u64 last;
2082
2083                node = kzalloc(sizeof(*node), GFP_KERNEL);
2084                if (!node) {
2085                        ret = -ENOMEM;
2086                        goto out;
2087                }
2088
2089                node->size = 1 + 2*count;
2090                node->color = node->size;
2091
2092                err = drm_mm_reserve_node(&mm, node);
2093                if (err) {
2094                        pr_err("initial reserve failed!\n");
2095                        ret = err;
2096                        goto out;
2097                }
2098
2099                last = node->start + node->size;
2100
2101                for (n = 1; n <= count; n++) {
2102                        int rem;
2103
2104                        node = kzalloc(sizeof(*node), GFP_KERNEL);
2105                        if (!node) {
2106                                ret = -ENOMEM;
2107                                goto out;
2108                        }
2109
2110                        node->start = last;
2111                        node->size = n + count;
2112                        node->color = node->size;
2113
2114                        err = drm_mm_reserve_node(&mm, node);
2115                        if (err != -ENOSPC) {
2116                                pr_err("reserve %d did not report color overlap! err=%d\n",
2117                                       n, err);
2118                                goto out;
2119                        }
2120
2121                        node->start += n + 1;
2122                        rem = misalignment(node, n + count);
2123                        node->start += n + count - rem;
2124
2125                        err = drm_mm_reserve_node(&mm, node);
2126                        if (err) {
2127                                pr_err("reserve %d failed, err=%d\n", n, err);
2128                                ret = err;
2129                                goto out;
2130                        }
2131
2132                        last = node->start + node->size;
2133                }
2134
2135                for (n = 1; n <= count; n++) {
2136                        node = kzalloc(sizeof(*node), GFP_KERNEL);
2137                        if (!node) {
2138                                ret = -ENOMEM;
2139                                goto out;
2140                        }
2141
2142                        if (!expect_insert(&mm, node,
2143                                           n, n, n,
2144                                           mode)) {
2145                                pr_err("%s insert failed, step %d\n",
2146                                       mode->name, n);
2147                                kfree(node);
2148                                goto out;
2149                        }
2150                }
2151
2152                drm_mm_for_each_node_safe(node, nn, &mm) {
2153                        u64 rem;
2154
2155                        if (node->color != node->size) {
2156                                pr_err("%s invalid color stored: expected %lld, found %ld\n",
2157                                       mode->name, node->size, node->color);
2158
2159                                goto out;
2160                        }
2161
2162                        if (colors_abutt(node))
2163                                goto out;
2164
2165                        div64_u64_rem(node->start, node->size, &rem);
2166                        if (rem) {
2167                                pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
2168                                       mode->name, node->start, node->size, rem);
2169                                goto out;
2170                        }
2171
2172                        drm_mm_remove_node(node);
2173                        kfree(node);
2174                }
2175
2176                cond_resched();
2177        }
2178
2179        ret = 0;
2180out:
2181        drm_mm_for_each_node_safe(node, nn, &mm) {
2182                drm_mm_remove_node(node);
2183                kfree(node);
2184        }
2185        drm_mm_takedown(&mm);
2186        return ret;
2187}
2188
2189static int evict_color(struct drm_mm *mm,
2190                       u64 range_start, u64 range_end,
2191                       struct evict_node *nodes,
2192                       unsigned int *order,
2193                       unsigned int count,
2194                       unsigned int size,
2195                       unsigned int alignment,
2196                       unsigned long color,
2197                       const struct insert_mode *mode)
2198{
2199        struct drm_mm_scan scan;
2200        LIST_HEAD(evict_list);
2201        struct evict_node *e;
2202        struct drm_mm_node tmp;
2203        int err;
2204
2205        drm_mm_scan_init_with_range(&scan, mm,
2206                                    size, alignment, color,
2207                                    range_start, range_end,
2208                                    mode->mode);
2209        if (!evict_nodes(&scan,
2210                         nodes, order, count, true,
2211                         &evict_list))
2212                return -EINVAL;
2213
2214        memset(&tmp, 0, sizeof(tmp));
2215        err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
2216                                         DRM_MM_INSERT_EVICT);
2217        if (err) {
2218                pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
2219                       size, alignment, color, err);
2220                show_scan(&scan);
2221                show_holes(mm, 3);
2222                return err;
2223        }
2224
2225        if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
2226                pr_err("Inserted [address=%llu + %llu] did not fit into the requested range [%llu, %llu]\n",
2227                       tmp.start, tmp.size, range_start, range_end);
2228                err = -EINVAL;
2229        }
2230
2231        if (colors_abutt(&tmp))
2232                err = -EINVAL;
2233
2234        if (!assert_node(&tmp, mm, size, alignment, color)) {
2235                pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
2236                       tmp.size, size,
2237                       alignment, misalignment(&tmp, alignment), tmp.start);
2238                err = -EINVAL;
2239        }
2240
2241        drm_mm_remove_node(&tmp);
2242        if (err)
2243                return err;
2244
2245        list_for_each_entry(e, &evict_list, link) {
2246                err = drm_mm_reserve_node(mm, &e->node);
2247                if (err) {
2248                        pr_err("Failed to reinsert node after eviction: start=%llx\n",
2249                               e->node.start);
2250                        return err;
2251                }
2252        }
2253
2254        cond_resched();
2255        return 0;
2256}
2257
2258static int igt_color_evict(void *ignored)
2259{
2260        DRM_RND_STATE(prng, random_seed);
2261        const unsigned int total_size = min(8192u, max_iterations);
2262        const struct insert_mode *mode;
2263        unsigned long color = 0;
2264        struct drm_mm mm;
2265        struct evict_node *nodes;
2266        struct drm_mm_node *node, *next;
2267        unsigned int *order, n;
2268        int ret, err;
2269
2270        /* Check that the drm_mm_scan also honours color adjustment when
2271         * choosing its victims to create a hole. Our color_adjust does not
2272         * allow two nodes to be placed together without an intervening hole,
2273         * enlarging the set of victims that must be evicted.
2274         */
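        /*
         * evict_color() passes the color through drm_mm_scan_init_with_range(),
         * so the scanner itself has to apply separate_adjacent_colors() when
         * sizing the hole it carves out.
         */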
2275
2276        ret = -ENOMEM;
2277        nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2278        if (!nodes)
2279                goto err;
2280
2281        order = drm_random_order(total_size, &prng);
2282        if (!order)
2283                goto err_nodes;
2284
2285        ret = -EINVAL;
2286        drm_mm_init(&mm, 0, 2*total_size - 1);
2287        mm.color_adjust = separate_adjacent_colors;
2288        for (n = 0; n < total_size; n++) {
2289                if (!expect_insert(&mm, &nodes[n].node,
2290                                   1, 0, color++,
2291                                   &insert_modes[0])) {
2292                        pr_err("insert failed, step %d\n", n);
2293                        goto out;
2294                }
2295        }
2296
2297        for (mode = evict_modes; mode->name; mode++) {
2298                for (n = 1; n <= total_size; n <<= 1) {
2299                        drm_random_reorder(order, total_size, &prng);
2300                        err = evict_color(&mm, 0, U64_MAX,
2301                                          nodes, order, total_size,
2302                                          n, 1, color++,
2303                                          mode);
2304                        if (err) {
2305                                pr_err("%s evict_color(size=%u) failed\n",
2306                                       mode->name, n);
2307                                goto out;
2308                        }
2309                }
2310
2311                for (n = 1; n < total_size; n <<= 1) {
2312                        drm_random_reorder(order, total_size, &prng);
2313                        err = evict_color(&mm, 0, U64_MAX,
2314                                          nodes, order, total_size,
2315                                          total_size/2, n, color++,
2316                                          mode);
2317                        if (err) {
2318                                pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2319                                       mode->name, total_size/2, n);
2320                                goto out;
2321                        }
2322                }
2323
2324                for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
2325                        unsigned int nsize = (total_size - n + 1) / 2;
2326
2327                        DRM_MM_BUG_ON(!nsize);
2328
2329                        drm_random_reorder(order, total_size, &prng);
2330                        err = evict_color(&mm, 0, U64_MAX,
2331                                          nodes, order, total_size,
2332                                          nsize, n, color++,
2333                                          mode);
2334                        if (err) {
2335                                pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2336                                       mode->name, nsize, n);
2337                                goto out;
2338                        }
2339                }
2340
2341                cond_resched();
2342        }
2343
2344        ret = 0;
2345out:
2346        if (ret)
2347                show_mm(&mm);
2348        drm_mm_for_each_node_safe(node, next, &mm)
2349                drm_mm_remove_node(node);
2350        drm_mm_takedown(&mm);
2351        kfree(order);
2352err_nodes:
2353        vfree(nodes);
2354err:
2355        return ret;
2356}
2357
2358static int igt_color_evict_range(void *ignored)
2359{
2360        DRM_RND_STATE(prng, random_seed);
2361        const unsigned int total_size = 8192;
2362        const unsigned int range_size = total_size / 2;
2363        const unsigned int range_start = total_size / 4;
2364        const unsigned int range_end = range_start + range_size;
2365        const struct insert_mode *mode;
2366        unsigned long color = 0;
2367        struct drm_mm mm;
2368        struct evict_node *nodes;
2369        struct drm_mm_node *node, *next;
2370        unsigned int *order, n;
2371        int ret, err;
2372
2373        /* Like igt_color_evict(), but limited to a small portion of the full
2374         * drm_mm range.
2375         */
2376
2377        ret = -ENOMEM;
2378        nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2379        if (!nodes)
2380                goto err;
2381
2382        order = drm_random_order(total_size, &prng);
2383        if (!order)
2384                goto err_nodes;
2385
2386        ret = -EINVAL;
2387        drm_mm_init(&mm, 0, 2*total_size - 1);
2388        mm.color_adjust = separate_adjacent_colors;
2389        for (n = 0; n < total_size; n++) {
2390                if (!expect_insert(&mm, &nodes[n].node,
2391                                   1, 0, color++,
2392                                   &insert_modes[0])) {
2393                        pr_err("insert failed, step %d\n", n);
2394                        goto out;
2395                }
2396        }
2397
2398        for (mode = evict_modes; mode->name; mode++) {
2399                for (n = 1; n <= range_size; n <<= 1) {
2400                        drm_random_reorder(order, total_size, &prng);
2401                        err = evict_color(&mm, range_start, range_end,
2402                                          nodes, order, total_size,
2403                                          n, 1, color++,
2404                                          mode);
2405                        if (err) {
2406                                pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2407                                       mode->name, n, range_start, range_end);
2408                                goto out;
2409                        }
2410                }
2411
2412                for (n = 1; n < range_size; n <<= 1) {
2413                        drm_random_reorder(order, total_size, &prng);
2414                        err = evict_color(&mm, range_start, range_end,
2415                                          nodes, order, total_size,
2416                                          range_size/2, n, color++,
2417                                          mode);
2418                        if (err) {
2419                                pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2420                                       mode->name, range_size/2, n, range_start, range_end);
2421                                goto out;
2422                        }
2423                }
2424
2425                for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2426                        unsigned int nsize = (range_size - n + 1) / 2;
2427
2428                        DRM_MM_BUG_ON(!nsize);
2429
2430                        drm_random_reorder(order, total_size, &prng);
2431                        err = evict_color(&mm, range_start, range_end,
2432                                          nodes, order, total_size,
2433                                          nsize, n, color++,
2434                                          mode);
2435                        if (err) {
2436                                pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2437                                       mode->name, nsize, n, range_start, range_end);
2438                                goto out;
2439                        }
2440                }
2441
2442                cond_resched();
2443        }
2444
2445        ret = 0;
2446out:
2447        if (ret)
2448                show_mm(&mm);
2449        drm_mm_for_each_node_safe(node, next, &mm)
2450                drm_mm_remove_node(node);
2451        drm_mm_takedown(&mm);
2452        kfree(order);
2453err_nodes:
2454        vfree(nodes);
2455err:
2456        return ret;
2457}
2458
2459#include "drm_selftest.c"
2460
2461static int __init test_drm_mm_init(void)
2462{
2463        int err;
2464
2465        while (!random_seed)
2466                random_seed = get_random_int();
2467
2468        pr_info("Testing DRM range manager (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2469                random_seed, max_iterations, max_prime);
2470        err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2471
2472        return err > 0 ? 0 : err;
2473}
2474
2475static void __exit test_drm_mm_exit(void)
2476{
2477}
2478
2479module_init(test_drm_mm_init);
2480module_exit(test_drm_mm_exit);
2481
2482module_param(random_seed, uint, 0400);
2483module_param(max_iterations, uint, 0400);
2484module_param(max_prime, uint, 0400);
2485
2486MODULE_AUTHOR("Intel Corporation");
2487MODULE_LICENSE("GPL");
2488