linux/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"

#include "i915_random.h"
#include "i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "igt_flush_test.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
        i915_gem_drain_freed_objects(i915);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

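/*
 * Fabricate "backing storage" without allocating any real pages: every
 * scatterlist segment points at the same PFN_BIAS page and reuses its
 * physical address as the dma address. This is sufficient for exercising
 * the page-table plumbing, as the contents are never dereferenced.
 */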
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        typeof(obj->base.size) rem;

        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
                return -ENOMEM;

        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
                return -ENOMEM;
        }

        sg_page_sizes = 0;
        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));

                GEM_BUG_ON(!len);
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;
                sg_page_sizes |= len;

                rem -= len;
        }
        GEM_BUG_ON(rem);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
        .name = "fake-gem",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = fake_get_pages,
        .put_pages = fake_put_pages,
};

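/*
 * Create a GEM object backed by fake_get_pages(), i.e. one that can be
 * bound into an address space without consuming real memory. The object
 * is marked volatile so its (fake) pages may be discarded under memory
 * pressure.
 */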
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
        static struct lock_class_key lock_class;
        struct drm_i915_gem_object *obj;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                goto err;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops, &lock_class);

        i915_gem_object_set_volatile(obj);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;

        /* Preallocate the "backing storage" */
        if (i915_gem_object_pin_pages(obj))
                goto err_obj;

        i915_gem_object_unpin_pages(obj);
        return obj;

err_obj:
        i915_gem_object_put(obj);
err:
        return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct i915_ppgtt *ppgtt;
        u64 size, last, limit;
        int err = 0;

        /* Allocate a ppgtt and try to fill the entire range */

        if (!HAS_PPGTT(dev_priv))
                return 0;

        ppgtt = i915_ppgtt_create(&dev_priv->gt);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;

        /*
         * While we only allocate the page tables here and so we could
         * address a much larger GTT than we could actually fit into
         * RAM, a practical limit is the number of physical pages in the
         * system. This should ensure that we do not run into the oomkiller
         * during the test and take down the machine wilfully.
         */
        limit = totalram_pages() << PAGE_SHIFT;
        limit = min(ppgtt->vm.total, limit);

        /* Check we can allocate the entire range */
        for (size = 4096; size <= limit; size <<= 2) {
                struct i915_vm_pt_stash stash = {};

                err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
                if (err)
                        goto err_ppgtt_cleanup;

                err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
                if (err) {
                        i915_vm_free_pt_stash(&ppgtt->vm, &stash);
                        goto err_ppgtt_cleanup;
                }

                ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
                cond_resched();

                ppgtt->vm.clear_range(&ppgtt->vm, 0, size);

                i915_vm_free_pt_stash(&ppgtt->vm, &stash);
        }

        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
                struct i915_vm_pt_stash stash = {};

                err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
                if (err)
                        goto err_ppgtt_cleanup;

                err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
                if (err) {
                        i915_vm_free_pt_stash(&ppgtt->vm, &stash);
                        goto err_ppgtt_cleanup;
                }

                ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
                                            last, size - last);
                cond_resched();

                i915_vm_free_pt_stash(&ppgtt->vm, &stash);
        }

err_ppgtt_cleanup:
        i915_vm_put(&ppgtt->vm);
        return err;
}

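/*
 * Exercise the low-level insert_entries()/clear_range() vfuncs directly,
 * bypassing the vma/drm_mm machinery: stuff a mock vma into every slot
 * of the hole in a random order, then clear the range again.
 */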
static int lowlevel_hole(struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        I915_RND_STATE(seed_prng);
        struct i915_vma *mock_vma;
        unsigned int size;

        mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
        if (!mock_vma)
                return -ENOMEM;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                I915_RND_SUBSTATE(prng, seed_prng);
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                u64 hole_size;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count) {
                        kfree(mock_vma);
                        return -ENOMEM;
                }
                GEM_BUG_ON(!order);

                GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
                GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(vm->i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                GEM_BUG_ON(obj->base.size != BIT_ULL(size));

                if (i915_gem_object_pin_pages(obj)) {
                        i915_gem_object_put(obj);
                        kfree(order);
                        break;
                }

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

                        if (igt_timeout(end_time,
                                        "%s timed out before %d/%d\n",
                                        __func__, n, count)) {
                                hole_end = hole_start; /* quit */
                                break;
                        }

                        if (vm->allocate_va_range) {
                                struct i915_vm_pt_stash stash = {};

                                if (i915_vm_alloc_pt_stash(vm, &stash,
                                                           BIT_ULL(size)))
                                        break;

                                if (i915_vm_pin_pt_stash(vm, &stash)) {
                                        i915_vm_free_pt_stash(vm, &stash);
                                        break;
                                }

                                vm->allocate_va_range(vm, &stash,
                                                      addr, BIT_ULL(size));

                                i915_vm_free_pt_stash(vm, &stash);
                        }

                        mock_vma->pages = obj->mm.pages;
                        mock_vma->node.size = BIT_ULL(size);
                        mock_vma->node.start = addr;

                        with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
                                vm->insert_entries(vm, mock_vma,
                                                   I915_CACHE_NONE, 0);
                }
                count = n;

                i915_random_reorder(order, count, &prng);
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
                        with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
                                vm->clear_range(vm, addr, BIT_ULL(size));
                }

                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);

                kfree(order);

                cleanup_freed_objects(vm->i915);
        }

        kfree(mock_vma);
        return 0;
}

static void close_object_list(struct list_head *objects,
                              struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj, *on;
        int ignored;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;

                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);

                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }
}

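/*
 * Pack mixed-size objects into the hole, working inwards from both edges
 * in turn, and verify after each pass that nothing moved and that every
 * vma can be unbound again. Prime number strides keep the size mix varied
 * without testing every combination exhaustively.
 */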
static int fill_hole(struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        struct drm_i915_gem_object *obj;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
        const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
        unsigned long npages, prime, flags;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Try binding many VMA working inwards from either edge */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(prime, 2, max_step) {
                for (npages = 1; npages <= max_pages; npages *= prime) {
                        const u64 full_size = npages << PAGE_SHIFT;
                        const struct {
                                const char *name;
                                u64 offset;
                                int step;
                        } phases[] = {
                                { "top-down", hole_end, -1, },
                                { "bottom-up", hole_start, 1, },
                                { }
                        }, *p;

                        obj = fake_dma_object(vm->i915, full_size);
                        if (IS_ERR(obj))
                                break;

                        list_add(&obj->st_link, &objects);

                        /* Align differing sized objects against the edges, and
                         * check we don't walk off into the void when binding
                         * them into the GTT.
                         */
                        for (p = phases; p->name; p++) {
                                u64 offset;

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }
                        }

                        if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
                                        __func__, npages, prime)) {
                                err = -EINTR;
                                goto err;
                        }
                }

                close_object_list(&objects, vm);
                cleanup_freed_objects(vm->i915);
        }

        return 0;

err:
        close_object_list(&objects, vm);
        return err;
}

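/*
 * March a single vma across the hole: bind, check placement and unbind
 * at each offset in turn, for a range of prime object sizes.
 */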
static int walk_hole(struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
        unsigned long flags;
        u64 size;

        /* Try binding a single VMA in different positions within the hole */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(size, 1, max_pages) {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
                u64 addr;
                int err = 0;

                obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
                if (IS_ERR(obj))
                        break;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_put;
                }

                for (addr = hole_start;
                     addr + obj->base.size < hole_end;
                     addr += obj->base.size) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
                                goto err_put;
                        }
                        i915_vma_unpin(vma);

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
                                goto err_put;
                        }

                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx with err=%d\n",
                                       __func__, addr, vma->size, err);
                                goto err_put;
                        }

                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

                        if (igt_timeout(end_time,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
                                goto err_put;
                        }
                }

err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;

                cleanup_freed_objects(vm->i915);
        }

        return 0;
}

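/*
 * Bind a single two-page object so that it straddles each power-of-two
 * boundary within the hole in turn, verifying the placement before
 * unbinding. This exercises bindings that cross page-table boundaries
 * at every level.
 */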
static int pot_hole(struct i915_address_space *vm,
                    u64 hole_start, u64 hole_end,
                    unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned long flags;
        unsigned int pot;
        int err = 0;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        /* Insert a pair of pages across every pot boundary within the hole */
        for (pot = fls64(hole_end - 1) - 1;
             pot > ilog2(2 * I915_GTT_PAGE_SIZE);
             pot--) {
                u64 step = BIT_ULL(pot);
                u64 addr;

                for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr <= round_down(hole_end - 2 * I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr,
                                       hole_start, hole_end,
                                       err);
                                goto err_obj;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err_obj;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);
                }

                if (igt_timeout(end_time,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
                        goto err_obj;
                }
        }

err_obj:
        i915_gem_object_put(obj);
        return err;
}

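/*
 * Bind and unbind a single vma at random offsets throughout the hole,
 * doubling the object size on each pass until the object no longer fits.
 */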
static int drunk_hole(struct i915_address_space *vm,
                      u64 hole_start, u64 hole_end,
                      unsigned long end_time)
{
        I915_RND_STATE(prng);
        unsigned int size;
        unsigned long flags;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                struct i915_vma *vma;
                u64 hole_size;
                int err = -ENODEV;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(vm->i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }

                GEM_BUG_ON(vma->size != BIT_ULL(size));

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
                                goto err_obj;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, BIT_ULL(size));
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err_obj;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);

                        if (igt_timeout(end_time,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
                                goto err_obj;
                        }
                }

err_obj:
                i915_gem_object_put(obj);
                kfree(order);
                if (err)
                        return err;

                cleanup_freed_objects(vm->i915);
        }

        return 0;
}

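/*
 * Fill the hole from bottom to top with objects of doubling size while
 * the caller (see shrink_hole() below) injects allocation faults, forcing
 * the shrinker into the page-table allocation paths.
 */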
static int __shrink_hole(struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
        unsigned int order = 12;
        LIST_HEAD(objects);
        int err = 0;
        u64 addr;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (addr = hole_start; addr < hole_end; ) {
                struct i915_vma *vma;
                u64 size = BIT_ULL(order++);

                size = min(size, hole_end - addr);
                obj = fake_dma_object(vm->i915, size);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
                }

                GEM_BUG_ON(vma->size != size);

                err = i915_vma_pin(vma, 0, 0, addr | flags);
                if (err) {
                        pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                               __func__, addr, size, hole_start, hole_end, err);
                        break;
                }

                if (!drm_mm_node_allocated(&vma->node) ||
                    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                        pr_err("%s incorrect at %llx + %llx\n",
                               __func__, addr, size);
                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        err = -EINVAL;
                        break;
                }

                i915_vma_unpin(vma);
                addr += size;

                /*
                 * Since we are injecting allocation faults at random intervals,
                 * wait for this allocation to complete before we change the
                 * fault injection.
                 */
                err = i915_vma_sync(vma);
                if (err)
                        break;

                if (igt_timeout(end_time,
                                "%s timed out at offset %llx [%llx - %llx]\n",
                                __func__, addr, hole_start, hole_end)) {
                        err = -EINTR;
                        break;
                }
        }

        close_object_list(&objects, vm);
        cleanup_freed_objects(vm->i915);
        return err;
}

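/*
 * Rerun __shrink_hole() with allocation faults injected at every prime
 * interval, so that each allocation failure path is eventually hit.
 */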
static int shrink_hole(struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned long prime;
        int err;

        vm->fault_attr.probability = 999;
        atomic_set(&vm->fault_attr.times, -1);

        for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
                vm->fault_attr.interval = prime;
                err = __shrink_hole(vm, hole_start, hole_end, end_time);
                if (err)
                        break;
        }

        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

        return err;
}

static int shrink_boom(struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned int sizes[] = { SZ_2M, SZ_1G };
        struct drm_i915_gem_object *purge;
        struct drm_i915_gem_object *explode;
        int err;
        int i;

        /*
         * Catch the case which shrink_hole seems to miss. The setup here
         * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
         * ensuring that all vma associated with the respective pd/pdp are
         * unpinned at the time.
         */

        for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
                unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
                unsigned int size = sizes[i];
                struct i915_vma *vma;

                purge = fake_dma_object(vm->i915, size);
                if (IS_ERR(purge))
                        return PTR_ERR(purge);

                vma = i915_vma_instance(purge, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_purge;
                }

                err = i915_vma_pin(vma, 0, 0, flags);
                if (err)
                        goto err_purge;

                /* Should now be ripe for purging */
                i915_vma_unpin(vma);

                explode = fake_dma_object(vm->i915, size);
                if (IS_ERR(explode)) {
                        err = PTR_ERR(explode);
                        goto err_purge;
                }

                vm->fault_attr.probability = 100;
                vm->fault_attr.interval = 1;
                atomic_set(&vm->fault_attr.times, -1);

                vma = i915_vma_instance(explode, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_explode;
                }

                err = i915_vma_pin(vma, 0, 0, flags | size);
                if (err)
                        goto err_explode;

                i915_vma_unpin(vma);

                i915_gem_object_put(purge);
                i915_gem_object_put(explode);

                memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
                cleanup_freed_objects(vm->i915);
        }

        return 0;

err_explode:
        i915_gem_object_put(explode);
err_purge:
        i915_gem_object_put(purge);
        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
        return err;
}

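/* Create a fresh full-ppgtt and run the given test over its entire range. */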
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                          int (*func)(struct i915_address_space *vm,
                                      u64 hole_start, u64 hole_end,
                                      unsigned long end_time))
{
        struct i915_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        struct file *file;
        int err;

        if (!HAS_FULL_PPGTT(dev_priv))
                return 0;

        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ppgtt = i915_ppgtt_create(&dev_priv->gt);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_free;
        }
        GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
        GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));

        err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);

        i915_vm_put(&ppgtt->vm);

out_free:
        fput(file);
        return err;
}

static int igt_ppgtt_fill(void *arg)
{
        return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
        return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
        return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
        return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
        return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
        return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
        return exercise_ppgtt(arg, shrink_boom);
}

static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
        struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
        struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

        if (a->start < b->start)
                return -1;
        else
                return 1;
}

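/*
 * Run the given test over every hole currently present in the GGTT, in
 * order of ascending address. The walk restarts after each hole as the
 * test itself may have rearranged the drm_mm behind our back.
 */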
static int exercise_ggtt(struct drm_i915_private *i915,
                         int (*func)(struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        u64 hole_start, hole_end, last = 0;
        struct drm_mm_node *node;
        IGT_TIMEOUT(end_time);
        int err = 0;

restart:
        list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
        drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;

                if (ggtt->vm.mm.color_adjust)
                        ggtt->vm.mm.color_adjust(node, 0,
                                                 &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;

                err = func(&ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;

                /* As we have manipulated the drm_mm, the list may be corrupt */
                last = hole_end;
                goto restart;
        }

        return err;
}

static int igt_ggtt_fill(void *arg)
{
        return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
        return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
        return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
        return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
        return exercise_ggtt(arg, lowlevel_hole);
}

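/*
 * Check vm->insert_page() by pointing many GGTT PTEs at the same physical
 * page: values written through one mapping in the aperture must read back
 * correctly through the others.
 */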
static int igt_ggtt_page(void *arg)
{
        const unsigned int count = PAGE_SIZE/sizeof(u32);
        I915_RND_STATE(prng);
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;

        if (!i915_ggtt_has_aperture(ggtt))
                return 0;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out_free;

        memset(&tmp, 0, sizeof(tmp));
        mutex_lock(&ggtt->vm.mutex);
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        mutex_unlock(&ggtt->vm.mutex);
        if (err)
                goto out_unpin;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;

                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, 0),
                                     offset, I915_CACHE_NONE, 0);
        }

        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
        }
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);

        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
                u32 val;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
                        err = -EINVAL;
                        break;
                }
        }

        kfree(order);
out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(&tmp);
        mutex_unlock(&ggtt->vm.mutex);
out_unpin:
        i915_gem_object_unpin_pages(obj);
out_free:
        i915_gem_object_put(obj);
        return err;
}

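/*
 * Mimic the side effects of a real bind for a node reserved directly with
 * i915_gem_gtt_reserve()/i915_gem_gtt_insert(): take the page references
 * a bind would hold (one for the object, one for vma->pages) and put the
 * vma on the vm's bound list so that eviction can find it.
 */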
static void track_vma_bind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        __i915_gem_object_pin_pages(obj);

        GEM_BUG_ON(vma->pages);
        atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
        __i915_gem_object_pin_pages(obj);
        vma->pages = obj->mm.pages;

        mutex_lock(&vma->vm->mutex);
        list_add_tail(&vma->vm_link, &vma->vm->bound_list);
        mutex_unlock(&vma->vm->mutex);
}

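/*
 * Run the given test over a mock context's address space, capped at the
 * amount of physical memory so as not to provoke the oomkiller.
 */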
1257static int exercise_mock(struct drm_i915_private *i915,
1258                         int (*func)(struct i915_address_space *vm,
1259                                     u64 hole_start, u64 hole_end,
1260                                     unsigned long end_time))
1261{
1262        const u64 limit = totalram_pages() << PAGE_SHIFT;
1263        struct i915_address_space *vm;
1264        struct i915_gem_context *ctx;
1265        IGT_TIMEOUT(end_time);
1266        int err;
1267
1268        ctx = mock_context(i915, "mock");
1269        if (!ctx)
1270                return -ENOMEM;
1271
1272        vm = i915_gem_context_get_vm_rcu(ctx);
1273        err = func(vm, 0, min(vm->total, limit), end_time);
1274        i915_vm_put(vm);
1275
1276        mock_context_close(ctx);
1277        return err;
1278}
1279
1280static int igt_mock_fill(void *arg)
1281{
1282        struct i915_ggtt *ggtt = arg;
1283
1284        return exercise_mock(ggtt->vm.i915, fill_hole);
1285}
1286
1287static int igt_mock_walk(void *arg)
1288{
1289        struct i915_ggtt *ggtt = arg;
1290
1291        return exercise_mock(ggtt->vm.i915, walk_hole);
1292}
1293
1294static int igt_mock_pot(void *arg)
1295{
1296        struct i915_ggtt *ggtt = arg;
1297
1298        return exercise_mock(ggtt->vm.i915, pot_hole);
1299}
1300
1301static int igt_mock_drunk(void *arg)
1302{
1303        struct i915_ggtt *ggtt = arg;
1304
1305        return exercise_mock(ggtt->vm.i915, drunk_hole);
1306}
1307
1308static int igt_gtt_reserve(void *arg)
1309{
1310        struct i915_ggtt *ggtt = arg;
1311        struct drm_i915_gem_object *obj, *on;
1312        I915_RND_STATE(prng);
1313        LIST_HEAD(objects);
1314        u64 total;
1315        int err = -ENODEV;
1316
1317        /* i915_gem_gtt_reserve() tries to reserve the precise range
1318         * for the node, and evicts if it has to. So our test checks that
1319         * it can give us the requsted space and prevent overlaps.
1320         */
1321
1322        /* Start by filling the GGTT */
1323        for (total = 0;
1324             total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1325             total += 2 * I915_GTT_PAGE_SIZE) {
1326                struct i915_vma *vma;
1327
1328                obj = i915_gem_object_create_internal(ggtt->vm.i915,
1329                                                      2 * PAGE_SIZE);
1330                if (IS_ERR(obj)) {
1331                        err = PTR_ERR(obj);
1332                        goto out;
1333                }
1334
1335                err = i915_gem_object_pin_pages(obj);
1336                if (err) {
1337                        i915_gem_object_put(obj);
1338                        goto out;
1339                }
1340
1341                list_add(&obj->st_link, &objects);
1342
1343                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1344                if (IS_ERR(vma)) {
1345                        err = PTR_ERR(vma);
1346                        goto out;
1347                }
1348
1349                mutex_lock(&ggtt->vm.mutex);
1350                err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1351                                           obj->base.size,
1352                                           total,
1353                                           obj->cache_level,
1354                                           0);
1355                mutex_unlock(&ggtt->vm.mutex);
1356                if (err) {
1357                        pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1358                               total, ggtt->vm.total, err);
1359                        goto out;
1360                }
1361                track_vma_bind(vma);
1362
1363                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1364                if (vma->node.start != total ||
1365                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1366                        pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1367                               vma->node.start, vma->node.size,
1368                               total, 2*I915_GTT_PAGE_SIZE);
1369                        err = -EINVAL;
1370                        goto out;
1371                }
1372        }
1373
1374        /* Now we start forcing evictions */
1375        for (total = I915_GTT_PAGE_SIZE;
1376             total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1377             total += 2 * I915_GTT_PAGE_SIZE) {
1378                struct i915_vma *vma;
1379
1380                obj = i915_gem_object_create_internal(ggtt->vm.i915,
1381                                                      2 * PAGE_SIZE);
1382                if (IS_ERR(obj)) {
1383                        err = PTR_ERR(obj);
1384                        goto out;
1385                }
1386
1387                err = i915_gem_object_pin_pages(obj);
1388                if (err) {
1389                        i915_gem_object_put(obj);
1390                        goto out;
1391                }
1392
1393                list_add(&obj->st_link, &objects);
1394
1395                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1396                if (IS_ERR(vma)) {
1397                        err = PTR_ERR(vma);
1398                        goto out;
1399                }
1400
1401                mutex_lock(&ggtt->vm.mutex);
1402                err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1403                                           obj->base.size,
1404                                           total,
1405                                           obj->cache_level,
1406                                           0);
1407                mutex_unlock(&ggtt->vm.mutex);
1408                if (err) {
1409                        pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1410                               total, ggtt->vm.total, err);
1411                        goto out;
1412                }
1413                track_vma_bind(vma);
1414
1415                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1416                if (vma->node.start != total ||
1417                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1418                        pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1419                               vma->node.start, vma->node.size,
1420                               total, 2*I915_GTT_PAGE_SIZE);
1421                        err = -EINVAL;
1422                        goto out;
1423                }
1424        }
1425
1426        /* And then try at random */
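            /*
             * Finally, unbind each vma and re-reserve it at a random (but
             * suitably aligned) offset, again relying on eviction to clear
             * whatever already overlaps the chosen range.
             */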
1427        list_for_each_entry_safe(obj, on, &objects, st_link) {
1428                struct i915_vma *vma;
1429                u64 offset;
1430
1431                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1432                if (IS_ERR(vma)) {
1433                        err = PTR_ERR(vma);
1434                        goto out;
1435                }
1436
1437                err = i915_vma_unbind(vma);
1438                if (err) {
1439                        pr_err("i915_vma_unbind failed with err=%d!\n", err);
1440                        goto out;
1441                }
1442
1443                offset = igt_random_offset(&prng,
1444                                           0, ggtt->vm.total,
1445                                           2 * I915_GTT_PAGE_SIZE,
1446                                           I915_GTT_MIN_ALIGNMENT);
1447
1448                mutex_lock(&ggtt->vm.mutex);
1449                err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1450                                           obj->base.size,
1451                                           offset,
1452                                           obj->cache_level,
1453                                           0);
1454                mutex_unlock(&ggtt->vm.mutex);
1455                if (err) {
1456                        pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1457                               offset, ggtt->vm.total, err);
1458                        goto out;
1459                }
1460                track_vma_bind(vma);
1461
1462                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1463                if (vma->node.start != offset ||
1464                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1465                        pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1466                               vma->node.start, vma->node.size,
1467                               offset, 2*I915_GTT_PAGE_SIZE);
1468                        err = -EINVAL;
1469                        goto out;
1470                }
1471        }
1472
1473out:
1474        list_for_each_entry_safe(obj, on, &objects, st_link) {
1475                i915_gem_object_unpin_pages(obj);
1476                i915_gem_object_put(obj);
1477        }
1478        return err;
1479}
1480
1481static int igt_gtt_insert(void *arg)
1482{
1483        struct i915_ggtt *ggtt = arg;
1484        struct drm_i915_gem_object *obj, *on;
1485        struct drm_mm_node tmp = {};
1486        const struct invalid_insert {
1487                u64 size;
1488                u64 alignment;
1489                u64 start, end;
1490        } invalid_insert[] = {
1491                {
1492                        ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1493                        0, ggtt->vm.total,
1494                },
1495                {
1496                        2*I915_GTT_PAGE_SIZE, 0,
1497                        0, I915_GTT_PAGE_SIZE,
1498                },
1499                {
1500                        -(u64)I915_GTT_PAGE_SIZE, 0,
1501                        0, 4*I915_GTT_PAGE_SIZE,
1502                },
1503                {
1504                        -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1505                        0, 4*I915_GTT_PAGE_SIZE,
1506                },
1507                {
1508                        I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1509                        I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1510                },
1511                {}
1512        }, *ii;
1513        LIST_HEAD(objects);
1514        u64 total;
1515        int err = -ENODEV;
1516
1517        /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1518         * for the node, evicting other nodes if required.
1519         */
1520
1521        /* Check a couple of obviously invalid requests */
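            /*
             * The table above covers a request larger than the whole GTT,
             * a range too small for the requested size, sizes that wrap
             * past the end of the address space, and an alignment that
             * leaves no legal offset inside [start, end); each one must be
             * rejected with -ENOSPC.
             */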
1522        for (ii = invalid_insert; ii->size; ii++) {
1523                mutex_lock(&ggtt->vm.mutex);
1524                err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1525                                          ii->size, ii->alignment,
1526                                          I915_COLOR_UNEVICTABLE,
1527                                          ii->start, ii->end,
1528                                          0);
1529                mutex_unlock(&ggtt->vm.mutex);
1530                if (err != -ENOSPC) {
1531                        pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) was not rejected with -ENOSPC (err=%d)\n",
1532                               ii->size, ii->alignment, ii->start, ii->end,
1533                               err);
1534                        return -EINVAL;
1535                }
1536        }
1537
1538        /* Start by filling the GGTT */
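            /*
             * Unlike igt_gtt_reserve(), the allocator chooses the offset
             * here. Each vma is pinned after insertion so that nothing can
             * be evicted, and we keep going until the GTT reports -ENOSPC
             * or the loop runs out of space.
             */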
1539        for (total = 0;
1540             total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1541             total += I915_GTT_PAGE_SIZE) {
1542                struct i915_vma *vma;
1543
1544                obj = i915_gem_object_create_internal(ggtt->vm.i915,
1545                                                      I915_GTT_PAGE_SIZE);
1546                if (IS_ERR(obj)) {
1547                        err = PTR_ERR(obj);
1548                        goto out;
1549                }
1550
1551                err = i915_gem_object_pin_pages(obj);
1552                if (err) {
1553                        i915_gem_object_put(obj);
1554                        goto out;
1555                }
1556
1557                list_add(&obj->st_link, &objects);
1558
1559                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1560                if (IS_ERR(vma)) {
1561                        err = PTR_ERR(vma);
1562                        goto out;
1563                }
1564
1565                mutex_lock(&ggtt->vm.mutex);
1566                err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1567                                          obj->base.size, 0, obj->cache_level,
1568                                          0, ggtt->vm.total,
1569                                          0);
1570                mutex_unlock(&ggtt->vm.mutex);
1571                if (err == -ENOSPC) {
1572                        /* maxed out the GGTT space */
1573                        /* obj remains on &objects; released at out: */
1574                        break;
1575                }
1576                if (err) {
1577                        pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1578                               total, ggtt->vm.total, err);
1579                        goto out;
1580                }
1581                track_vma_bind(vma);
1582                __i915_vma_pin(vma);
1583
1584                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1585        }
1586
1587        list_for_each_entry(obj, &objects, st_link) {
1588                struct i915_vma *vma;
1589
1590                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1591                if (IS_ERR(vma)) {
1592                        err = PTR_ERR(vma);
1593                        goto out;
1594                }
1595
1596                if (!drm_mm_node_allocated(&vma->node)) {
1597                        pr_err("VMA was unexpectedly evicted!\n");
1598                        err = -EINVAL;
1599                        goto out;
1600                }
1601
1602                __i915_vma_unpin(vma);
1603        }
1604
1605        /* If we then reinsert, we should find the same hole */
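            /*
             * The GGTT is still full, so unbinding a node leaves exactly
             * one hole of exactly the right size, and the allocator has no
             * choice but to return the node to its previous address.
             */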
1606        list_for_each_entry_safe(obj, on, &objects, st_link) {
1607                struct i915_vma *vma;
1608                u64 offset;
1609
1610                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1611                if (IS_ERR(vma)) {
1612                        err = PTR_ERR(vma);
1613                        goto out;
1614                }
1615
1616                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1617                offset = vma->node.start;
1618
1619                err = i915_vma_unbind(vma);
1620                if (err) {
1621                        pr_err("i915_vma_unbind failed with err=%d!\n", err);
1622                        goto out;
1623                }
1624
1625                mutex_lock(&ggtt->vm.mutex);
1626                err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1627                                          obj->base.size, 0, obj->cache_level,
1628                                          0, ggtt->vm.total,
1629                                          0);
1630                mutex_unlock(&ggtt->vm.mutex);
1631                if (err) {
1632                        pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1633                               offset, ggtt->vm.total, err);
1634                        goto out;
1635                }
1636                track_vma_bind(vma);
1637
1638                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1639                if (vma->node.start != offset) {
1640                        pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1641                               offset, vma->node.start);
1642                        err = -EINVAL;
1643                        goto out;
1644                }
1645        }
1646
1647        /* And then force evictions */
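            /*
             * Everything is now unpinned, and these double-sized objects
             * cannot fit in the remaining space, so each insertion must
             * evict a pair of the single-page nodes placed earlier.
             */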
1648        for (total = 0;
1649             total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1650             total += 2 * I915_GTT_PAGE_SIZE) {
1651                struct i915_vma *vma;
1652
1653                obj = i915_gem_object_create_internal(ggtt->vm.i915,
1654                                                      2 * I915_GTT_PAGE_SIZE);
1655                if (IS_ERR(obj)) {
1656                        err = PTR_ERR(obj);
1657                        goto out;
1658                }
1659
1660                err = i915_gem_object_pin_pages(obj);
1661                if (err) {
1662                        i915_gem_object_put(obj);
1663                        goto out;
1664                }
1665
1666                list_add(&obj->st_link, &objects);
1667
1668                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1669                if (IS_ERR(vma)) {
1670                        err = PTR_ERR(vma);
1671                        goto out;
1672                }
1673
1674                mutex_lock(&ggtt->vm.mutex);
1675                err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1676                                          obj->base.size, 0, obj->cache_level,
1677                                          0, ggtt->vm.total,
1678                                          0);
1679                mutex_unlock(&ggtt->vm.mutex);
1680                if (err) {
1681                        pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1682                               total, ggtt->vm.total, err);
1683                        goto out;
1684                }
1685                track_vma_bind(vma);
1686
1687                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1688        }
1689
1690out:
1691        list_for_each_entry_safe(obj, on, &objects, st_link) {
1692                i915_gem_object_unpin_pages(obj);
1693                i915_gem_object_put(obj);
1694        }
1695        return err;
1696}
1697
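    /*
     * The mock selftests run against a hardware-less fake device, so they
     * exercise only the GTT range management, not the page tables.
     */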
1698int i915_gem_gtt_mock_selftests(void)
1699{
1700        static const struct i915_subtest tests[] = {
1701                SUBTEST(igt_mock_drunk),
1702                SUBTEST(igt_mock_walk),
1703                SUBTEST(igt_mock_pot),
1704                SUBTEST(igt_mock_fill),
1705                SUBTEST(igt_gtt_reserve),
1706                SUBTEST(igt_gtt_insert),
1707        };
1708        struct drm_i915_private *i915;
1709        struct i915_ggtt *ggtt;
1710        int err;
1711
1712        i915 = mock_gem_device();
1713        if (!i915)
1714                return -ENOMEM;
1715
1716        ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1717        if (!ggtt) {
1718                err = -ENOMEM;
1719                goto out_put;
1720        }
1721        mock_init_ggtt(i915, ggtt);
1722
1723        err = i915_subtests(tests, ggtt);
1724
1725        mock_device_flush(i915);
1726        i915_gem_drain_freed_objects(i915);
1727        mock_fini_ggtt(ggtt);
1728        kfree(ggtt);
1729out_put:
1730        mock_destroy_device(i915);
1731        return err;
1732}
1733
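    /*
     * Submit an empty request on @ce and wait briefly for it (and thus for
     * everything previously submitted on that context) to complete,
     * returning -EIO on timeout.
     */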
1734static int context_sync(struct intel_context *ce)
1735{
1736        struct i915_request *rq;
1737        long timeout;
1738
1739        rq = intel_context_create_request(ce);
1740        if (IS_ERR(rq))
1741                return PTR_ERR(rq);
1742
1743        i915_request_get(rq);
1744        i915_request_add(rq);
1745
1746        timeout = i915_request_wait(rq, 0, HZ / 5);
1747        i915_request_put(rq);
1748
1749        return timeout < 0 ? -EIO : 0;
1750}
1751
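    /*
     * Submit a request that starts the batch at @addr, preceded by an
     * initial breadcrumb (where available) so that a subsequent hang can
     * be detected. On success, a reference to the request is returned.
     */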
1752static struct i915_request *
1753submit_batch(struct intel_context *ce, u64 addr)
1754{
1755        struct i915_request *rq;
1756        int err;
1757
1758        rq = intel_context_create_request(ce);
1759        if (IS_ERR(rq))
1760                return rq;
1761
1762        err = 0;
1763        if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1764                err = rq->engine->emit_init_breadcrumb(rq);
1765        if (err == 0)
1766                err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1767
1768        if (err == 0)
1769                i915_request_get(rq);
1770        i915_request_add(rq);
1771
1772        return err ? ERR_PTR(err) : rq;
1773}
1774
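    /*
     * Each 64-byte slot in the 'act' page spins by branching back to its
     * own start; spinner() locates the MI_NOOP (cs[4]) just before the
     * branch, and end_spin() overwrites it with MI_BATCH_BUFFER_END to
     * break the loop the next time the batch executes.
     */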
1775static u32 *spinner(u32 *batch, int i)
1776{
1777        return batch + i * 64 / sizeof(*batch) + 4;
1778}
1779
1780static void end_spin(u32 *batch, int i)
1781{
1782        *spinner(batch, i) = MI_BATCH_BUFFER_END;
1783        wmb();
1784}
1785
1786static int igt_cs_tlb(void *arg)
1787{
1788        const unsigned int count = PAGE_SIZE / 64;
1789        const unsigned int chunk_size = count * PAGE_SIZE;
1790        struct drm_i915_private *i915 = arg;
1791        struct drm_i915_gem_object *bbe, *act, *out;
1792        struct i915_gem_engines_iter it;
1793        struct i915_address_space *vm;
1794        struct i915_gem_context *ctx;
1795        struct intel_context *ce;
1796        struct i915_vma *vma;
1797        I915_RND_STATE(prng);
1798        struct file *file;
1799        unsigned int i;
1800        u32 *result;
1801        u32 *batch;
1802        int err = 0;
1803
1804        /*
1805         * Our mission here is to fool the hardware into executing something
1806         * stale, as it has not seen the batch move (due to the missing
1807         * TLB invalidate).
1808         */
1809
1810        file = mock_file(i915);
1811        if (IS_ERR(file))
1812                return PTR_ERR(file);
1813
1814        ctx = live_context(i915, file);
1815        if (IS_ERR(ctx)) {
1816                err = PTR_ERR(ctx);
1817                goto out_file;
1818        }
1819
1820        vm = i915_gem_context_get_vm_rcu(ctx);
1821        if (i915_is_ggtt(vm))
1822                goto out_vm;
1823
1824        /* Create two pages: a dummy to prefill the TLB, and the real target */
1825        bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1826        if (IS_ERR(bbe)) {
1827                err = PTR_ERR(bbe);
1828                goto out_vm;
1829        }
1830
1831        batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1832        if (IS_ERR(batch)) {
1833                err = PTR_ERR(batch);
1834                goto out_put_bbe;
1835        }
1836        memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1837        i915_gem_object_flush_map(bbe);
1838        i915_gem_object_unpin_map(bbe);
1839
1840        act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1841        if (IS_ERR(act)) {
1842                err = PTR_ERR(act);
1843                goto out_put_bbe;
1844        }
1845
1846        /* Track the execution of each request by writing into a different slot */
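            /*
             * Each 64-byte slot stores its own index into the results page
             * (pinned at the very top of the address space) and then
             * branches back to itself; the branch target (cs[6..7]) is
             * patched in once the batch has been bound at its final address.
             */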
1847        batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1848        if (IS_ERR(batch)) {
1849                err = PTR_ERR(batch);
1850                goto out_put_act;
1851        }
1852        for (i = 0; i < count; i++) {
1853                u32 *cs = batch + i * 64 / sizeof(*cs);
1854                u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1855
1856                GEM_BUG_ON(INTEL_GEN(i915) < 6);
1857                cs[0] = MI_STORE_DWORD_IMM_GEN4;
1858                if (INTEL_GEN(i915) >= 8) {
1859                        cs[1] = lower_32_bits(addr);
1860                        cs[2] = upper_32_bits(addr);
1861                        cs[3] = i;
1862                        cs[4] = MI_NOOP;
1863                        cs[5] = MI_BATCH_BUFFER_START_GEN8;
1864                } else {
1865                        cs[1] = 0;
1866                        cs[2] = lower_32_bits(addr);
1867                        cs[3] = i;
1868                        cs[4] = MI_NOOP;
1869                        cs[5] = MI_BATCH_BUFFER_START;
1870                }
1871        }
1872
1873        out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1874        if (IS_ERR(out)) {
1875                err = PTR_ERR(out);
1876                goto out_put_batch;
1877        }
1878        i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1879
1880        vma = i915_vma_instance(out, vm, NULL);
1881        if (IS_ERR(vma)) {
1882                err = PTR_ERR(vma);
1883                goto out_put_batch;
1884        }
1885
1886        err = i915_vma_pin(vma, 0, 0,
1887                           PIN_USER |
1888                           PIN_OFFSET_FIXED |
1889                           (vm->total - PAGE_SIZE));
1890        if (err)
1891                goto out_put_out;
1892        GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1893
1894        result = i915_gem_object_pin_map(out, I915_MAP_WB);
1895        if (IS_ERR(result)) {
1896                err = PTR_ERR(result);
1897                goto out_put_out;
1898        }
1899
1900        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1901                IGT_TIMEOUT(end_time);
1902                unsigned long pass = 0;
1903
1904                if (!intel_engine_can_store_dword(ce->engine))
1905                        continue;
1906
1907                while (!__igt_timeout(end_time, NULL)) {
1908                        struct i915_vm_pt_stash stash = {};
1909                        struct i915_request *rq;
1910                        u64 offset;
1911
1912                        offset = igt_random_offset(&prng,
1913                                                   0, vm->total - PAGE_SIZE,
1914                                                   chunk_size, PAGE_SIZE);
1915
1916                        memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1917
1918                        vma = i915_vma_instance(bbe, vm, NULL);
1919                        if (IS_ERR(vma)) {
1920                                err = PTR_ERR(vma);
1921                                goto end;
1922                        }
1923
1924                        err = vma->ops->set_pages(vma);
1925                        if (err)
1926                                goto end;
1927
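                            /*
                             * Pre-allocate and pin the page-table memory for
                             * the chunk up front, so that allocate_va_range()
                             * itself cannot fail.
                             */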
1928                        err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
1929                        if (err)
1930                                goto end;
1931
1932                        err = i915_vm_pin_pt_stash(vm, &stash);
1933                        if (err) {
1934                                i915_vm_free_pt_stash(vm, &stash);
1935                                goto end;
1936                        }
1937
1938                        vm->allocate_va_range(vm, &stash, offset, chunk_size);
1939
1940                        i915_vm_free_pt_stash(vm, &stash);
1941
1942                        /* Prime the TLB with the dummy pages */
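                            /*
                             * Bind the dummy page (a single
                             * MI_BATCH_BUFFER_END) at each page of the chunk
                             * in turn and execute from it, loading the TLBs
                             * with translations for the dummy backing store.
                             */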
1943                        for (i = 0; i < count; i++) {
1944                                vma->node.start = offset + i * PAGE_SIZE;
1945                                vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1946
1947                                rq = submit_batch(ce, vma->node.start);
1948                                if (IS_ERR(rq)) {
1949                                        err = PTR_ERR(rq);
1950                                        goto end;
1951                                }
1952                                i915_request_put(rq);
1953                        }
1954
1955                        vma->ops->clear_pages(vma);
1956
1957                        err = context_sync(ce);
1958                        if (err) {
1959                                pr_err("%s: dummy setup timed out\n",
1960                                       ce->engine->name);
1961                                goto end;
1962                        }
1963
1964                        vma = i915_vma_instance(act, vm, NULL);
1965                        if (IS_ERR(vma)) {
1966                                err = PTR_ERR(vma);
1967                                goto end;
1968                        }
1969
1970                        err = vma->ops->set_pages(vma);
1971                        if (err)
1972                                goto end;
1973
1974                        /* Replace the TLB with target batches */
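                            /*
                             * Rewrite the PTEs to point at the real batches,
                             * deliberately without invalidating the TLBs: if
                             * a stale entry survives, the engine executes the
                             * old dummy page instead, result[i] is never
                             * written, and the check below fails.
                             */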
1975                        for (i = 0; i < count; i++) {
1976                                struct i915_request *rq;
1977                                u32 *cs = batch + i * 64 / sizeof(*cs);
1978                                u64 addr;
1979
1980                                vma->node.start = offset + i * PAGE_SIZE;
1981                                vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1982
1983                                addr = vma->node.start + i * 64;
1984                                cs[4] = MI_NOOP;
1985                                cs[6] = lower_32_bits(addr);
1986                                cs[7] = upper_32_bits(addr);
1987                                wmb();
1988
1989                                rq = submit_batch(ce, addr);
1990                                if (IS_ERR(rq)) {
1991                                        err = PTR_ERR(rq);
1992                                        goto end;
1993                                }
1994
1995                                /* Wait until the context chain has started */
1996                                if (i == 0) {
1997                                        while (READ_ONCE(result[i]) &&
1998                                               !i915_request_completed(rq))
1999                                                cond_resched();
2000                                } else {
2001                                        end_spin(batch, i - 1);
2002                                }
2003
2004                                i915_request_put(rq);
2005                        }
2006                        end_spin(batch, count - 1);
2007
2008                        vma->ops->clear_pages(vma);
2009
2010                        err = context_sync(ce);
2011                        if (err) {
2012                                pr_err("%s: writes timed out\n",
2013                                       ce->engine->name);
2014                                goto end;
2015                        }
2016
2017                        for (i = 0; i < count; i++) {
2018                                if (result[i] != i) {
2019                                        pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2020                                               ce->engine->name, pass,
2021                                               offset, i, result[i], i);
2022                                        err = -EINVAL;
2023                                        goto end;
2024                                }
2025                        }
2026
2027                        vm->clear_range(vm, offset, chunk_size);
2028                        pass++;
2029                }
2030        }
2031end:
2032        if (igt_flush_test(i915))
2033                err = -EIO;
2034        i915_gem_context_unlock_engines(ctx);
2035        i915_gem_object_unpin_map(out);
2036out_put_out:
2037        i915_gem_object_put(out);
2038out_put_batch:
2039        i915_gem_object_unpin_map(act);
2040out_put_act:
2041        i915_gem_object_put(act);
2042out_put_bbe:
2043        i915_gem_object_put(bbe);
2044out_vm:
2045        i915_vm_put(vm);
2046out_file:
2047        fput(file);
2048        return err;
2049}
2050
2051int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2052{
2053        static const struct i915_subtest tests[] = {
2054                SUBTEST(igt_ppgtt_alloc),
2055                SUBTEST(igt_ppgtt_lowlevel),
2056                SUBTEST(igt_ppgtt_drunk),
2057                SUBTEST(igt_ppgtt_walk),
2058                SUBTEST(igt_ppgtt_pot),
2059                SUBTEST(igt_ppgtt_fill),
2060                SUBTEST(igt_ppgtt_shrink),
2061                SUBTEST(igt_ppgtt_shrink_boom),
2062                SUBTEST(igt_ggtt_lowlevel),
2063                SUBTEST(igt_ggtt_drunk),
2064                SUBTEST(igt_ggtt_walk),
2065                SUBTEST(igt_ggtt_pot),
2066                SUBTEST(igt_ggtt_fill),
2067                SUBTEST(igt_ggtt_page),
2068                SUBTEST(igt_cs_tlb),
2069        };
2070
2071        GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2072
2073        return i915_subtests(tests, i915);
2074}
2075