linux/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"

#include "i915_random.h"
#include "i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "igt_flush_test.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
        i915_gem_drain_freed_objects(i915);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

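/*
 * Fake backing store: build an sg_table whose entries all alias the same
 * out-of-the-way pfn (PFN_BIAS), in chunks of up to 2GiB (BIT(31)). No real
 * memory is allocated, so the tests can construct "objects" far larger than
 * RAM in order to exercise the full extent of the GTT.
 */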
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        typeof(obj->base.size) rem;

        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
                return -ENOMEM;

        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
                return -ENOMEM;
        }

        sg_page_sizes = 0;
        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));

                GEM_BUG_ON(!len);
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;
                sg_page_sizes |= len;

                rem -= len;
        }
        GEM_BUG_ON(rem);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
        .name = "fake-gem",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = fake_get_pages,
        .put_pages = fake_put_pages,
};

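/*
 * Create a GEM object of the requested size on top of the fake backing
 * store. The pages are pinned once up front so that any sg_table allocation
 * failure is reported at creation time, then unpinned again so the object
 * behaves like any other volatile, shrinkable object.
 */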
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
        static struct lock_class_key lock_class;
        struct drm_i915_gem_object *obj;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                goto err;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

        i915_gem_object_set_volatile(obj);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;

        /* Preallocate the "backing storage" */
        if (i915_gem_object_pin_pages_unlocked(obj))
                goto err_obj;

        i915_gem_object_unpin_pages(obj);
        return obj;

err_obj:
        i915_gem_object_put(obj);
err:
        return ERR_PTR(-ENOMEM);
}

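/*
 * Exercise the page-table allocators directly: first allocate and clear
 * ever larger ranges from the start of the ppGTT, then grow the allocation
 * incrementally, backing off and retrying the whole sequence if the ww
 * locking dance signals -EDEADLK.
 */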
static int igt_ppgtt_alloc(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct i915_ppgtt *ppgtt;
        struct i915_gem_ww_ctx ww;
        u64 size, last, limit;
        int err = 0;

        /* Allocate a ppgtt and try to fill the entire range */

        if (!HAS_PPGTT(dev_priv))
                return 0;

        ppgtt = i915_ppgtt_create(&dev_priv->gt);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;

        /*
         * While we only allocate the page tables here, and so could address
         * a much larger GTT than we could actually fit into RAM, a practical
         * limit is the number of physical pages in the system. This should
         * ensure that we do not run into the oom-killer during the test and
         * wilfully take down the machine.
         */
        limit = totalram_pages() << PAGE_SHIFT;
        limit = min(ppgtt->vm.total, limit);

        i915_gem_ww_ctx_init(&ww, false);
retry:
        err = i915_vm_lock_objects(&ppgtt->vm, &ww);
        if (err)
                goto err_ppgtt_cleanup;

        /* Check we can allocate the entire range */
        for (size = 4096; size <= limit; size <<= 2) {
                struct i915_vm_pt_stash stash = {};

                err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
                if (err)
                        goto err_ppgtt_cleanup;

                err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
                if (err) {
                        i915_vm_free_pt_stash(&ppgtt->vm, &stash);
                        goto err_ppgtt_cleanup;
                }

                ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
                cond_resched();

                ppgtt->vm.clear_range(&ppgtt->vm, 0, size);

                i915_vm_free_pt_stash(&ppgtt->vm, &stash);
        }

        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
                struct i915_vm_pt_stash stash = {};

                err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
                if (err)
                        goto err_ppgtt_cleanup;

                err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
                if (err) {
                        i915_vm_free_pt_stash(&ppgtt->vm, &stash);
                        goto err_ppgtt_cleanup;
                }

                ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
                                            last, size - last);
                cond_resched();

                i915_vm_free_pt_stash(&ppgtt->vm, &stash);
        }

err_ppgtt_cleanup:
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);

        i915_vm_put(&ppgtt->vm);
        return err;
}

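/*
 * Exercise the low-level GTT hooks (allocate_va_range, insert_entries and
 * clear_range) directly, bypassing the vma API. For each power-of-two size,
 * fake objects are inserted across the hole in a random order through a
 * throwaway mock vma that carries just the node and pages the hooks consume.
 */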
static int lowlevel_hole(struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        I915_RND_STATE(seed_prng);
        struct i915_vma *mock_vma;
        unsigned int size;

        mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
        if (!mock_vma)
                return -ENOMEM;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                I915_RND_SUBSTATE(prng, seed_prng);
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                u64 hole_size;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count) {
                        kfree(mock_vma);
                        return -ENOMEM;
                }
                GEM_BUG_ON(!order);

                GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
                GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(vm->i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                GEM_BUG_ON(obj->base.size != BIT_ULL(size));

                if (i915_gem_object_pin_pages_unlocked(obj)) {
                        i915_gem_object_put(obj);
                        kfree(order);
                        break;
                }

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

                        if (igt_timeout(end_time,
                                        "%s timed out before %d/%d\n",
                                        __func__, n, count)) {
                                hole_end = hole_start; /* quit */
                                break;
                        }

                        if (vm->allocate_va_range) {
                                struct i915_vm_pt_stash stash = {};
                                struct i915_gem_ww_ctx ww;
                                int err;

                                i915_gem_ww_ctx_init(&ww, false);
retry:
                                err = i915_vm_lock_objects(vm, &ww);
                                if (err)
                                        goto alloc_vm_end;

                                err = -ENOMEM;
                                if (i915_vm_alloc_pt_stash(vm, &stash,
                                                           BIT_ULL(size)))
                                        goto alloc_vm_end;

                                err = i915_vm_map_pt_stash(vm, &stash);
                                if (!err)
                                        vm->allocate_va_range(vm, &stash,
                                                              addr, BIT_ULL(size));
                                i915_vm_free_pt_stash(vm, &stash);
alloc_vm_end:
                                if (err == -EDEADLK) {
                                        err = i915_gem_ww_ctx_backoff(&ww);
                                        if (!err)
                                                goto retry;
                                }
                                i915_gem_ww_ctx_fini(&ww);

                                if (err)
                                        break;
                        }

                        mock_vma->pages = obj->mm.pages;
                        mock_vma->node.size = BIT_ULL(size);
                        mock_vma->node.start = addr;

                        with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
                                vm->insert_entries(vm, mock_vma,
                                                   I915_CACHE_NONE, 0);
                }
                count = n;

                i915_random_reorder(order, count, &prng);
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
                        with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
                                vm->clear_range(vm, addr, BIT_ULL(size));
                }

                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);

                kfree(order);

                cleanup_freed_objects(vm->i915);
        }

        kfree(mock_vma);
        return 0;
}

static void close_object_list(struct list_head *objects,
                              struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj, *on;
        int ignored;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;

                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);

                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }
}

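/*
 * Pack the hole with objects of prime-scaled sizes, aligned alternately
 * against the top and bottom edges, then verify in both walk orders that
 * every vma remained exactly where it was bound before unbinding it again.
 */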
static int fill_hole(struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        struct drm_i915_gem_object *obj;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
        const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
        unsigned long npages, prime, flags;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Try binding many VMAs, working inwards from either edge */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(prime, 2, max_step) {
                for (npages = 1; npages <= max_pages; npages *= prime) {
                        const u64 full_size = npages << PAGE_SHIFT;
                        const struct {
                                const char *name;
                                u64 offset;
                                int step;
                        } phases[] = {
                                { "top-down", hole_end, -1, },
                                { "bottom-up", hole_start, 1, },
                                { }
                        }, *p;

                        obj = fake_dma_object(vm->i915, full_size);
                        if (IS_ERR(obj))
                                break;

                        list_add(&obj->st_link, &objects);

                        /* Align differing sized objects against the edges, and
                         * check we don't walk off into the void when binding
                         * them into the GTT.
                         */
                        for (p = phases; p->name; p++) {
                                u64 offset;

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }
                        }

                        if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
                                        __func__, npages, prime)) {
                                err = -EINTR;
                                goto err;
                        }
                }

                close_object_list(&objects, vm);
                cleanup_freed_objects(vm->i915);
        }

        return 0;

err:
        close_object_list(&objects, vm);
        return err;
}

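/*
 * Walk a single vma across the hole, rebinding it at each offset in turn.
 * The object size steps through the primes so that both the placement and
 * the release of the node are checked for awkwardly sized vma.
 */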
static int walk_hole(struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
        unsigned long flags;
        u64 size;

        /* Try binding a single VMA in different positions within the hole */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(size, 1, max_pages) {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
                u64 addr;
                int err = 0;

                obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
                if (IS_ERR(obj))
                        break;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_put;
                }

                for (addr = hole_start;
                     addr + obj->base.size < hole_end;
                     addr += obj->base.size) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
                                goto err_put;
                        }
                        i915_vma_unpin(vma);

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
                                goto err_put;
                        }

                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx with err=%d\n",
                                       __func__, addr, vma->size, err);
                                goto err_put;
                        }

                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

                        if (igt_timeout(end_time,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
                                goto err_put;
                        }
                }

err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;

                cleanup_freed_objects(vm->i915);
        }

        return 0;
}

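/*
 * Straddle every power-of-two boundary in the hole with a pair of pages.
 * For each pot, the vma is bound one page below each multiple of
 * step = BIT_ULL(pot); e.g. with 4K pages and pot = 21 the two pages land
 * at 2M - 4K, 4M - 4K, ..., spanning the boundary at which neighbouring
 * page-table structures meet.
 */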
static int pot_hole(struct i915_address_space *vm,
                    u64 hole_start, u64 hole_end,
                    unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned long flags;
        unsigned int pot;
        int err = 0;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        /* Insert a pair of pages across every pot boundary within the hole */
        for (pot = fls64(hole_end - 1) - 1;
             pot > ilog2(2 * I915_GTT_PAGE_SIZE);
             pot--) {
                u64 step = BIT_ULL(pot);
                u64 addr;

                for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr,
                                       hole_start, hole_end,
                                       err);
                                goto err_obj;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err_obj;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);
                }

                if (igt_timeout(end_time,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
                        goto err_obj;
                }
        }

err_obj:
        i915_gem_object_put(obj);
        return err;
}

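/*
 * As lowlevel_hole, but through the normal vma API: bind a single object of
 * each power-of-two size at randomly ordered offsets within the hole,
 * verifying its placement at every step before moving it on.
 */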
static int drunk_hole(struct i915_address_space *vm,
                      u64 hole_start, u64 hole_end,
                      unsigned long end_time)
{
        I915_RND_STATE(prng);
        unsigned int size;
        unsigned long flags;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                struct i915_vma *vma;
                u64 hole_size;
                int err = -ENODEV;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(vm->i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }

                GEM_BUG_ON(vma->size != BIT_ULL(size));

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
                                goto err_obj;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, BIT_ULL(size));
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err_obj;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);

                        if (igt_timeout(end_time,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
                                goto err_obj;
                        }
                }

err_obj:
                i915_gem_object_put(obj);
                kfree(order);
                if (err)
                        return err;

                cleanup_freed_objects(vm->i915);
        }

        return 0;
}

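/*
 * Fill the hole front to back with objects of doubling size, leaving each
 * vma bound (merely unpinned) so that the page-table allocations themselves
 * are exposed to the faults injected by shrink_hole() below.
 */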
static int __shrink_hole(struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
        unsigned int order = 12;
        LIST_HEAD(objects);
        int err = 0;
        u64 addr;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (addr = hole_start; addr < hole_end; ) {
                struct i915_vma *vma;
                u64 size = BIT_ULL(order++);

                size = min(size, hole_end - addr);
                obj = fake_dma_object(vm->i915, size);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
                }

                GEM_BUG_ON(vma->size != size);

                err = i915_vma_pin(vma, 0, 0, addr | flags);
                if (err) {
                        pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                               __func__, addr, size, hole_start, hole_end, err);
                        break;
                }

                if (!drm_mm_node_allocated(&vma->node) ||
                    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                        pr_err("%s incorrect at %llx + %llx\n",
                               __func__, addr, size);
                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        err = -EINVAL;
                        break;
                }

                i915_vma_unpin(vma);
                addr += size;

                /*
                 * Since we are injecting allocation faults at random
                 * intervals, wait for this allocation to complete before we
                 * change the fault injection.
                 */
                err = i915_vma_sync(vma);
                if (err)
                        break;

                if (igt_timeout(end_time,
                                "%s timed out at offset %llx [%llx - %llx]\n",
                                __func__, addr, hole_start, hole_end)) {
                        err = -EINTR;
                        break;
                }
        }

        close_object_list(&objects, vm);
        cleanup_freed_objects(vm->i915);
        return err;
}

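/*
 * Run __shrink_hole() while injecting a failure into the vm's allocations
 * at every prime interval, so that the allocation failures (and the
 * shrinker fallbacks they provoke) land at ever-shifting points.
 */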
static int shrink_hole(struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned long prime;
        int err;

        vm->fault_attr.probability = 999;
        atomic_set(&vm->fault_attr.times, -1);

        for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
                vm->fault_attr.interval = prime;
                err = __shrink_hole(vm, hole_start, hole_end, end_time);
                if (err)
                        break;
        }

        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

        return err;
}

static int shrink_boom(struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned int sizes[] = { SZ_2M, SZ_1G };
        struct drm_i915_gem_object *purge;
        struct drm_i915_gem_object *explode;
        int err;
        int i;

        /*
         * Catch the case which shrink_hole seems to miss. The setup here
         * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
         * ensuring that all vma associated with the respective pd/pdp are
         * unpinned at the time.
         */

        for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
                unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
                unsigned int size = sizes[i];
                struct i915_vma *vma;

                purge = fake_dma_object(vm->i915, size);
                if (IS_ERR(purge))
                        return PTR_ERR(purge);

                vma = i915_vma_instance(purge, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_purge;
                }

                err = i915_vma_pin(vma, 0, 0, flags);
                if (err)
                        goto err_purge;

                /* Should now be ripe for purging */
                i915_vma_unpin(vma);

                explode = fake_dma_object(vm->i915, size);
                if (IS_ERR(explode)) {
                        err = PTR_ERR(explode);
                        goto err_purge;
                }

                vm->fault_attr.probability = 100;
                vm->fault_attr.interval = 1;
                atomic_set(&vm->fault_attr.times, -1);

                vma = i915_vma_instance(explode, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_explode;
                }

                err = i915_vma_pin(vma, 0, 0, flags | size);
                if (err)
                        goto err_explode;

                i915_vma_unpin(vma);

                i915_gem_object_put(purge);
                i915_gem_object_put(explode);

                memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
                cleanup_freed_objects(vm->i915);
        }

        return 0;

err_explode:
        i915_gem_object_put(explode);
err_purge:
        i915_gem_object_put(purge);
        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
        return err;
}

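/*
 * Run one of the hole tests over the entire address range of a freshly
 * created full ppGTT.
 */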
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                          int (*func)(struct i915_address_space *vm,
                                      u64 hole_start, u64 hole_end,
                                      unsigned long end_time))
{
        struct i915_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        struct file *file;
        int err;

        if (!HAS_FULL_PPGTT(dev_priv))
                return 0;

        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ppgtt = i915_ppgtt_create(&dev_priv->gt);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_free;
        }
        GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
        GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));

        err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);

        i915_vm_put(&ppgtt->vm);

out_free:
        fput(file);
        return err;
}

static int igt_ppgtt_fill(void *arg)
{
        return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
        return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
        return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
        return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
        return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
        return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
        return exercise_ppgtt(arg, shrink_boom);
}

static int sort_holes(void *priv, const struct list_head *A,
                      const struct list_head *B)
{
        struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
        struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

        if (a->start < b->start)
                return -1;
        else
                return 1;
}

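/*
 * Run a hole test over every hole currently present in the live GGTT. The
 * holes are sorted by address and the walk restarted after each test, as
 * the test itself may have reshaped the drm_mm behind the iterator's back.
 */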
static int exercise_ggtt(struct drm_i915_private *i915,
                         int (*func)(struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        u64 hole_start, hole_end, last = 0;
        struct drm_mm_node *node;
        IGT_TIMEOUT(end_time);
        int err = 0;

restart:
        list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
        drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;

                if (ggtt->vm.mm.color_adjust)
                        ggtt->vm.mm.color_adjust(node, 0,
                                                 &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;

                err = func(&ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;

                /* As we have manipulated the drm_mm, the list may be corrupt */
                last = hole_end;
                goto restart;
        }

        return err;
}

static int igt_ggtt_fill(void *arg)
{
        return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
        return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
        return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
        return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
        return exercise_ggtt(arg, lowlevel_hole);
}

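/*
 * Sanity check ggtt->vm.insert_page: point every page of a scratch node at
 * the same dma address, then write a distinct dword through the aperture at
 * each offset in a random order. Since all the GTT pages alias one physical
 * page, each write lands at a different dword of that page, and reading the
 * values back (in another random order) confirms every PTE pointed where we
 * asked.
 */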
static int igt_ggtt_page(void *arg)
{
        const unsigned int count = PAGE_SIZE/sizeof(u32);
        I915_RND_STATE(prng);
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;

        if (!i915_ggtt_has_aperture(ggtt))
                return 0;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages_unlocked(obj);
        if (err)
                goto out_free;

        memset(&tmp, 0, sizeof(tmp));
        mutex_lock(&ggtt->vm.mutex);
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        mutex_unlock(&ggtt->vm.mutex);
        if (err)
                goto out_unpin;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;

                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, 0),
                                     offset, I915_CACHE_NONE, 0);
        }

        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
        }
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);

        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
                u32 val;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
                        err = -EINVAL;
                        break;
                }
        }

        kfree(order);
out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(&tmp);
        mutex_unlock(&ggtt->vm.mutex);
out_unpin:
        i915_gem_object_unpin_pages(obj);
out_free:
        i915_gem_object_put(obj);
        return err;
}

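/*
 * Fake just enough of the binding bookkeeping (page references and
 * bound_list membership) for nodes we insert behind the vma API's back via
 * i915_gem_gtt_reserve() and friends.
 */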
static void track_vma_bind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        __i915_gem_object_pin_pages(obj);

        GEM_BUG_ON(vma->pages);
        atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
        __i915_gem_object_pin_pages(obj);
        vma->pages = obj->mm.pages;

        mutex_lock(&vma->vm->mutex);
        list_add_tail(&vma->vm_link, &vma->vm->bound_list);
        mutex_unlock(&vma->vm->mutex);
}

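/*
 * Run a hole test over the address space of a mock context, clamped to the
 * amount of physical memory, mirroring the oom guard in igt_ppgtt_alloc().
 */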
static int exercise_mock(struct drm_i915_private *i915,
                         int (*func)(struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        const u64 limit = totalram_pages() << PAGE_SHIFT;
        struct i915_address_space *vm;
        struct i915_gem_context *ctx;
        IGT_TIMEOUT(end_time);
        int err;

        ctx = mock_context(i915, "mock");
        if (!ctx)
                return -ENOMEM;

        vm = i915_gem_context_get_vm_rcu(ctx);
        err = func(vm, 0, min(vm->total, limit), end_time);
        i915_vm_put(vm);

        mock_context_close(ctx);
        return err;
}

static int igt_mock_fill(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, drunk_hole);
}

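/*
 * Check i915_gem_gtt_reserve() in three passes: fill the GGTT back to back,
 * refill it at offsets that force eviction of the first pass, and finally
 * rebind everything at random offsets.
 */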
static int igt_gtt_reserve(void *arg)
{
        struct i915_ggtt *ggtt = arg;
        struct drm_i915_gem_object *obj, *on;
        I915_RND_STATE(prng);
        LIST_HEAD(objects);
        u64 total;
        int err = -ENODEV;

        /* i915_gem_gtt_reserve() tries to reserve the precise range
         * for the node, and evicts if it has to. So our test checks that
         * it can give us the requested space and prevent overlaps.
         */

        /* Start by filling the GGTT */
        for (total = 0;
             total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
             total += 2 * I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;

                obj = i915_gem_object_create_internal(ggtt->vm.i915,
                                                      2 * PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
                }

                err = i915_gem_object_pin_pages_unlocked(obj);
                if (err) {
                        i915_gem_object_put(obj);
                        goto out;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }

                mutex_lock(&ggtt->vm.mutex);
                err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                mutex_unlock(&ggtt->vm.mutex);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
                               total, ggtt->vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);

                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != total ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
                        pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
                               vma->node.start, vma->node.size,
                               total, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
                        goto out;
                }
        }

        /* Now we start forcing evictions */
        for (total = I915_GTT_PAGE_SIZE;
             total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
             total += 2 * I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;

                obj = i915_gem_object_create_internal(ggtt->vm.i915,
                                                      2 * PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
                }

                err = i915_gem_object_pin_pages_unlocked(obj);
                if (err) {
                        i915_gem_object_put(obj);
                        goto out;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }

                mutex_lock(&ggtt->vm.mutex);
                err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                mutex_unlock(&ggtt->vm.mutex);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
                               total, ggtt->vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);

                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != total ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
                        pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
                               vma->node.start, vma->node.size,
                               total, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
                        goto out;
                }
        }

        /* And then try at random */
        list_for_each_entry_safe(obj, on, &objects, st_link) {
                struct i915_vma *vma;
                u64 offset;

                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }

                err = i915_vma_unbind(vma);
                if (err) {
                        pr_err("i915_vma_unbind failed with err=%d!\n", err);
                        goto out;
                }

                offset = igt_random_offset(&prng,
                                           0, ggtt->vm.total,
                                           2 * I915_GTT_PAGE_SIZE,
                                           I915_GTT_MIN_ALIGNMENT);

                mutex_lock(&ggtt->vm.mutex);
                err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                           obj->base.size,
                                           offset,
                                           obj->cache_level,
1484                                           0);
1485                mutex_unlock(&ggtt->vm.mutex);
1486                if (err) {
1487                        pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1488                               total, ggtt->vm.total, err);
1489                        goto out;
1490                }
1491                track_vma_bind(vma);
1492
1493                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1494                if (vma->node.start != offset ||
1495                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1496                        pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1497                               vma->node.start, vma->node.size,
1498                               offset, 2*I915_GTT_PAGE_SIZE);
1499                        err = -EINVAL;
1500                        goto out;
1501                }
1502        }
1503
1504out:
1505        list_for_each_entry_safe(obj, on, &objects, st_link) {
1506                i915_gem_object_unpin_pages(obj);
1507                i915_gem_object_put(obj);
1508        }
1509        return err;
1510}
1511
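/*
 * A minimal sketch, not wired into the subtests, of the bare
 * i915_gem_gtt_reserve() calling convention exercised above. The helper
 * name is illustrative only; the caller is assumed to have pinned the
 * backing pages for whatever will occupy the node.
 */
static int __maybe_unused
sketch_gtt_reserve_exact(struct i915_address_space *vm,
                         struct drm_mm_node *node,
                         u64 size, u64 offset, unsigned long color)
{
        int err;

        /* The reserve/insert API is guarded by the vm mutex. */
        mutex_lock(&vm->mutex);
        err = i915_gem_gtt_reserve(vm, node, size, offset, color, 0);
        mutex_unlock(&vm->mutex);
        if (err) /* eviction could not clear [offset, offset + size) */
                return err;

        /* On success, the node sits at exactly the requested offset. */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(node->start != offset);
        return 0;
}
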
1512static int igt_gtt_insert(void *arg)
1513{
1514        struct i915_ggtt *ggtt = arg;
1515        struct drm_i915_gem_object *obj, *on;
1516        struct drm_mm_node tmp = {};
1517        const struct invalid_insert {
1518                u64 size;
1519                u64 alignment;
1520                u64 start, end;
1521        } invalid_insert[] = {
1522                {
1523                        ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1524                        0, ggtt->vm.total,
1525                },
1526                {
1527                        2*I915_GTT_PAGE_SIZE, 0,
1528                        0, I915_GTT_PAGE_SIZE,
1529                },
1530                {
1531                        -(u64)I915_GTT_PAGE_SIZE, 0,
1532                        0, 4*I915_GTT_PAGE_SIZE,
1533                },
1534                {
1535                        -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1536                        0, 4*I915_GTT_PAGE_SIZE,
1537                },
1538                {
1539                        I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1540                        I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1541                },
1542                {}
1543        }, *ii;
1544        LIST_HEAD(objects);
1545        u64 total;
1546        int err = -ENODEV;
1547
1548        /* i915_gem_gtt_insert() tries to allocate some free space in the
1549         * GTT for the node, evicting if required. (A bare calling-convention
1550         * sketch follows this function.) */
1551
1552        /* Check a couple of obviously invalid requests */
1553        for (ii = invalid_insert; ii->size; ii++) {
1554                mutex_lock(&ggtt->vm.mutex);
1555                err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1556                                          ii->size, ii->alignment,
1557                                          I915_COLOR_UNEVICTABLE,
1558                                          ii->start, ii->end,
1559                                          0);
1560                mutex_unlock(&ggtt->vm.mutex);
1561                if (err != -ENOSPC) {
1562                        pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1563                               ii->size, ii->alignment, ii->start, ii->end,
1564                               err);
1565                        return -EINVAL;
1566                }
1567        }
1568
1569        /* Start by filling the GGTT */
1570        for (total = 0;
1571             total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1572             total += I915_GTT_PAGE_SIZE) {
1573                struct i915_vma *vma;
1574
1575                obj = i915_gem_object_create_internal(ggtt->vm.i915,
1576                                                      I915_GTT_PAGE_SIZE);
1577                if (IS_ERR(obj)) {
1578                        err = PTR_ERR(obj);
1579                        goto out;
1580                }
1581
1582                err = i915_gem_object_pin_pages_unlocked(obj);
1583                if (err) {
1584                        i915_gem_object_put(obj);
1585                        goto out;
1586                }
1587
1588                list_add(&obj->st_link, &objects);
1589
1590                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1591                if (IS_ERR(vma)) {
1592                        err = PTR_ERR(vma);
1593                        goto out;
1594                }
1595
1596                mutex_lock(&ggtt->vm.mutex);
1597                err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1598                                          obj->base.size, 0, obj->cache_level,
1599                                          0, ggtt->vm.total,
1600                                          0);
1601                mutex_unlock(&ggtt->vm.mutex);
1602                if (err == -ENOSPC) {
1603                        /* maxed out the GGTT space */
1604                        i915_gem_object_put(obj);
1605                        break;
1606                }
1607                if (err) {
1608                        pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1609                               total, ggtt->vm.total, err);
1610                        goto out;
1611                }
1612                track_vma_bind(vma);
1613                __i915_vma_pin(vma);
1614
1615                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1616        }
1617
1618        list_for_each_entry(obj, &objects, st_link) {
1619                struct i915_vma *vma;
1620
1621                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1622                if (IS_ERR(vma)) {
1623                        err = PTR_ERR(vma);
1624                        goto out;
1625                }
1626
1627                if (!drm_mm_node_allocated(&vma->node)) {
1628                        pr_err("VMA was unexpectedly evicted!\n");
1629                        err = -EINVAL;
1630                        goto out;
1631                }
1632
1633                __i915_vma_unpin(vma);
1634        }
1635
1636        /* If we then reinsert, we should find the same hole */
1637        list_for_each_entry_safe(obj, on, &objects, st_link) {
1638                struct i915_vma *vma;
1639                u64 offset;
1640
1641                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1642                if (IS_ERR(vma)) {
1643                        err = PTR_ERR(vma);
1644                        goto out;
1645                }
1646
1647                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1648                offset = vma->node.start;
1649
1650                err = i915_vma_unbind(vma);
1651                if (err) {
1652                        pr_err("i915_vma_unbind failed with err=%d!\n", err);
1653                        goto out;
1654                }
1655
1656                mutex_lock(&ggtt->vm.mutex);
1657                err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1658                                          obj->base.size, 0, obj->cache_level,
1659                                          0, ggtt->vm.total,
1660                                          0);
1661                mutex_unlock(&ggtt->vm.mutex);
1662                if (err) {
1663                        pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1664                               total, ggtt->vm.total, err);
1665                        goto out;
1666                }
1667                track_vma_bind(vma);
1668
1669                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1670                if (vma->node.start != offset) {
1671                        pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1672                               offset, vma->node.start);
1673                        err = -EINVAL;
1674                        goto out;
1675                }
1676        }
1677
1678        /* And then force evictions */
1679        for (total = 0;
1680             total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1681             total += 2 * I915_GTT_PAGE_SIZE) {
1682                struct i915_vma *vma;
1683
1684                obj = i915_gem_object_create_internal(ggtt->vm.i915,
1685                                                      2 * I915_GTT_PAGE_SIZE);
1686                if (IS_ERR(obj)) {
1687                        err = PTR_ERR(obj);
1688                        goto out;
1689                }
1690
1691                err = i915_gem_object_pin_pages_unlocked(obj);
1692                if (err) {
1693                        i915_gem_object_put(obj);
1694                        goto out;
1695                }
1696
1697                list_add(&obj->st_link, &objects);
1698
1699                vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1700                if (IS_ERR(vma)) {
1701                        err = PTR_ERR(vma);
1702                        goto out;
1703                }
1704
1705                mutex_lock(&ggtt->vm.mutex);
1706                err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1707                                          obj->base.size, 0, obj->cache_level,
1708                                          0, ggtt->vm.total,
1709                                          0);
1710                mutex_unlock(&ggtt->vm.mutex);
1711                if (err) {
1712                        pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1713                               total, ggtt->vm.total, err);
1714                        goto out;
1715                }
1716                track_vma_bind(vma);
1717
1718                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1719        }
1720
1721out:
1722        list_for_each_entry_safe(obj, on, &objects, st_link) {
1723                i915_gem_object_unpin_pages(obj);
1724                i915_gem_object_put(obj);
1725        }
1726        return err;
1727}
1728
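/*
 * The matching sketch, again illustrative only, for i915_gem_gtt_insert():
 * the caller supplies size, alignment and a search window rather than an
 * exact offset, and the allocator chooses the placement, evicting if it
 * must.
 */
static int __maybe_unused
sketch_gtt_insert_any(struct i915_address_space *vm,
                      struct drm_mm_node *node,
                      u64 size, unsigned long color)
{
        int err;

        mutex_lock(&vm->mutex);
        err = i915_gem_gtt_insert(vm, node,
                                  size, 0, color, /* any alignment */
                                  0, vm->total,   /* search the whole GTT */
                                  0);
        mutex_unlock(&vm->mutex);

        /* -ENOSPC means even eviction could not make room in the window. */
        return err;
}
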
1729int i915_gem_gtt_mock_selftests(void)
1730{
1731        static const struct i915_subtest tests[] = {
1732                SUBTEST(igt_mock_drunk),
1733                SUBTEST(igt_mock_walk),
1734                SUBTEST(igt_mock_pot),
1735                SUBTEST(igt_mock_fill),
1736                SUBTEST(igt_gtt_reserve),
1737                SUBTEST(igt_gtt_insert),
1738        };
1739        struct drm_i915_private *i915;
1740        struct i915_ggtt *ggtt;
1741        int err;
1742
1743        i915 = mock_gem_device();
1744        if (!i915)
1745                return -ENOMEM;
1746
1747        ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1748        if (!ggtt) {
1749                err = -ENOMEM;
1750                goto out_put;
1751        }
1752        mock_init_ggtt(i915, ggtt);
1753
1754        err = i915_subtests(tests, ggtt);
1755
1756        mock_device_flush(i915);
1757        i915_gem_drain_freed_objects(i915);
1758        mock_fini_ggtt(ggtt);
1759        kfree(ggtt);
1760out_put:
1761        mock_destroy_device(i915);
1762        return err;
1763}
1764
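/* Submit an empty request on @ce and wait up to HZ / 5 for it to retire;
 * a lightweight barrier for prior work on that context.
 */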
1765static int context_sync(struct intel_context *ce)
1766{
1767        struct i915_request *rq;
1768        long timeout;
1769
1770        rq = intel_context_create_request(ce);
1771        if (IS_ERR(rq))
1772                return PTR_ERR(rq);
1773
1774        i915_request_get(rq);
1775        i915_request_add(rq);
1776
1777        timeout = i915_request_wait(rq, 0, HZ / 5);
1778        i915_request_put(rq);
1779
1780        return timeout < 0 ? -EIO : 0;
1781}
1782
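/* Emit a request that starts executing the batch at GTT address @addr.
 * The init breadcrumb, where available, lets a later wait distinguish a
 * request that hung inside the batch from one that never began.
 */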
1783static struct i915_request *
1784submit_batch(struct intel_context *ce, u64 addr)
1785{
1786        struct i915_request *rq;
1787        int err;
1788
1789        rq = intel_context_create_request(ce);
1790        if (IS_ERR(rq))
1791                return rq;
1792
1793        err = 0;
1794        if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1795                err = rq->engine->emit_init_breadcrumb(rq);
1796        if (err == 0)
1797                err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1798
1799        if (err == 0)
1800                i915_request_get(rq);
1801        i915_request_add(rq);
1802
1803        return err ? ERR_PTR(err) : rq;
1804}
1805
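/* Each of the 'count' batches built below occupies a 64-byte slot: a
 * store-dword to record its execution, then at dword 5 an
 * MI_BATCH_BUFFER_START that jumps back into the same slot, i.e. a busy
 * spin. spinner() points at dword 4 (an MI_NOOP) of slot i; end_spin()
 * overwrites it with MI_BATCH_BUFFER_END so the CS stops before reaching
 * the self-referencing jump.
 */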
1806static u32 *spinner(u32 *batch, int i)
1807{
1808        return batch + i * 64 / sizeof(*batch) + 4;
1809}
1810
1811static void end_spin(u32 *batch, int i)
1812{
1813        *spinner(batch, i) = MI_BATCH_BUFFER_END;
1814        wmb();
1815}
1816
1817static int igt_cs_tlb(void *arg)
1818{
1819        const unsigned int count = PAGE_SIZE / 64;
1820        const unsigned int chunk_size = count * PAGE_SIZE;
1821        struct drm_i915_private *i915 = arg;
1822        struct drm_i915_gem_object *bbe, *act, *out;
1823        struct i915_gem_engines_iter it;
1824        struct i915_address_space *vm;
1825        struct i915_gem_context *ctx;
1826        struct intel_context *ce;
1827        struct i915_vma *vma;
1828        I915_RND_STATE(prng);
1829        struct file *file;
1830        unsigned int i;
1831        u32 *result;
1832        u32 *batch;
1833        int err = 0;
1834
1835        /*
1836         * Our mission here is to fool the hardware into executing via a
1837         * stale TLB entry: prime the TLB with dummy batches, then rebind
1838         * real batches at the same addresses without an invalidate.
1839         */
1840
1841        file = mock_file(i915);
1842        if (IS_ERR(file))
1843                return PTR_ERR(file);
1844
1845        ctx = live_context(i915, file);
1846        if (IS_ERR(ctx)) {
1847                err = PTR_ERR(ctx);
1848                goto out_unlock;
1849        }
1850
1851        vm = i915_gem_context_get_vm_rcu(ctx);
1852        if (i915_is_ggtt(vm))
1853                goto out_vm;
1854
1855        /* Create two pages: a dummy to prefill the TLB with, and the real target */
1856        bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1857        if (IS_ERR(bbe)) {
1858                err = PTR_ERR(bbe);
1859                goto out_vm;
1860        }
1861
1862        batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
1863        if (IS_ERR(batch)) {
1864                err = PTR_ERR(batch);
1865                goto out_put_bbe;
1866        }
1867        memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1868        i915_gem_object_flush_map(bbe);
1869        i915_gem_object_unpin_map(bbe);
1870
1871        act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1872        if (IS_ERR(act)) {
1873                err = PTR_ERR(act);
1874                goto out_put_bbe;
1875        }
1876
1877        /* Track the execution of each request by writing into a different slot */
1878        batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
1879        if (IS_ERR(batch)) {
1880                err = PTR_ERR(batch);
1881                goto out_put_act;
1882        }
1883        for (i = 0; i < count; i++) {
1884                u32 *cs = batch + i * 64 / sizeof(*cs);
1885                u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1886
1887                GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
1888                cs[0] = MI_STORE_DWORD_IMM_GEN4;
1889                if (GRAPHICS_VER(i915) >= 8) {
1890                        cs[1] = lower_32_bits(addr);
1891                        cs[2] = upper_32_bits(addr);
1892                        cs[3] = i;
1893                        cs[4] = MI_NOOP;
1894                        cs[5] = MI_BATCH_BUFFER_START_GEN8;
1895                } else {
1896                        cs[1] = 0;
1897                        cs[2] = lower_32_bits(addr);
1898                        cs[3] = i;
1899                        cs[4] = MI_NOOP;
1900                        cs[5] = MI_BATCH_BUFFER_START;
1901                }
1902        }
1903
1904        out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1905        if (IS_ERR(out)) {
1906                err = PTR_ERR(out);
1907                goto out_put_batch;
1908        }
1909        i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1910
1911        vma = i915_vma_instance(out, vm, NULL);
1912        if (IS_ERR(vma)) {
1913                err = PTR_ERR(vma);
1914                goto out_put_out;
1915        }
1916
1917        err = i915_vma_pin(vma, 0, 0,
1918                           PIN_USER |
1919                           PIN_OFFSET_FIXED |
1920                           (vm->total - PAGE_SIZE));
1921        if (err)
1922                goto out_put_out;
1923        GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1924
1925        result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
1926        if (IS_ERR(result)) {
1927                err = PTR_ERR(result);
1928                goto out_put_out;
1929        }
1930
1931        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1932                IGT_TIMEOUT(end_time);
1933                unsigned long pass = 0;
1934
1935                if (!intel_engine_can_store_dword(ce->engine))
1936                        continue;
1937
1938                while (!__igt_timeout(end_time, NULL)) {
1939                        struct i915_vm_pt_stash stash = {};
1940                        struct i915_request *rq;
1941                        struct i915_gem_ww_ctx ww;
1942                        u64 offset;
1943
1944                        offset = igt_random_offset(&prng,
1945                                                   0, vm->total - PAGE_SIZE,
1946                                                   chunk_size, PAGE_SIZE);
1947
1948                        memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1949
1950                        vma = i915_vma_instance(bbe, vm, NULL);
1951                        if (IS_ERR(vma)) {
1952                                err = PTR_ERR(vma);
1953                                goto end;
1954                        }
1955
1956                        err = vma->ops->set_pages(vma);
1957                        if (err)
1958                                goto end;
1959
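                        /* Standard ww transaction: on -EDEADLK, back off
                         * (dropping the contended locks) and replay the whole
                         * locking sequence from 'retry'.
                         */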
1960                        i915_gem_ww_ctx_init(&ww, false);
1961retry:
1962                        err = i915_vm_lock_objects(vm, &ww);
1963                        if (err)
1964                                goto end_ww;
1965
1966                        err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
1967                        if (err)
1968                                goto end_ww;
1969
1970                        err = i915_vm_map_pt_stash(vm, &stash);
1971                        if (!err)
1972                                vm->allocate_va_range(vm, &stash, offset, chunk_size);
1973                        i915_vm_free_pt_stash(vm, &stash);
1974end_ww:
1975                        if (err == -EDEADLK) {
1976                                err = i915_gem_ww_ctx_backoff(&ww);
1977                                if (!err)
1978                                        goto retry;
1979                        }
1980                        i915_gem_ww_ctx_fini(&ww);
1981                        if (err)
1982                                goto end;
1983
1984                        /* Prime the TLB with the dummy pages */
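                        /* (Poking vma->node.start directly and calling
                         * vm->insert_entries bypasses normal binding, so we
                         * fully control which PTEs are rewritten.)
                         */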
1985                        for (i = 0; i < count; i++) {
1986                                vma->node.start = offset + i * PAGE_SIZE;
1987                                vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1988
1989                                rq = submit_batch(ce, vma->node.start);
1990                                if (IS_ERR(rq)) {
1991                                        err = PTR_ERR(rq);
1992                                        goto end;
1993                                }
1994                                i915_request_put(rq);
1995                        }
1996
1997                        vma->ops->clear_pages(vma);
1998
1999                        err = context_sync(ce);
2000                        if (err) {
2001                                pr_err("%s: dummy setup timed out\n",
2002                                       ce->engine->name);
2003                                goto end;
2004                        }
2005
2006                        vma = i915_vma_instance(act, vm, NULL);
2007                        if (IS_ERR(vma)) {
2008                                err = PTR_ERR(vma);
2009                                goto end;
2010                        }
2011
2012                        err = vma->ops->set_pages(vma);
2013                        if (err)
2014                                goto end;
2015
2016                        /* Replace the TLB with target batches */
2017                        for (i = 0; i < count; i++) {
2018                                struct i915_request *rq;
2019                                u32 *cs = batch + i * 64 / sizeof(*cs);
2020                                u64 addr;
2021
2022                                vma->node.start = offset + i * PAGE_SIZE;
2023                                vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
2024
2025                                addr = vma->node.start + i * 64;
2026                                cs[4] = MI_NOOP;
2027                                cs[6] = lower_32_bits(addr);
2028                                cs[7] = upper_32_bits(addr);
2029                                wmb();
2030
2031                                rq = submit_batch(ce, addr);
2032                                if (IS_ERR(rq)) {
2033                                        err = PTR_ERR(rq);
2034                                        goto end;
2035                                }
2036
2037                                /* Wait until the context chain has started */
2038                                if (i == 0) {
2039                                        while (READ_ONCE(result[i]) &&
2040                                               !i915_request_completed(rq))
2041                                                cond_resched();
2042                                } else {
2043                                        end_spin(batch, i - 1);
2044                                }
2045
2046                                i915_request_put(rq);
2047                        }
2048                        end_spin(batch, count - 1);
2049
2050                        vma->ops->clear_pages(vma);
2051
2052                        err = context_sync(ce);
2053                        if (err) {
2054                                pr_err("%s: writes timed out\n",
2055                                       ce->engine->name);
2056                                goto end;
2057                        }
2058
2059                        for (i = 0; i < count; i++) {
2060                                if (result[i] != i) {
2061                                        pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2062                                               ce->engine->name, pass,
2063                                               offset, i, result[i], i);
2064                                        err = -EINVAL;
2065                                        goto end;
2066                                }
2067                        }
2068
2069                        vm->clear_range(vm, offset, chunk_size);
2070                        pass++;
2071                }
2072        }
2073end:
2074        if (igt_flush_test(i915))
2075                err = -EIO;
2076        i915_gem_context_unlock_engines(ctx);
2077        i915_gem_object_unpin_map(out);
2078out_put_out:
2079        i915_gem_object_put(out);
2080out_put_batch:
2081        i915_gem_object_unpin_map(act);
2082out_put_act:
2083        i915_gem_object_put(act);
2084out_put_bbe:
2085        i915_gem_object_put(bbe);
2086out_vm:
2087        i915_vm_put(vm);
2088out_unlock:
2089        fput(file);
2090        return err;
2091}
2092
2093int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2094{
2095        static const struct i915_subtest tests[] = {
2096                SUBTEST(igt_ppgtt_alloc),
2097                SUBTEST(igt_ppgtt_lowlevel),
2098                SUBTEST(igt_ppgtt_drunk),
2099                SUBTEST(igt_ppgtt_walk),
2100                SUBTEST(igt_ppgtt_pot),
2101                SUBTEST(igt_ppgtt_fill),
2102                SUBTEST(igt_ppgtt_shrink),
2103                SUBTEST(igt_ppgtt_shrink_boom),
2104                SUBTEST(igt_ggtt_lowlevel),
2105                SUBTEST(igt_ggtt_drunk),
2106                SUBTEST(igt_ggtt_walk),
2107                SUBTEST(igt_ggtt_pot),
2108                SUBTEST(igt_ggtt_fill),
2109                SUBTEST(igt_ggtt_page),
2110                SUBTEST(igt_cs_tlb),
2111        };
2112
2113        GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2114
2115        return i915_subtests(tests, i915);
2116}
2117