linux/drivers/gpu/drm/i915/i915_gem_gtt.c
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing
 * pages in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to that of the backing store, or where the layout of
 * the pages is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, a
 * globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate
 * on, or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_ggtt_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_ggtt_vma_pages function. This table is stored in the
 * vma.ggtt_view and exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that a passed
 * in struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions); see the sketch following this comment.
 */
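
/*
 * Illustrative sketch (not part of the driver): with the copy semantics
 * described above, a caller may build a view on the stack, pass it in and
 * let it go out of scope straight after the call; the VMA keeps its own
 * copy. The lookup helper named below is an assumption for exposition only.
 *
 *      struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL };
 *      struct i915_vma *vma;
 *
 *      vma = lookup_or_create_ggtt_vma(obj, &view);
 *      if (!IS_ERR(vma))
 *              use_vma(vma);   // 'view' may be discarded at this point
 */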

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
        /*
         * Note that as an uncached mmio write, this will flush the
         * WCB of the writes into the GGTT before it triggers the invalidate.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
        gen6_ggtt_invalidate(dev_priv);
        I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
        intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
        i915->ggtt.invalidate(i915);
}

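/*
 * Sanitize the value of i915.enable_ppgtt against the hardware and vGPU
 * capabilities. The return value selects the PPGTT mode actually used:
 * 0 = disabled, 1 = aliasing PPGTT, 2 = full 32b PPGTT, 3 = full 48b PPGTT.
 */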
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                                int enable_ppgtt)
{
        bool has_full_ppgtt;
        bool has_full_48bit_ppgtt;

        if (!dev_priv->info.has_aliasing_ppgtt)
                return 0;

        has_full_ppgtt = dev_priv->info.has_full_ppgtt;
        has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

        if (intel_vgpu_active(dev_priv)) {
                /* GVT-g has no support for 32bit ppgtt */
                has_full_ppgtt = false;
                has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
        }

        /*
         * We don't allow disabling PPGTT for gen9+ as it's a requirement for
         * execlists, the sole mechanism available to submit work.
         */
        if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
                return 0;

        if (enable_ppgtt == 1)
                return 1;

        if (enable_ppgtt == 2 && has_full_ppgtt)
                return 2;

        if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
                return 3;

        /* Disable ppgtt on SNB if VT-d is on. */
        if (IS_GEN6(dev_priv) && intel_vtd_active()) {
                DRM_INFO("Disabling PPGTT because VT-d is on\n");
                return 0;
        }

        if (has_full_48bit_ppgtt)
                return 3;

        if (has_full_ppgtt)
                return 2;

        return 1;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 unused)
{
        u32 pte_flags;
        int err;

        if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
                err = vma->vm->allocate_va_range(vma->vm,
                                                 vma->node.start, vma->size);
                if (err)
                        return err;
        }

        /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
        if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;

        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

        return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
        vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static int ppgtt_set_pages(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->pages);

        vma->pages = vma->obj->mm.pages;

        vma->page_sizes = vma->obj->mm.page_sizes;

        return 0;
}

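/*
 * Release the pages backing a VMA. For the normal view, vma->pages simply
 * aliases the object's own mm.pages and must not be freed here; alternative
 * views own a private sg_table which is freed along with the pointer.
 */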
static void clear_pages(struct i915_vma *vma)
{
        GEM_BUG_ON(!vma->pages);

        if (vma->pages != vma->obj->mm.pages) {
                sg_free_table(vma->pages);
                kfree(vma->pages);
        }
        vma->pages = NULL;

        memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
                                  enum i915_cache_level level,
                                  u32 flags)
{
        gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

        if (unlikely(flags & PTE_READ_ONLY))
                pte &= ~_PAGE_RW;

        switch (level) {
        case I915_CACHE_NONE:
                pte |= PPAT_UNCACHED;
                break;
        case I915_CACHE_WT:
                pte |= PPAT_DISPLAY_ELLC;
                break;
        default:
                pte |= PPAT_CACHED;
                break;
        }

        return pte;
}
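
/*
 * For example, gen8_pte_encode(0x1000, I915_CACHE_NONE, 0) yields
 * 0x1000 | _PAGE_PRESENT | _PAGE_RW | PPAT_UNCACHED: a present, writable,
 * uncached entry for the dma page at address 0x1000.
 */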

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
                                  const enum i915_cache_level level)
{
        gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
        pde |= addr;
        if (level != I915_CACHE_NONE)
                pde |= PPAT_CACHED_PDE;
        else
                pde |= PPAT_UNCACHED;
        return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 u32 unused)
{
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_L3_LLC:
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                MISSING_CASE(level);
        }

        return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 u32 unused)
{
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_L3_LLC:
                pte |= GEN7_PTE_CACHE_L3_LLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                MISSING_CASE(level);
        }

        return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 u32 flags)
{
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        if (!(flags & PTE_READ_ONLY))
                pte |= BYT_PTE_WRITEABLE;

        if (level != I915_CACHE_NONE)
                pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

        return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 u32 unused)
{
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= HSW_PTE_ADDR_ENCODE(addr);

        if (level != I915_CACHE_NONE)
                pte |= HSW_WB_LLC_AGE3;

        return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
                                  enum i915_cache_level level,
                                  u32 unused)
{
        gen6_pte_t pte = GEN6_PTE_VALID;
        pte |= HSW_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_NONE:
                break;
        case I915_CACHE_WT:
                pte |= HSW_WT_ELLC_LLC_AGE3;
                break;
        default:
                pte |= HSW_WB_ELLC_LLC_AGE3;
                break;
        }

        return pte;
}

static void stash_init(struct pagestash *stash)
{
        pagevec_init(&stash->pvec);
        spin_lock_init(&stash->lock);
}

static struct page *stash_pop_page(struct pagestash *stash)
{
        struct page *page = NULL;

        spin_lock(&stash->lock);
        if (likely(stash->pvec.nr))
                page = stash->pvec.pages[--stash->pvec.nr];
        spin_unlock(&stash->lock);

        return page;
}

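/*
 * Transfer as many pages as will fit from the tail of @pvec into @stash,
 * leaving any overflow in @pvec for the caller to release.
 */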
static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
        int nr;

        spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);

        nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
        memcpy(stash->pvec.pages + stash->pvec.nr,
               pvec->pages + pvec->nr - nr,
               sizeof(pvec->pages[0]) * nr);
        stash->pvec.nr += nr;

        spin_unlock(&stash->lock);

        pvec->nr -= nr;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
        struct pagevec stack;
        struct page *page;

        if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
                i915_gem_shrink_all(vm->i915);

        page = stash_pop_page(&vm->free_pages);
        if (page)
                return page;

        if (!vm->pt_kmap_wc)
                return alloc_page(gfp);

        /* Look in our global stash of WC pages... */
        page = stash_pop_page(&vm->i915->mm.wc_stash);
        if (page)
                return page;

        /*
         * Otherwise batch allocate pages to amortize cost of set_pages_wc.
         *
         * We have to be careful as page allocation may trigger the shrinker
         * (via direct reclaim) which will fill up the WC stash underneath us.
         * So we add our WB pages into a temporary pvec on the stack and merge
         * them into the WC stash after all the allocations are complete.
         */
        pagevec_init(&stack);
        do {
                struct page *page;

                page = alloc_page(gfp);
                if (unlikely(!page))
                        break;

                stack.pages[stack.nr++] = page;
        } while (pagevec_space(&stack));

        if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
                page = stack.pages[--stack.nr];

                /* Merge spare WC pages to the global stash */
                stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);

                /* Push any surplus WC pages onto the local VM stash */
                if (stack.nr)
                        stash_push_pagevec(&vm->free_pages, &stack);
        }

        /* Return unwanted leftovers */
        if (unlikely(stack.nr)) {
                WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
                __pagevec_release(&stack);
        }

        return page;
}

static void vm_free_pages_release(struct i915_address_space *vm,
                                  bool immediate)
{
        struct pagevec *pvec = &vm->free_pages.pvec;
        struct pagevec stack;

        lockdep_assert_held(&vm->free_pages.lock);
        GEM_BUG_ON(!pagevec_count(pvec));

        if (vm->pt_kmap_wc) {
                /*
                 * When we use WC, first fill up the global stash and then
                 * only if full immediately free the overflow.
                 */
                stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);

                /*
                 * As we have made some room in the VM's free_pages,
                 * we can wait for it to fill again. Unless we are
                 * inside i915_address_space_fini() and must
                 * immediately release the pages!
                 */
                if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
                        return;

                /*
                 * We have to drop the lock to allow ourselves to sleep,
                 * so take a copy of the pvec and clear the stash for
                 * others to use it as we sleep.
                 */
                stack = *pvec;
                pagevec_reinit(pvec);
                spin_unlock(&vm->free_pages.lock);

                pvec = &stack;
                set_pages_array_wb(pvec->pages, pvec->nr);

                spin_lock(&vm->free_pages.lock);
        }

        __pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
        /*
         * On !llc, we need to change the pages back to WB. We only do so
         * in bulk, so we rarely need to change the page attributes here,
         * but doing so requires a stop_machine() from deep inside arch/x86/mm.
         * To make detection of the possible sleep more likely, use an
         * unconditional might_sleep() for everybody.
         */
        might_sleep();
        spin_lock(&vm->free_pages.lock);
        if (!pagevec_add(&vm->free_pages.pvec, page))
                vm_free_pages_release(vm, false);
        spin_unlock(&vm->free_pages.lock);
}

static void i915_address_space_init(struct i915_address_space *vm,
                                    struct drm_i915_private *dev_priv)
{
        /*
         * The vm->mutex must be reclaim safe (for use in the shrinker).
         * Do a dummy acquire now under fs_reclaim so that any allocation
         * attempt holding the lock is immediately reported by lockdep.
         */
        mutex_init(&vm->mutex);
        i915_gem_shrinker_taints_mutex(&vm->mutex);

        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);
        vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

        stash_init(&vm->free_pages);

        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
        INIT_LIST_HEAD(&vm->unbound_list);
}

static void i915_address_space_fini(struct i915_address_space *vm)
{
        spin_lock(&vm->free_pages.lock);
        if (pagevec_count(&vm->free_pages.pvec))
                vm_free_pages_release(vm, true);
        GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
        spin_unlock(&vm->free_pages.lock);

        drm_mm_takedown(&vm->mm);

        mutex_destroy(&vm->mutex);
}

static int __setup_page_dma(struct i915_address_space *vm,
                            struct i915_page_dma *p,
                            gfp_t gfp)
{
        p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
        if (unlikely(!p->page))
                return -ENOMEM;

        p->daddr = dma_map_page_attrs(vm->dma,
                                      p->page, 0, PAGE_SIZE,
                                      PCI_DMA_BIDIRECTIONAL,
                                      DMA_ATTR_SKIP_CPU_SYNC |
                                      DMA_ATTR_NO_WARN);
        if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
                vm_free_page(vm, p->page);
                return -ENOMEM;
        }

        return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
                          struct i915_page_dma *p)
{
        return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}

static void cleanup_page_dma(struct i915_address_space *vm,
                             struct i915_page_dma *p)
{
        dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
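
/*
 * The px_*() macros above accept any of the page-table structures (pt, pd,
 * pdp, pml4) and operate on the embedded struct i915_page_dma, so one set
 * of setup/cleanup/fill routines serves every level of the hierarchy.
 */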

static void fill_page_dma(struct i915_address_space *vm,
                          struct i915_page_dma *p,
                          const u64 val)
{
        u64 * const vaddr = kmap_atomic(p->page);

        memset64(vaddr, val, PAGE_SIZE / sizeof(val));

        kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
                             struct i915_page_dma *p,
                             const u32 v)
{
        fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
        unsigned long size;

        /*
         * In order to utilize 64K pages for an object with a size < 2M, we will
         * need to support a 64K scratch page, given that every 16th entry for a
         * page-table operating in 64K mode must point to a properly aligned 64K
         * region, including any PTEs which happen to point to scratch.
         *
         * This is only relevant for the 48b PPGTT where we support
         * huge-gtt-pages, see also i915_vma_insert().
         *
         * TODO: we should really consider write-protecting the scratch-page and
         * sharing between ppgtt
         */
        size = I915_GTT_PAGE_SIZE_4K;
        if (i915_vm_is_48bit(vm) &&
            HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
                size = I915_GTT_PAGE_SIZE_64K;
                gfp |= __GFP_NOWARN;
        }
        gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;

        do {
                int order = get_order(size);
                struct page *page;
                dma_addr_t addr;

                page = alloc_pages(gfp, order);
                if (unlikely(!page))
                        goto skip;

                addr = dma_map_page_attrs(vm->dma,
                                          page, 0, size,
                                          PCI_DMA_BIDIRECTIONAL,
                                          DMA_ATTR_SKIP_CPU_SYNC |
                                          DMA_ATTR_NO_WARN);
                if (unlikely(dma_mapping_error(vm->dma, addr)))
                        goto free_page;

                if (unlikely(!IS_ALIGNED(addr, size)))
                        goto unmap_page;

                vm->scratch_page.page = page;
                vm->scratch_page.daddr = addr;
                vm->scratch_page.order = order;
                return 0;

unmap_page:
                dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
free_page:
                __free_pages(page, order);
skip:
                if (size == I915_GTT_PAGE_SIZE_4K)
                        return -ENOMEM;

                size = I915_GTT_PAGE_SIZE_4K;
                gfp &= ~__GFP_NOWARN;
        } while (1);
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
        struct i915_page_dma *p = &vm->scratch_page;

        dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
                       PCI_DMA_BIDIRECTIONAL);
        __free_pages(p->page, p->order);
}

static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
        struct i915_page_table *pt;

        pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pt))
                return ERR_PTR(-ENOMEM);

        if (unlikely(setup_px(vm, pt))) {
                kfree(pt);
                return ERR_PTR(-ENOMEM);
        }

        pt->used_ptes = 0;
        return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
        cleanup_px(vm, pt);
        kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
                               struct i915_page_table *pt)
{
        fill_px(vm, pt,
                gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
                               struct i915_page_table *pt)
{
        fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
        struct i915_page_directory *pd;

        pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);

        if (unlikely(setup_px(vm, pd))) {
                kfree(pd);
                return ERR_PTR(-ENOMEM);
        }

        pd->used_pdes = 0;
        return pd;
}

static void free_pd(struct i915_address_space *vm,
                    struct i915_page_directory *pd)
{
        cleanup_px(vm, pd);
        kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
                               struct i915_page_directory *pd)
{
        fill_px(vm, pd,
                gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
        memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
}

static int __pdp_init(struct i915_address_space *vm,
                      struct i915_page_directory_pointer *pdp)
{
        const unsigned int pdpes = i915_pdpes_per_pdp(vm);

        pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
                                            I915_GFP_ALLOW_FAIL);
        if (unlikely(!pdp->page_directory))
                return -ENOMEM;

        memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);

        return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
        kfree(pdp->page_directory);
        pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
        return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
        struct i915_page_directory_pointer *pdp;
        int ret = -ENOMEM;

        GEM_BUG_ON(!use_4lvl(vm));

        pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
        if (!pdp)
                return ERR_PTR(-ENOMEM);

        ret = __pdp_init(vm, pdp);
        if (ret)
                goto fail_bitmap;

        ret = setup_px(vm, pdp);
        if (ret)
                goto fail_page_m;

        return pdp;

fail_page_m:
        __pdp_fini(pdp);
fail_bitmap:
        kfree(pdp);

        return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
                     struct i915_page_directory_pointer *pdp)
{
        __pdp_fini(pdp);

        if (!use_4lvl(vm))
                return;

        cleanup_px(vm, pdp);
        kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
                                struct i915_page_directory_pointer *pdp)
{
        gen8_ppgtt_pdpe_t scratch_pdpe;

        scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

        fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
                                 struct i915_pml4 *pml4)
{
        fill_px(vm, pml4,
                gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
        memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
        ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
                                struct i915_page_table *pt,
                                u64 start, u64 length)
{
        unsigned int num_entries = gen8_pte_count(start, length);
        unsigned int pte = gen8_pte_index(start);
        unsigned int pte_end = pte + num_entries;
        const gen8_pte_t scratch_pte =
                gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        gen8_pte_t *vaddr;

        GEM_BUG_ON(num_entries > pt->used_ptes);

        pt->used_ptes -= num_entries;
        if (!pt->used_ptes)
                return true;

        vaddr = kmap_atomic_px(pt);
        while (pte < pte_end)
                vaddr[pte++] = scratch_pte;
        kunmap_atomic(vaddr);

        return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
                               struct i915_page_directory *pd,
                               struct i915_page_table *pt,
                               unsigned int pde)
{
        gen8_pde_t *vaddr;

        pd->page_table[pde] = pt;

        vaddr = kmap_atomic_px(pd);
        vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
        kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
                                struct i915_page_directory *pd,
                                u64 start, u64 length)
{
        struct i915_page_table *pt;
        u32 pde;

        gen8_for_each_pde(pt, pd, start, length, pde) {
                GEM_BUG_ON(pt == vm->scratch_pt);

                if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
                        continue;

                gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
                GEM_BUG_ON(!pd->used_pdes);
                pd->used_pdes--;

                free_pt(vm, pt);
        }

        return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
                                struct i915_page_directory_pointer *pdp,
                                struct i915_page_directory *pd,
                                unsigned int pdpe)
{
        gen8_ppgtt_pdpe_t *vaddr;

        pdp->page_directory[pdpe] = pd;
        if (!use_4lvl(vm))
                return;

        vaddr = kmap_atomic_px(pdp);
        vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
        kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
                                 struct i915_page_directory_pointer *pdp,
                                 u64 start, u64 length)
{
        struct i915_page_directory *pd;
        unsigned int pdpe;

        gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
                GEM_BUG_ON(pd == vm->scratch_pd);

                if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
                        continue;

                gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
                GEM_BUG_ON(!pdp->used_pdpes);
                pdp->used_pdpes--;

                free_pd(vm, pd);
        }

        return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
                                  u64 start, u64 length)
{
        gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
                                 struct i915_page_directory_pointer *pdp,
                                 unsigned int pml4e)
{
        gen8_ppgtt_pml4e_t *vaddr;

        pml4->pdps[pml4e] = pdp;

        vaddr = kmap_atomic_px(pml4);
        vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
        kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
                                  u64 start, u64 length)
{
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_pml4 *pml4 = &ppgtt->pml4;
        struct i915_page_directory_pointer *pdp;
        unsigned int pml4e;

        GEM_BUG_ON(!use_4lvl(vm));

        gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
                GEM_BUG_ON(pdp == vm->scratch_pdp);

                if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
                        continue;

                gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

                free_pdp(vm, pdp);
        }
}

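/*
 * struct sgt_dma is a simple cursor over the VMA's scatterlist: @dma is the
 * next dma address to write into a PTE and @max marks the end of the current
 * segment, at which point we advance to the next scatterlist entry.
 */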
static inline struct sgt_dma {
        struct scatterlist *sg;
        dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
        struct scatterlist *sg = vma->pages->sgl;
        dma_addr_t addr = sg_dma_address(sg);
        return (struct sgt_dma) { sg, addr, addr + sg->length };
}

struct gen8_insert_pte {
        u16 pml4e;
        u16 pdpe;
        u16 pde;
        u16 pte;
};

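/*
 * Decompose a 48b GTT offset into the indices used to walk the 4-level
 * tables: pml4e = bits [47:39], pdpe = bits [38:30], pde = bits [29:21]
 * and pte = bits [20:12], with 512 entries at each level.
 */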
static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
        return (struct gen8_insert_pte) {
                 gen8_pml4e_index(start),
                 gen8_pdpe_index(start),
                 gen8_pde_index(start),
                 gen8_pte_index(start),
        };
}

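/*
 * Write PTEs for the scatterlist into the tables under @pdp, starting at
 * @idx. Returns true if we filled the last pdpe while entries remain (the
 * 4lvl caller then advances to the next pml4e), or false once the
 * scatterlist is exhausted.
 */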
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
                              struct i915_page_directory_pointer *pdp,
                              struct sgt_dma *iter,
                              struct gen8_insert_pte *idx,
                              enum i915_cache_level cache_level,
                              u32 flags)
{
        struct i915_page_directory *pd;
        const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        gen8_pte_t *vaddr;
        bool ret;

        GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
        pd = pdp->page_directory[idx->pdpe];
        vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
        do {
                vaddr[idx->pte] = pte_encode | iter->dma;

                iter->dma += I915_GTT_PAGE_SIZE;
                if (iter->dma >= iter->max) {
                        iter->sg = __sg_next(iter->sg);
                        if (!iter->sg) {
                                ret = false;
                                break;
                        }

                        iter->dma = sg_dma_address(iter->sg);
                        iter->max = iter->dma + iter->sg->length;
                }

                if (++idx->pte == GEN8_PTES) {
                        idx->pte = 0;

                        if (++idx->pde == I915_PDES) {
                                idx->pde = 0;

                                /* Limited by sg length for 3lvl */
                                if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
                                        idx->pdpe = 0;
                                        ret = true;
                                        break;
                                }

                                GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
                                pd = pdp->page_directory[idx->pdpe];
                        }

                        kunmap_atomic(vaddr);
                        vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
                }
        } while (1);
        kunmap_atomic(vaddr);

        return ret;
}

static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
                                   struct i915_vma *vma,
                                   enum i915_cache_level cache_level,
                                   u32 flags)
{
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);
        struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

        gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
                                      cache_level, flags);

        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
                                           struct i915_page_directory_pointer **pdps,
                                           struct sgt_dma *iter,
                                           enum i915_cache_level cache_level,
                                           u32 flags)
{
        const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        u64 start = vma->node.start;
        dma_addr_t rem = iter->sg->length;

        do {
                struct gen8_insert_pte idx = gen8_insert_pte(start);
                struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
                struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
                unsigned int page_size;
                bool maybe_64K = false;
                gen8_pte_t encode = pte_encode;
                gen8_pte_t *vaddr;
                u16 index, max;

                if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
                    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
                    rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
                        index = idx.pde;
                        max = I915_PDES;
                        page_size = I915_GTT_PAGE_SIZE_2M;

                        encode |= GEN8_PDE_PS_2M;

                        vaddr = kmap_atomic_px(pd);
                } else {
                        struct i915_page_table *pt = pd->page_table[idx.pde];

                        index = idx.pte;
                        max = GEN8_PTES;
                        page_size = I915_GTT_PAGE_SIZE;

                        if (!index &&
                            vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
                            IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
                            (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
                             rem >= (max - index) * I915_GTT_PAGE_SIZE))
                                maybe_64K = true;

                        vaddr = kmap_atomic_px(pt);
                }

                do {
                        GEM_BUG_ON(iter->sg->length < page_size);
                        vaddr[index++] = encode | iter->dma;

                        start += page_size;
                        iter->dma += page_size;
                        rem -= page_size;
                        if (iter->dma >= iter->max) {
                                iter->sg = __sg_next(iter->sg);
                                if (!iter->sg)
                                        break;

                                rem = iter->sg->length;
                                iter->dma = sg_dma_address(iter->sg);
                                iter->max = iter->dma + rem;

                                if (maybe_64K && index < max &&
                                    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
                                      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
                                       rem >= (max - index) * I915_GTT_PAGE_SIZE)))
                                        maybe_64K = false;

                                if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
                                        break;
                        }
                } while (rem >= page_size && index < max);

                kunmap_atomic(vaddr);

                /*
                 * Is it safe to mark the 2M block as 64K? -- Either we have
                 * filled whole page-table with 64K entries, or filled part of
                 * it and have reached the end of the sg table and we have
                 * enough padding.
                 */
                if (maybe_64K &&
                    (index == max ||
                     (i915_vm_has_scratch_64K(vma->vm) &&
                      !iter->sg && IS_ALIGNED(vma->node.start +
                                              vma->node.size,
                                              I915_GTT_PAGE_SIZE_2M)))) {
                        vaddr = kmap_atomic_px(pd);
                        vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
                        kunmap_atomic(vaddr);
                        page_size = I915_GTT_PAGE_SIZE_64K;

                        /*
                         * We write all 4K page entries, even when using 64K
                         * pages. In order to verify that the HW isn't cheating
                         * by using the 4K PTE instead of the 64K PTE, we want
                         * to remove all the surplus entries. If the HW skipped
                         * the 64K PTE, it will read/write into the scratch page
                         * instead - which we detect as missing results during
                         * selftests.
                         */
                        if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
                                u16 i;

                                encode = pte_encode | vma->vm->scratch_page.daddr;
                                vaddr = kmap_atomic_px(pd->page_table[idx.pde]);

                                for (i = 1; i < index; i += 16)
                                        memset64(vaddr + i, encode, 15);

                                kunmap_atomic(vaddr);
                        }
                }

                vma->page_sizes.gtt |= page_size;
        } while (iter->sg);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
                                   struct i915_vma *vma,
                                   enum i915_cache_level cache_level,
                                   u32 flags)
{
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);
        struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;

        if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
                                               flags);
        } else {
                struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

                while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
                                                     &iter, &idx, cache_level,
                                                     flags))
                        GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);

                vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
        }
}

static void gen8_free_page_tables(struct i915_address_space *vm,
                                  struct i915_page_directory *pd)
{
        int i;

        for (i = 0; i < I915_PDES; i++) {
                if (pd->page_table[i] != vm->scratch_pt)
                        free_pt(vm, pd->page_table[i]);
        }
}

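/*
 * Set up the scratch hierarchy: every unallocated PTE points at the scratch
 * page, every unallocated PDE at the scratch PT, and so on up the levels,
 * so that stray reads within an otherwise empty range land in scratch.
 */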
static int gen8_init_scratch(struct i915_address_space *vm)
{
        int ret;

        ret = setup_scratch_page(vm, __GFP_HIGHMEM);
        if (ret)
                return ret;

        vm->scratch_pt = alloc_pt(vm);
        if (IS_ERR(vm->scratch_pt)) {
                ret = PTR_ERR(vm->scratch_pt);
                goto free_scratch_page;
        }

        vm->scratch_pd = alloc_pd(vm);
        if (IS_ERR(vm->scratch_pd)) {
                ret = PTR_ERR(vm->scratch_pd);
                goto free_pt;
        }

        if (use_4lvl(vm)) {
                vm->scratch_pdp = alloc_pdp(vm);
                if (IS_ERR(vm->scratch_pdp)) {
                        ret = PTR_ERR(vm->scratch_pdp);
                        goto free_pd;
                }
        }

        gen8_initialize_pt(vm, vm->scratch_pt);
        gen8_initialize_pd(vm, vm->scratch_pd);
        if (use_4lvl(vm))
                gen8_initialize_pdp(vm, vm->scratch_pdp);

        return 0;

free_pd:
        free_pd(vm, vm->scratch_pd);
free_pt:
        free_pt(vm, vm->scratch_pt);
free_scratch_page:
        cleanup_scratch_page(vm);

        return ret;
}

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
        struct i915_address_space *vm = &ppgtt->vm;
        struct drm_i915_private *dev_priv = vm->i915;
        enum vgt_g2v_type msg;
        int i;

        if (use_4lvl(vm)) {
                const u64 daddr = px_dma(&ppgtt->pml4);

                I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
                I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

                msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
                                VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
        } else {
                for (i = 0; i < GEN8_3LVL_PDPES; i++) {
                        const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

                        I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
                        I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
                }

                msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
                                VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
        }

        I915_WRITE(vgtif_reg(g2v_notify), msg);

        return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
        if (use_4lvl(vm))
                free_pdp(vm, vm->scratch_pdp);
        free_pd(vm, vm->scratch_pd);
        free_pt(vm, vm->scratch_pt);
        cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
                                    struct i915_page_directory_pointer *pdp)
{
        const unsigned int pdpes = i915_pdpes_per_pdp(vm);
        int i;

        for (i = 0; i < pdpes; i++) {
                if (pdp->page_directory[i] == vm->scratch_pd)
                        continue;

                gen8_free_page_tables(vm, pdp->page_directory[i]);
                free_pd(vm, pdp->page_directory[i]);
        }

        free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
        int i;

        for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
                if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
                        continue;

                gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
        }

        cleanup_px(&ppgtt->vm, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
        struct drm_i915_private *dev_priv = vm->i915;
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

        if (intel_vgpu_active(dev_priv))
                gen8_ppgtt_notify_vgt(ppgtt, false);

        if (use_4lvl(vm))
                gen8_ppgtt_cleanup_4lvl(ppgtt);
        else
                gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);

        gen8_free_scratch(vm);
}

static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
                               struct i915_page_directory *pd,
                               u64 start, u64 length)
{
        struct i915_page_table *pt;
        u64 from = start;
        unsigned int pde;

        gen8_for_each_pde(pt, pd, start, length, pde) {
                int count = gen8_pte_count(start, length);

                if (pt == vm->scratch_pt) {
                        pd->used_pdes++;

                        pt = alloc_pt(vm);
                        if (IS_ERR(pt)) {
                                pd->used_pdes--;
                                goto unwind;
                        }

                        if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
                                gen8_initialize_pt(vm, pt);

                        gen8_ppgtt_set_pde(vm, pd, pt, pde);
                        GEM_BUG_ON(pd->used_pdes > I915_PDES);
                }

                pt->used_ptes += count;
        }
        return 0;

unwind:
        gen8_ppgtt_clear_pd(vm, pd, from, start - from);
        return -ENOMEM;
}

static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
                                struct i915_page_directory_pointer *pdp,
                                u64 start, u64 length)
{
        struct i915_page_directory *pd;
        u64 from = start;
        unsigned int pdpe;
        int ret;

        gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
                if (pd == vm->scratch_pd) {
                        pdp->used_pdpes++;

                        pd = alloc_pd(vm);
                        if (IS_ERR(pd)) {
                                pdp->used_pdpes--;
                                goto unwind;
                        }

                        gen8_initialize_pd(vm, pd);
                        gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
                        GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

                        mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
                }

                ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
                if (unlikely(ret))
                        goto unwind_pd;
        }

        return 0;

unwind_pd:
        if (!pd->used_pdes) {
                gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
                GEM_BUG_ON(!pdp->used_pdpes);
                pdp->used_pdpes--;
                free_pd(vm, pd);
        }
unwind:
        gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
        return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
                                 u64 start, u64 length)
{
        return gen8_ppgtt_alloc_pdp(vm,
                                    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
                                 u64 start, u64 length)
{
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_pml4 *pml4 = &ppgtt->pml4;
        struct i915_page_directory_pointer *pdp;
        u64 from = start;
        u32 pml4e;
        int ret;

        gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
                if (pml4->pdps[pml4e] == vm->scratch_pdp) {
                        pdp = alloc_pdp(vm);
                        if (IS_ERR(pdp))
                                goto unwind;

                        gen8_initialize_pdp(vm, pdp);
                        gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
                }

                ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
                if (unlikely(ret))
                        goto unwind_pdp;
        }

        return 0;

unwind_pdp:
        if (!pdp->used_pdpes) {
                gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
                free_pdp(vm, pdp);
        }
unwind:
        gen8_ppgtt_clear_4lvl(vm, from, start - from);
        return -ENOMEM;
}

static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
                          struct i915_page_directory_pointer *pdp,
                          u64 start, u64 length,
                          gen8_pte_t scratch_pte,
                          struct seq_file *m)
{
        struct i915_address_space *vm = &ppgtt->vm;
        struct i915_page_directory *pd;
        u32 pdpe;

        gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
                struct i915_page_table *pt;
                u64 pd_len = length;
                u64 pd_start = start;
                u32 pde;

                if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
                        continue;

                seq_printf(m, "\tPDPE #%d\n", pdpe);
                gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
                        u32 pte;
                        gen8_pte_t *pt_vaddr;

                        if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
                                continue;

                        pt_vaddr = kmap_atomic_px(pt);
                        for (pte = 0; pte < GEN8_PTES; pte += 4) {
                                u64 va = (pdpe << GEN8_PDPE_SHIFT |
                                          pde << GEN8_PDE_SHIFT |
                                          pte << GEN8_PTE_SHIFT);
                                int i;
                                bool found = false;

                                for (i = 0; i < 4; i++)
                                        if (pt_vaddr[pte + i] != scratch_pte)
                                                found = true;
                                if (!found)
                                        continue;

                                seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
                                for (i = 0; i < 4; i++) {
                                        if (pt_vaddr[pte + i] != scratch_pte)
                                                seq_printf(m, " %llx", pt_vaddr[pte + i]);
                                        else
                                                seq_puts(m, "  SCRATCH ");
                                }
                                seq_puts(m, "\n");
                        }
                        kunmap_atomic(pt_vaddr);
                }
        }
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
        struct i915_address_space *vm = &ppgtt->vm;
        const gen8_pte_t scratch_pte =
                gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        u64 start = 0, length = ppgtt->vm.total;

        if (use_4lvl(vm)) {
                u64 pml4e;
                struct i915_pml4 *pml4 = &ppgtt->pml4;
                struct i915_page_directory_pointer *pdp;

                gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
                        if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
                                continue;

                        seq_printf(m, "    PML4E #%llu\n", pml4e);
                        gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
                }
        } else {
                gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
        }
}

static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
        struct i915_address_space *vm = &ppgtt->vm;
        struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
        struct i915_page_directory *pd;
        u64 start = 0, length = ppgtt->vm.total;
        u64 from = start;
        unsigned int pdpe;

        gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
                pd = alloc_pd(vm);
                if (IS_ERR(pd))
                        goto unwind;

                gen8_initialize_pd(vm, pd);
                gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
                pdp->used_pdpes++;
        }

        pdp->used_pdpes++; /* never remove */
        return 0;

unwind:
        start -= from;
        gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
                gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
                free_pd(vm, pd);
        }
        pdp->used_pdpes = 0;
        return -ENOMEM;
}

1629/*
1630 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
1631 * registers, with a net effect resembling a 2-level page table in normal x86
1632 * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
1633 * legacy 32b address space.
1634 *
1635 */
1636static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1637{
1638        struct i915_hw_ppgtt *ppgtt;
1639        int err;
1640
1641        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1642        if (!ppgtt)
1643                return ERR_PTR(-ENOMEM);
1644
1645        kref_init(&ppgtt->ref);
1646
1647        ppgtt->vm.i915 = i915;
1648        ppgtt->vm.dma = &i915->drm.pdev->dev;
1649
1650        ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
1651                1ULL << 48 :
1652                1ULL << 32;
1653
1654        /*
1655         * From bdw, there is support for read-only pages in the PPGTT.
1656         *
1657         * XXX GVT is not honouring the lack of RW in the PTE bits.
1658         */
1659        ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
1660
1661        i915_address_space_init(&ppgtt->vm, i915);
1662
1663        /* There are only a few exceptions for gen >= 6: chv and bxt.
1664         * And we are not sure about the latter, so play safe for now.
1665         */
1666        if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1667                ppgtt->vm.pt_kmap_wc = true;
1668
1669        err = gen8_init_scratch(&ppgtt->vm);
1670        if (err)
1671                goto err_free;
1672
1673        if (use_4lvl(&ppgtt->vm)) {
1674                err = setup_px(&ppgtt->vm, &ppgtt->pml4);
1675                if (err)
1676                        goto err_scratch;
1677
1678                gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
1679
1680                ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1681                ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
1682                ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
1683        } else {
1684                err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
1685                if (err)
1686                        goto err_scratch;
1687
1688                if (intel_vgpu_active(i915)) {
1689                        err = gen8_preallocate_top_level_pdp(ppgtt);
1690                        if (err) {
1691                                __pdp_fini(&ppgtt->pdp);
1692                                goto err_scratch;
1693                        }
1694                }
1695
1696                ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1697                ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
1698                ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
1699        }
1700
1701        if (intel_vgpu_active(i915))
1702                gen8_ppgtt_notify_vgt(ppgtt, true);
1703
1704        ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1705        ppgtt->debug_dump = gen8_dump_ppgtt;
1706
1707        ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
1708        ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
1709        ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
1710        ppgtt->vm.vma_ops.clear_pages = clear_pages;
1711
1712        return ppgtt;
1713
1714err_scratch:
1715        gen8_free_scratch(&ppgtt->vm);
1716err_free:
1717        kfree(ppgtt);
1718        return ERR_PTR(err);
1719}
1720
1721static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
1722{
1723        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1724        const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
1725        struct i915_page_table *pt;
1726        u32 pte, pde;
1727
1728        gen6_for_all_pdes(pt, &base->pd, pde) {
1729                gen6_pte_t *vaddr;
1730
1731                if (pt == base->vm.scratch_pt)
1732                        continue;
1733
1734                if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
1735                        u32 expected =
1736                                GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
1737                                GEN6_PDE_VALID;
1738                        u32 pd_entry = readl(ppgtt->pd_addr + pde);
1739
1740                        if (pd_entry != expected)
1741                                seq_printf(m,
1742                                           "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1743                                           pde,
1744                                           pd_entry,
1745                                           expected);
1746
1747                        seq_printf(m, "\tPDE: %x\n", pd_entry);
1748                }
1749
1750                vaddr = kmap_atomic_px(base->pd.page_table[pde]);
1751                for (pte = 0; pte < GEN6_PTES; pte += 4) {
1752                        int i;
1753
1754                        for (i = 0; i < 4; i++)
1755                                if (vaddr[pte + i] != scratch_pte)
1756                                        break;
1757                        if (i == 4)
1758                                continue;
1759
1760                        seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
1761                                   pde, pte,
1762                                   (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
1763                        for (i = 0; i < 4; i++) {
1764                                if (vaddr[pte + i] != scratch_pte)
1765                                        seq_printf(m, " %08x", vaddr[pte + i]);
1766                                else
1767                                        seq_puts(m, "  SCRATCH");
1768                        }
1769                        seq_puts(m, "\n");
1770                }
1771                kunmap_atomic(vaddr);
1772        }
1773}
1774
1775/* Write pde (index) from the page directory @pd to the page table @pt */
1776static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
1777                                  const unsigned int pde,
1778                                  const struct i915_page_table *pt)
1779{
1780        /* Caller needs to make sure the write completes if necessary */
1781        iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1782                  ppgtt->pd_addr + pde);
1783}
1784
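/*
 * Enable PPGTT in hardware via a per-engine masked-bit write to RING_MODE;
 * full 48b (4-level) mode additionally needs GEN8_GFX_PPGTT_48B. On
 * execlists platforms the context descriptor does this instead, see
 * i915_ppgtt_init_hw().
 */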
1785static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1786{
1787        struct intel_engine_cs *engine;
1788        enum intel_engine_id id;
1789
1790        for_each_engine(engine, dev_priv, id) {
1791                u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1792                                 GEN8_GFX_PPGTT_48B : 0;
1793                I915_WRITE(RING_MODE_GEN7(engine),
1794                           _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1795        }
1796}
1797
1798static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1799{
1800        struct intel_engine_cs *engine;
1801        u32 ecochk, ecobits;
1802        enum intel_engine_id id;
1803
1804        ecobits = I915_READ(GAC_ECO_BITS);
1805        I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1806
1807        ecochk = I915_READ(GAM_ECOCHK);
1808        if (IS_HASWELL(dev_priv)) {
1809                ecochk |= ECOCHK_PPGTT_WB_HSW;
1810        } else {
1811                ecochk |= ECOCHK_PPGTT_LLC_IVB;
1812                ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1813        }
1814        I915_WRITE(GAM_ECOCHK, ecochk);
1815
1816        for_each_engine(engine, dev_priv, id) {
1817                /* GFX_MODE is per-ring on gen7+ */
1818                I915_WRITE(RING_MODE_GEN7(engine),
1819                           _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1820        }
1821}
1822
1823static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1824{
1825        u32 ecochk, gab_ctl, ecobits;
1826
1827        ecobits = I915_READ(GAC_ECO_BITS);
1828        I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1829                   ECOBITS_PPGTT_CACHE64B);
1830
1831        gab_ctl = I915_READ(GAB_CTL);
1832        I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1833
1834        ecochk = I915_READ(GAM_ECOCHK);
1835        I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1836
1837        I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1838}
1839
1840/* PPGTT support for Sandybridge/Gen6 and later */
1841static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1842                                   u64 start, u64 length)
1843{
1844        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1845        unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1846        unsigned int pde = first_entry / GEN6_PTES;
1847        unsigned int pte = first_entry % GEN6_PTES;
1848        unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1849        const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
1850
1851        while (num_entries) {
1852                struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
1853                const unsigned int end = min(pte + num_entries, GEN6_PTES);
1854                const unsigned int count = end - pte;
1855                gen6_pte_t *vaddr;
1856
1857                GEM_BUG_ON(pt == vm->scratch_pt);
1858
1859                num_entries -= count;
1860
1861                GEM_BUG_ON(count > pt->used_ptes);
1862                pt->used_ptes -= count;
1863                if (!pt->used_ptes)
1864                        ppgtt->scan_for_unused_pt = true;
1865
1866                /*
1867                 * Note that the hw doesn't support removing PDEs on the fly
1868                 * (they are cached inside the context with no means to
1869                 * invalidate the cache), so we can only reset the PTE
1870                 * entries back to scratch.
1871                 */
1872
1873                vaddr = kmap_atomic_px(pt);
1874                do {
1875                        vaddr[pte++] = scratch_pte;
1876                } while (pte < end);
1877                kunmap_atomic(vaddr);
1878
1879                pte = 0;
1880        }
1881}
1882
1883static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1884                                      struct i915_vma *vma,
1885                                      enum i915_cache_level cache_level,
1886                                      u32 flags)
1887{
1888        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1889        unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
1890        unsigned act_pt = first_entry / GEN6_PTES;
1891        unsigned act_pte = first_entry % GEN6_PTES;
1892        const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1893        struct sgt_dma iter = sgt_dma(vma);
1894        gen6_pte_t *vaddr;
1895
1896        GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
1897
1898        vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1899        do {
1900                vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1901
1902                iter.dma += I915_GTT_PAGE_SIZE;
1903                if (iter.dma == iter.max) {
1904                        iter.sg = __sg_next(iter.sg);
1905                        if (!iter.sg)
1906                                break;
1907
1908                        iter.dma = sg_dma_address(iter.sg);
1909                        iter.max = iter.dma + iter.sg->length;
1910                }
1911
1912                if (++act_pte == GEN6_PTES) {
1913                        kunmap_atomic(vaddr);
1914                        vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1915                        act_pte = 0;
1916                }
1917        } while (1);
1918        kunmap_atomic(vaddr);
1919
1920        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1921}
1922
1923static int gen6_alloc_va_range(struct i915_address_space *vm,
1924                               u64 start, u64 length)
1925{
1926        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1927        struct i915_page_table *pt;
1928        u64 from = start;
1929        unsigned int pde;
1930        bool flush = false;
1931
1932        gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
1933                const unsigned int count = gen6_pte_count(start, length);
1934
1935                if (pt == vm->scratch_pt) {
1936                        pt = alloc_pt(vm);
1937                        if (IS_ERR(pt))
1938                                goto unwind_out;
1939
1940                        gen6_initialize_pt(ppgtt, pt);
1941                        ppgtt->base.pd.page_table[pde] = pt;
1942
1943                        if (i915_vma_is_bound(ppgtt->vma,
1944                                              I915_VMA_GLOBAL_BIND)) {
1945                                gen6_write_pde(ppgtt, pde, pt);
1946                                flush = true;
1947                        }
1948
1949                        GEM_BUG_ON(pt->used_ptes);
1950                }
1951
1952                pt->used_ptes += count;
1953        }
1954
1955        if (flush) {
1956                mark_tlbs_dirty(&ppgtt->base);
1957                gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1958        }
1959
1960        return 0;
1961
1962unwind_out:
1963        gen6_ppgtt_clear_range(vm, from, start - from);
1964        return -ENOMEM;
1965}
1966
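/*
 * Scratch setup: a single scratch page backs one scratch page table whose
 * PTEs (encoded read-only) all point at it, and every PDE initially points
 * at that scratch_pt, so lookups of unallocated addresses resolve to the
 * scratch page.
 */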
1967static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1968{
1969        struct i915_address_space * const vm = &ppgtt->base.vm;
1970        struct i915_page_table *unused;
1971        u32 pde;
1972        int ret;
1973
1974        ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1975        if (ret)
1976                return ret;
1977
1978        ppgtt->scratch_pte =
1979                vm->pte_encode(vm->scratch_page.daddr,
1980                               I915_CACHE_NONE, PTE_READ_ONLY);
1981
1982        vm->scratch_pt = alloc_pt(vm);
1983        if (IS_ERR(vm->scratch_pt)) {
1984                cleanup_scratch_page(vm);
1985                return PTR_ERR(vm->scratch_pt);
1986        }
1987
1988        gen6_initialize_pt(ppgtt, vm->scratch_pt);
1989        gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
1990                ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
1991
1992        return 0;
1993}
1994
1995static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
1996{
1997        free_pt(vm, vm->scratch_pt);
1998        cleanup_scratch_page(vm);
1999}
2000
2001static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
2002{
2003        struct i915_page_table *pt;
2004        u32 pde;
2005
2006        gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
2007                if (pt != ppgtt->base.vm.scratch_pt)
2008                        free_pt(&ppgtt->base.vm, pt);
2009}
2010
2011static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
2012{
2013        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
2014
2015        i915_vma_destroy(ppgtt->vma);
2016
2017        gen6_ppgtt_free_pd(ppgtt);
2018        gen6_ppgtt_free_scratch(vm);
2019}
2020
2021static int pd_vma_set_pages(struct i915_vma *vma)
2022{
2023        vma->pages = ERR_PTR(-ENODEV);
2024        return 0;
2025}
2026
2027static void pd_vma_clear_pages(struct i915_vma *vma)
2028{
2029        GEM_BUG_ON(!vma->pages);
2030
2031        vma->pages = NULL;
2032}
2033
2034static int pd_vma_bind(struct i915_vma *vma,
2035                       enum i915_cache_level cache_level,
2036                       u32 unused)
2037{
2038        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
2039        struct gen6_hw_ppgtt *ppgtt = vma->private;
2040        u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
2041        struct i915_page_table *pt;
2042        unsigned int pde;
2043
2044        ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
2045        ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
2046
2047        gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
2048                gen6_write_pde(ppgtt, pde, pt);
2049
2050        mark_tlbs_dirty(&ppgtt->base);
2051        gen6_ggtt_invalidate(ppgtt->base.vm.i915);
2052
2053        return 0;
2054}
2055
2056static void pd_vma_unbind(struct i915_vma *vma)
2057{
2058        struct gen6_hw_ppgtt *ppgtt = vma->private;
2059        struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
2060        struct i915_page_table *pt;
2061        unsigned int pde;
2062
2063        if (!ppgtt->scan_for_unused_pt)
2064                return;
2065
2066        /* Free all no longer used page tables */
2067        gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
2068                if (pt->used_ptes || pt == scratch_pt)
2069                        continue;
2070
2071                free_pt(&ppgtt->base.vm, pt);
2072                ppgtt->base.pd.page_table[pde] = scratch_pt;
2073        }
2074
2075        ppgtt->scan_for_unused_pt = false;
2076}
2077
2078static const struct i915_vma_ops pd_vma_ops = {
2079        .set_pages = pd_vma_set_pages,
2080        .clear_pages = pd_vma_clear_pages,
2081        .bind_vma = pd_vma_bind,
2082        .unbind_vma = pd_vma_unbind,
2083};
2084
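/*
 * The gen6 page directory lives inside the GGTT, so it is exposed as a bare
 * VMA without a backing object: binding it reserves GGTT space and rewrites
 * all PDEs through the freshly computed pd_addr, while unbinding merely
 * scavenges page tables that have fallen empty.
 */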
2085static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
2086{
2087        struct drm_i915_private *i915 = ppgtt->base.vm.i915;
2088        struct i915_ggtt *ggtt = &i915->ggtt;
2089        struct i915_vma *vma;
2090
2091        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
2092        GEM_BUG_ON(size > ggtt->vm.total);
2093
2094        vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
2095        if (!vma)
2096                return ERR_PTR(-ENOMEM);
2097
2098        init_request_active(&vma->last_fence, NULL);
2099
2100        vma->vm = &ggtt->vm;
2101        vma->ops = &pd_vma_ops;
2102        vma->private = ppgtt;
2103
2104        vma->active = RB_ROOT;
2105
2106        vma->size = size;
2107        vma->fence_size = size;
2108        vma->flags = I915_VMA_GGTT;
2109        vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
2110
2111        INIT_LIST_HEAD(&vma->obj_link);
2112        list_add(&vma->vm_link, &vma->vm->unbound_list);
2113
2114        return vma;
2115}
2116
2117int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2118{
2119        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2120
2121        /*
2122         * Work around the limited maximum vma->pin_count and the aliasing_ppgtt,
2123         * which will be pinned into every active context.
2124         * (When vma->pin_count becomes atomic, I expect we will naturally
2125         * need a larger, unpacked, type and kill this redundancy.)
2126         */
2127        if (ppgtt->pin_count++)
2128                return 0;
2129
2130        /*
2131         * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2132         * allocator works in address space sizes, so it's multiplied by page
2133         * size. We allocate at the top of the GTT to avoid fragmentation.
2134         */
2135        return i915_vma_pin(ppgtt->vma,
2136                            0, GEN6_PD_ALIGN,
2137                            PIN_GLOBAL | PIN_HIGH);
2138}
2139
2140void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
2141{
2142        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2143
2144        GEM_BUG_ON(!ppgtt->pin_count);
2145        if (--ppgtt->pin_count)
2146                return;
2147
2148        i915_vma_unpin(ppgtt->vma);
2149}
2150
2151static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
2152{
2153        struct i915_ggtt * const ggtt = &i915->ggtt;
2154        struct gen6_hw_ppgtt *ppgtt;
2155        int err;
2156
2157        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2158        if (!ppgtt)
2159                return ERR_PTR(-ENOMEM);
2160
2161        kref_init(&ppgtt->base.ref);
2162
2163        ppgtt->base.vm.i915 = i915;
2164        ppgtt->base.vm.dma = &i915->drm.pdev->dev;
2165
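        /*
         * A single page directory of I915_PDES entries, each pointing at a
         * page table of GEN6_PTES entries; with the usual 4K page and
         * 4-byte gen6 PTE that is 512 * 1024 * 4096 = 2GiB of VA space.
         */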
2166        ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
2167
2168        i915_address_space_init(&ppgtt->base.vm, i915);
2169
2170        ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
2171        ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
2172        ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
2173        ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
2174        ppgtt->base.debug_dump = gen6_dump_ppgtt;
2175
2176        ppgtt->base.vm.vma_ops.bind_vma    = ppgtt_bind_vma;
2177        ppgtt->base.vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
2178        ppgtt->base.vm.vma_ops.set_pages   = ppgtt_set_pages;
2179        ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
2180
2181        ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
2182
2183        err = gen6_ppgtt_init_scratch(ppgtt);
2184        if (err)
2185                goto err_free;
2186
2187        ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
2188        if (IS_ERR(ppgtt->vma)) {
2189                err = PTR_ERR(ppgtt->vma);
2190                goto err_scratch;
2191        }
2192
2193        return &ppgtt->base;
2194
2195err_scratch:
2196        gen6_ppgtt_free_scratch(&ppgtt->base.vm);
2197err_free:
2198        kfree(ppgtt);
2199        return ERR_PTR(err);
2200}
2201
2202static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
2203{
2204        /* This function is for GTT-related workarounds. It is called on
2205         * driver load and after a GPU reset, so you can place workarounds
2206         * here even if they get overwritten by a GPU reset.
2207         */
2208        /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2209        if (IS_BROADWELL(dev_priv))
2210                I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2211        else if (IS_CHERRYVIEW(dev_priv))
2212                I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2213        else if (IS_GEN9_LP(dev_priv))
2214                I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2215        else if (INTEL_GEN(dev_priv) >= 9)
2216                I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2217
2218        /*
2219         * To support 64K PTEs we need to first enable the use of the
2220         * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
2221         * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2222         * shouldn't be needed after GEN10.
2223         *
2224         * 64K pages were first introduced from BDW+, although technically they
2225         * only *work* from gen9+. For pre-BDW we instead have the option for
2226         * 32K pages, but we don't currently have any support for it in our
2227         * driver.
2228         */
2229        if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2230            INTEL_GEN(dev_priv) <= 10)
2231                I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2232                           I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2233                           GAMW_ECO_ENABLE_64K_IPS_FIELD);
2234}
2235
2236int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2237{
2238        gtt_write_workarounds(dev_priv);
2239
2240        /* In the case of execlists, PPGTT is enabled by the context descriptor
2241         * and the PDPs are contained within the context itself.  We don't
2242         * need to do anything here. */
2243        if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
2244                return 0;
2245
2246        if (!USES_PPGTT(dev_priv))
2247                return 0;
2248
2249        if (IS_GEN6(dev_priv))
2250                gen6_ppgtt_enable(dev_priv);
2251        else if (IS_GEN7(dev_priv))
2252                gen7_ppgtt_enable(dev_priv);
2253        else if (INTEL_GEN(dev_priv) >= 8)
2254                gen8_ppgtt_enable(dev_priv);
2255        else
2256                MISSING_CASE(INTEL_GEN(dev_priv));
2257
2258        return 0;
2259}
2260
2261static struct i915_hw_ppgtt *
2262__hw_ppgtt_create(struct drm_i915_private *i915)
2263{
2264        if (INTEL_GEN(i915) < 8)
2265                return gen6_ppgtt_create(i915);
2266        else
2267                return gen8_ppgtt_create(i915);
2268}
2269
2270struct i915_hw_ppgtt *
2271i915_ppgtt_create(struct drm_i915_private *i915,
2272                  struct drm_i915_file_private *fpriv)
2273{
2274        struct i915_hw_ppgtt *ppgtt;
2275
2276        ppgtt = __hw_ppgtt_create(i915);
2277        if (IS_ERR(ppgtt))
2278                return ppgtt;
2279
2280        ppgtt->vm.file = fpriv;
2281
2282        trace_i915_ppgtt_create(&ppgtt->vm);
2283
2284        return ppgtt;
2285}
2286
2287void i915_ppgtt_close(struct i915_address_space *vm)
2288{
2289        GEM_BUG_ON(vm->closed);
2290        vm->closed = true;
2291}
2292
2293static void ppgtt_destroy_vma(struct i915_address_space *vm)
2294{
2295        struct list_head *phases[] = {
2296                &vm->active_list,
2297                &vm->inactive_list,
2298                &vm->unbound_list,
2299                NULL,
2300        }, **phase;
2301
2302        vm->closed = true;
2303        for (phase = phases; *phase; phase++) {
2304                struct i915_vma *vma, *vn;
2305
2306                list_for_each_entry_safe(vma, vn, *phase, vm_link)
2307                        i915_vma_destroy(vma);
2308        }
2309}
2310
2311void i915_ppgtt_release(struct kref *kref)
2312{
2313        struct i915_hw_ppgtt *ppgtt =
2314                container_of(kref, struct i915_hw_ppgtt, ref);
2315
2316        trace_i915_ppgtt_release(&ppgtt->vm);
2317
2318        ppgtt_destroy_vma(&ppgtt->vm);
2319
2320        GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
2321        GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
2322        GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
2323
2324        ppgtt->vm.cleanup(&ppgtt->vm);
2325        i915_address_space_fini(&ppgtt->vm);
2326        kfree(ppgtt);
2327}
2328
2329/* Certain Gen5 chipsets require idling the GPU before
2330 * unmapping anything from the GTT when VT-d is enabled.
2331 */
2332static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2333{
2334        /* Query intel_iommu to see if we need the workaround. Presumably that
2335         * was loaded first.
2336         */
2337        return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
2338}
2339
2340static void gen6_check_faults(struct drm_i915_private *dev_priv)
2341{
2342        struct intel_engine_cs *engine;
2343        enum intel_engine_id id;
2344        u32 fault;
2345
2346        for_each_engine(engine, dev_priv, id) {
2347                fault = I915_READ(RING_FAULT_REG(engine));
2348                if (fault & RING_FAULT_VALID) {
2349                        DRM_DEBUG_DRIVER("Unexpected fault\n"
2350                                         "\tAddr: 0x%08lx\n"
2351                                         "\tAddress space: %s\n"
2352                                         "\tSource ID: %d\n"
2353                                         "\tType: %d\n",
2354                                         fault & PAGE_MASK,
2355                                         fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2356                                         RING_FAULT_SRCID(fault),
2357                                         RING_FAULT_FAULT_TYPE(fault));
2358                }
2359        }
2360}
2361
2362static void gen8_check_faults(struct drm_i915_private *dev_priv)
2363{
2364        u32 fault = I915_READ(GEN8_RING_FAULT_REG);
2365
2366        if (fault & RING_FAULT_VALID) {
2367                u32 fault_data0, fault_data1;
2368                u64 fault_addr;
2369
2370                fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
2371                fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
2372                fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
2373                             ((u64)fault_data0 << 12);
2374
2375                DRM_DEBUG_DRIVER("Unexpected fault\n"
2376                                 "\tAddr: 0x%08x_%08x\n"
2377                                 "\tAddress space: %s\n"
2378                                 "\tEngine ID: %d\n"
2379                                 "\tSource ID: %d\n"
2380                                 "\tType: %d\n",
2381                                 upper_32_bits(fault_addr),
2382                                 lower_32_bits(fault_addr),
2383                                 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
2384                                 GEN8_RING_FAULT_ENGINE_ID(fault),
2385                                 RING_FAULT_SRCID(fault),
2386                                 RING_FAULT_FAULT_TYPE(fault));
2387        }
2388}
2389
2390void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2391{
2392        /* From GEN8 onwards we only have one 'All Engine Fault Register' */
2393        if (INTEL_GEN(dev_priv) >= 8)
2394                gen8_check_faults(dev_priv);
2395        else if (INTEL_GEN(dev_priv) >= 6)
2396                gen6_check_faults(dev_priv);
2397        else
2398                return;
2399
2400        i915_clear_error_registers(dev_priv);
2401}
2402
2403void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2404{
2405        struct i915_ggtt *ggtt = &dev_priv->ggtt;
2406
2407        /* Don't bother messing with faults pre GEN6 as we have little
2408         * documentation supporting that it's a good idea.
2409         */
2410        if (INTEL_GEN(dev_priv) < 6)
2411                return;
2412
2413        i915_check_and_clear_faults(dev_priv);
2414
2415        ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2416
2417        i915_ggtt_invalidate(dev_priv);
2418}
2419
2420int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2421                               struct sg_table *pages)
2422{
2423        do {
2424                if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2425                                     pages->sgl, pages->nents,
2426                                     PCI_DMA_BIDIRECTIONAL,
2427                                     DMA_ATTR_NO_WARN))
2428                        return 0;
2429
2430                /* If the DMA remap fails, one cause can be that we have
2431                 * too many objects pinned in a small remapping table,
2432                 * such as swiotlb. Incrementally purge all other objects and
2433                 * try again - if there are no more pages to remove from
2434                 * the DMA remapper, i915_gem_shrink will return 0.
2435                 */
2436                GEM_BUG_ON(obj->mm.pages == pages);
2437        } while (i915_gem_shrink(to_i915(obj->base.dev),
2438                                 obj->base.size >> PAGE_SHIFT, NULL,
2439                                 I915_SHRINK_BOUND |
2440                                 I915_SHRINK_UNBOUND |
2441                                 I915_SHRINK_ACTIVE));
2442
2443        return -ENOSPC;
2444}
2445
2446static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2447{
2448        writeq(pte, addr);
2449}
2450
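/*
 * Single-page GGTT update on gen8: encode the PTE, write it as one 64b
 * access into the GSM, then invalidate the GGTT TLBs.
 */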
2451static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2452                                  dma_addr_t addr,
2453                                  u64 offset,
2454                                  enum i915_cache_level level,
2455                                  u32 unused)
2456{
2457        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2458        gen8_pte_t __iomem *pte =
2459                (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2460
2461        gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
2462
2463        ggtt->invalidate(vm->i915);
2464}
2465
2466static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2467                                     struct i915_vma *vma,
2468                                     enum i915_cache_level level,
2469                                     u32 flags)
2470{
2471        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2472        struct sgt_iter sgt_iter;
2473        gen8_pte_t __iomem *gtt_entries;
2474        const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
2475        dma_addr_t addr;
2476
2477        /*
2478         * Note that we ignore PTE_READ_ONLY here. The caller must be careful
2479         * not to allow the user to override access to a read only page.
2480         */
2481
2482        gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2483        gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
2484        for_each_sgt_dma(addr, sgt_iter, vma->pages)
2485                gen8_set_pte(gtt_entries++, pte_encode | addr);
2486
2487        /*
2488         * We want to flush the TLBs only after we're certain all the PTE
2489         * updates have finished.
2490         */
2491        ggtt->invalidate(vm->i915);
2492}
2493
2494static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2495                                  dma_addr_t addr,
2496                                  u64 offset,
2497                                  enum i915_cache_level level,
2498                                  u32 flags)
2499{
2500        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2501        gen6_pte_t __iomem *pte =
2502                (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2503
2504        iowrite32(vm->pte_encode(addr, level, flags), pte);
2505
2506        ggtt->invalidate(vm->i915);
2507}
2508
2509/*
2510 * Binds an object into the global GTT with the specified cache level. The
2511 * object will be accessible to the GPU via commands whose operands reference
2512 * offsets within the global GTT, as well as accessible by the CPU through the
2513 * GMADR mapped BAR (dev_priv->mm.gtt->gtt).
2514 */
2515static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2516                                     struct i915_vma *vma,
2517                                     enum i915_cache_level level,
2518                                     u32 flags)
2519{
2520        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2521        gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2522        unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
2523        struct sgt_iter iter;
2524        dma_addr_t addr;
2525        for_each_sgt_dma(addr, iter, vma->pages)
2526                iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2527
2528        /*
2529         * We want to flush the TLBs only after we're certain all the PTE
2530         * updates have finished.
2531         */
2532        ggtt->invalidate(vm->i915);
2533}
2534
2535static void nop_clear_range(struct i915_address_space *vm,
2536                            u64 start, u64 length)
2537{
2538}
2539
2540static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2541                                  u64 start, u64 length)
2542{
2543        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2544        unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2545        unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2546        const gen8_pte_t scratch_pte =
2547                gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
2548        gen8_pte_t __iomem *gtt_base =
2549                (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2550        const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2551        int i;
2552
2553        if (WARN(num_entries > max_entries,
2554                 "First entry = %d; Num entries = %d (max=%d)\n",
2555                 first_entry, num_entries, max_entries))
2556                num_entries = max_entries;
2557
2558        for (i = 0; i < num_entries; i++)
2559                gen8_set_pte(&gtt_base[i], scratch_pte);
2560}
2561
2562static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2563{
2564        struct drm_i915_private *dev_priv = vm->i915;
2565
2566        /*
2567         * Make sure the internal GAM fifo has been cleared of all GTT
2568         * writes before exiting stop_machine(). This guarantees that
2569         * any aperture accesses waiting to start in another process
2570         * cannot back up behind the GTT writes causing a hang.
2571         * The register can be any arbitrary GAM register.
2572         */
2573        POSTING_READ(GFX_FLSH_CNTL_GEN6);
2574}
2575
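/*
 * BXT + VT-d workaround: each GGTT update below is packaged into an
 * argument struct and replayed under stop_machine(), so no other CPU can
 * hit the aperture while the PTE writes and the GAM fifo flush
 * (bxt_vtd_ggtt_wa) are in flight.
 */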
2576struct insert_page {
2577        struct i915_address_space *vm;
2578        dma_addr_t addr;
2579        u64 offset;
2580        enum i915_cache_level level;
2581};
2582
2583static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2584{
2585        struct insert_page *arg = _arg;
2586
2587        gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2588        bxt_vtd_ggtt_wa(arg->vm);
2589
2590        return 0;
2591}
2592
2593static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2594                                          dma_addr_t addr,
2595                                          u64 offset,
2596                                          enum i915_cache_level level,
2597                                          u32 unused)
2598{
2599        struct insert_page arg = { vm, addr, offset, level };
2600
2601        stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2602}
2603
2604struct insert_entries {
2605        struct i915_address_space *vm;
2606        struct i915_vma *vma;
2607        enum i915_cache_level level;
2608        u32 flags;
2609};
2610
2611static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2612{
2613        struct insert_entries *arg = _arg;
2614
2615        gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2616        bxt_vtd_ggtt_wa(arg->vm);
2617
2618        return 0;
2619}
2620
2621static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2622                                             struct i915_vma *vma,
2623                                             enum i915_cache_level level,
2624                                             u32 flags)
2625{
2626        struct insert_entries arg = { vm, vma, level, flags };
2627
2628        stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2629}
2630
2631struct clear_range {
2632        struct i915_address_space *vm;
2633        u64 start;
2634        u64 length;
2635};
2636
2637static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2638{
2639        struct clear_range *arg = _arg;
2640
2641        gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2642        bxt_vtd_ggtt_wa(arg->vm);
2643
2644        return 0;
2645}
2646
2647static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2648                                          u64 start,
2649                                          u64 length)
2650{
2651        struct clear_range arg = { vm, start, length };
2652
2653        stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2654}
2655
2656static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2657                                  u64 start, u64 length)
2658{
2659        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2660        unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2661        unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2662        gen6_pte_t scratch_pte, __iomem *gtt_base =
2663                (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2664        const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2665        int i;
2666
2667        if (WARN(num_entries > max_entries,
2668                 "First entry = %d; Num entries = %d (max=%d)\n",
2669                 first_entry, num_entries, max_entries))
2670                num_entries = max_entries;
2671
2672        scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
2673                                     I915_CACHE_LLC, 0);
2674
2675        for (i = 0; i < num_entries; i++)
2676                iowrite32(scratch_pte, &gtt_base[i]);
2677}
2678
2679static void i915_ggtt_insert_page(struct i915_address_space *vm,
2680                                  dma_addr_t addr,
2681                                  u64 offset,
2682                                  enum i915_cache_level cache_level,
2683                                  u32 unused)
2684{
2685        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2686                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2687
2688        intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2689}
2690
2691static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2692                                     struct i915_vma *vma,
2693                                     enum i915_cache_level cache_level,
2694                                     u32 unused)
2695{
2696        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2697                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2698
2699        intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2700                                    flags);
2701}
2702
2703static void i915_ggtt_clear_range(struct i915_address_space *vm,
2704                                  u64 start, u64 length)
2705{
2706        intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2707}
2708
2709static int ggtt_bind_vma(struct i915_vma *vma,
2710                         enum i915_cache_level cache_level,
2711                         u32 flags)
2712{
2713        struct drm_i915_private *i915 = vma->vm->i915;
2714        struct drm_i915_gem_object *obj = vma->obj;
2715        u32 pte_flags;
2716
2717        /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2718        pte_flags = 0;
2719        if (i915_gem_object_is_readonly(obj))
2720                pte_flags |= PTE_READ_ONLY;
2721
2722        intel_runtime_pm_get(i915);
2723        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2724        intel_runtime_pm_put(i915);
2725
2726        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2727
2728        /*
2729         * Without aliasing PPGTT there's no difference between
2730         * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2731         * upgrade to both bound if we bind either to avoid double-binding.
2732         */
2733        vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2734
2735        return 0;
2736}
2737
2738static void ggtt_unbind_vma(struct i915_vma *vma)
2739{
2740        struct drm_i915_private *i915 = vma->vm->i915;
2741
2742        intel_runtime_pm_get(i915);
2743        vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2744        intel_runtime_pm_put(i915);
2745}
2746
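/*
 * With an aliasing PPGTT the same address range is shadowed by PPGTT
 * entries: I915_VMA_LOCAL_BIND allocates (on first use) and fills the
 * aliasing PPGTT's PTEs, whereas I915_VMA_GLOBAL_BIND writes the real
 * GGTT PTEs.
 */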
2747static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2748                                 enum i915_cache_level cache_level,
2749                                 u32 flags)
2750{
2751        struct drm_i915_private *i915 = vma->vm->i915;
2752        u32 pte_flags;
2753        int ret;
2754
2755        /* Currently applicable only to VLV */
2756        pte_flags = 0;
2757        if (i915_gem_object_is_readonly(vma->obj))
2758                pte_flags |= PTE_READ_ONLY;
2759
2760        if (flags & I915_VMA_LOCAL_BIND) {
2761                struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2762
2763                if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2764                        ret = appgtt->vm.allocate_va_range(&appgtt->vm,
2765                                                           vma->node.start,
2766                                                           vma->size);
2767                        if (ret)
2768                                return ret;
2769                }
2770
2771                appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
2772                                          pte_flags);
2773        }
2774
2775        if (flags & I915_VMA_GLOBAL_BIND) {
2776                intel_runtime_pm_get(i915);
2777                vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2778                intel_runtime_pm_put(i915);
2779        }
2780
2781        return 0;
2782}
2783
2784static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2785{
2786        struct drm_i915_private *i915 = vma->vm->i915;
2787
2788        if (vma->flags & I915_VMA_GLOBAL_BIND) {
2789                intel_runtime_pm_get(i915);
2790                vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2791                intel_runtime_pm_put(i915);
2792        }
2793
2794        if (vma->flags & I915_VMA_LOCAL_BIND) {
2795                struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
2796
2797                vm->clear_range(vm, vma->node.start, vma->size);
2798        }
2799}
2800
2801void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2802                               struct sg_table *pages)
2803{
2804        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2805        struct device *kdev = &dev_priv->drm.pdev->dev;
2806        struct i915_ggtt *ggtt = &dev_priv->ggtt;
2807
2808        if (unlikely(ggtt->do_idle_maps)) {
2809                if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
2810                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2811                        /* Wait a bit, in hopes it avoids the hang */
2812                        udelay(10);
2813                }
2814        }
2815
2816        dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2817}
2818
2819static int ggtt_set_pages(struct i915_vma *vma)
2820{
2821        int ret;
2822
2823        GEM_BUG_ON(vma->pages);
2824
2825        ret = i915_get_ggtt_vma_pages(vma);
2826        if (ret)
2827                return ret;
2828
2829        vma->page_sizes = vma->obj->mm.page_sizes;
2830
2831        return 0;
2832}
2833
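/*
 * drm_mm color-adjust callback: nudge the range inwards so that one scratch
 * page always separates nodes of different color, and so that a guard page
 * remains before the reserved node after the end of the GTT.
 */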
2834static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2835                                  unsigned long color,
2836                                  u64 *start,
2837                                  u64 *end)
2838{
2839        if (node->allocated && node->color != color)
2840                *start += I915_GTT_PAGE_SIZE;
2841
2842        /* Also leave a space between the unallocated reserved node after the
2843         * GTT and any objects within the GTT, i.e. we use the color adjustment
2844         * to insert a guard page to prevent prefetches crossing over the
2845         * GTT boundary.
2846         */
2847        node = list_next_entry(node, node_list);
2848        if (node->color != color)
2849                *end -= I915_GTT_PAGE_SIZE;
2850}
2851
2852int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2853{
2854        struct i915_ggtt *ggtt = &i915->ggtt;
2855        struct i915_hw_ppgtt *ppgtt;
2856        int err;
2857
2858        ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
2859        if (IS_ERR(ppgtt))
2860                return PTR_ERR(ppgtt);
2861
2862        if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2863                err = -ENODEV;
2864                goto err_ppgtt;
2865        }
2866
2867        /*
2868         * Note we only pre-allocate as far as the end of the global
2869         * GTT. On 48b / 4-level page-tables, the difference is very,
2870         * very significant! We have to preallocate as GVT/vgpu does
2871         * not like the page directory disappearing.
2872         */
2873        err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2874        if (err)
2875                goto err_ppgtt;
2876
2877        i915->mm.aliasing_ppgtt = ppgtt;
2878
2879        GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2880        ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2881
2882        GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2883        ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2884
2885        return 0;
2886
2887err_ppgtt:
2888        i915_ppgtt_put(ppgtt);
2889        return err;
2890}
2891
2892void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2893{
2894        struct i915_ggtt *ggtt = &i915->ggtt;
2895        struct i915_hw_ppgtt *ppgtt;
2896
2897        ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2898        if (!ppgtt)
2899                return;
2900
2901        i915_ppgtt_put(ppgtt);
2902
2903        ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
2904        ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2905}
2906
2907int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2908{
2909        /* Let GEM manage all of the aperture.
2910         *
2911         * However, leave one page at the end still bound to the scratch page.
2912         * There are a number of places where the hardware apparently prefetches
2913         * past the end of the object, and we've seen multiple hangs with the
2914         * GPU head pointer stuck in a batchbuffer bound at the last page of the
2915         * aperture.  One page should be enough to keep any prefetching inside
2916         * of the aperture.
2917         */
2918        struct i915_ggtt *ggtt = &dev_priv->ggtt;
2919        unsigned long hole_start, hole_end;
2920        struct drm_mm_node *entry;
2921        int ret;
2922
2923        /*
2924         * GuC requires all resources that we're sharing with it to be placed in
2925         * non-WOPCM memory. If GuC is not present or not in use we still need a
2926         * small bias as ring wraparound at offset 0 sometimes hangs. No idea
2927         * why.
2928         */
2929        ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
2930                               intel_guc_reserved_gtt_size(&dev_priv->guc));
2931
2932        ret = intel_vgt_balloon(dev_priv);
2933        if (ret)
2934                return ret;
2935
2936        /* Reserve a mappable slot for our lockless error capture */
2937        ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2938                                          PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2939                                          0, ggtt->mappable_end,
2940                                          DRM_MM_INSERT_LOW);
2941        if (ret)
2942                return ret;
2943
2944        /* Clear any non-preallocated blocks */
2945        drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2946                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2947                              hole_start, hole_end);
2948                ggtt->vm.clear_range(&ggtt->vm, hole_start,
2949                                     hole_end - hole_start);
2950        }
2951
2952        /* And finally clear the reserved guard page */
2953        ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2954
2955        if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
2956                ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2957                if (ret)
2958                        goto err;
2959        }
2960
2961        return 0;
2962
2963err:
2964        drm_mm_remove_node(&ggtt->error_capture);
2965        return ret;
2966}
2967
2968/**
2969 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2970 * @dev_priv: i915 device
2971 */
2972void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2973{
2974        struct i915_ggtt *ggtt = &dev_priv->ggtt;
2975        struct i915_vma *vma, *vn;
2976        struct pagevec *pvec;
2977
2978        ggtt->vm.closed = true;
2979
2980        mutex_lock(&dev_priv->drm.struct_mutex);
2981        i915_gem_fini_aliasing_ppgtt(dev_priv);
2982
2983        GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
2984        list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
2985                WARN_ON(i915_vma_unbind(vma));
2986
2987        if (drm_mm_node_allocated(&ggtt->error_capture))
2988                drm_mm_remove_node(&ggtt->error_capture);
2989
2990        if (drm_mm_initialized(&ggtt->vm.mm)) {
2991                intel_vgt_deballoon(dev_priv);
2992                i915_address_space_fini(&ggtt->vm);
2993        }
2994
2995        ggtt->vm.cleanup(&ggtt->vm);
2996
2997        pvec = &dev_priv->mm.wc_stash.pvec;
2998        if (pvec->nr) {
2999                set_pages_array_wb(pvec->pages, pvec->nr);
3000                __pagevec_release(pvec);
3001        }
3002
3003        mutex_unlock(&dev_priv->drm.struct_mutex);
3004
3005        arch_phys_wc_del(ggtt->mtrr);
3006        io_mapping_fini(&ggtt->iomap);
3007
3008        i915_gem_cleanup_stolen(dev_priv);
3009}
3010
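/*
 * Decode the GGTT size from the GMCH control register; the encoding varies
 * by platform: gen6/7 store the size directly in MiB, while gen8+ and CHV
 * store log2 of the size in MiB (0 meaning no GTT). All three helpers
 * return bytes.
 */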
3011static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
3012{
3013        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
3014        snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
3015        return snb_gmch_ctl << 20;
3016}
3017
3018static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
3019{
3020        bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
3021        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
3022        if (bdw_gmch_ctl)
3023                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
3024
3025#ifdef CONFIG_X86_32
3026        /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
3027        if (bdw_gmch_ctl > 4)
3028                bdw_gmch_ctl = 4;
3029#endif
3030
3031        return bdw_gmch_ctl << 20;
3032}
3033
3034static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
3035{
3036        gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
3037        gmch_ctrl &= SNB_GMCH_GGMS_MASK;
3038
3039        if (gmch_ctrl)
3040                return 1 << (20 + gmch_ctrl);
3041
3042        return 0;
3043}
3044
3045static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
3046{
3047        struct drm_i915_private *dev_priv = ggtt->vm.i915;
3048        struct pci_dev *pdev = dev_priv->drm.pdev;
3049        phys_addr_t phys_addr;
3050        int ret;
3051
3052        /* For modern GENs the PTEs and register space are split in the BAR */
3053        phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
3054
3055        /*
3056         * On BXT+/CNL+ writes larger than 64 bits to the GTT pagetable range
3057         * will be dropped. For WC mappings in general we have 64 byte burst
3058         * writes when the WC buffer is flushed, so we can't use it, but have to
3059         * resort to an uncached mapping. The WC issue is easily caught by the
3060         * readback check when writing GTT PTE entries.
3061         */
3062        if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
3063                ggtt->gsm = ioremap_nocache(phys_addr, size);
3064        else
3065                ggtt->gsm = ioremap_wc(phys_addr, size);
3066        if (!ggtt->gsm) {
3067                DRM_ERROR("Failed to map the ggtt page table\n");
3068                return -ENOMEM;
3069        }
3070
3071        ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
3072        if (ret) {
3073                DRM_ERROR("Scratch setup failed\n");
3074                /* iounmap will also get called at remove, but meh */
3075                iounmap(ggtt->gsm);
3076                return ret;
3077        }
3078
3079        return 0;
3080}
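
/*
 * Example (illustrative): on a part whose GTTMMADR BAR (BAR0) is 16MiB,
 * the register space occupies the first half and the GSM (the GGTT page
 * table) the second, so the phys_addr computed above points 8MiB into
 * the BAR.
 */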
3081
3082static struct intel_ppat_entry *
3083__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
3084{
3085        struct intel_ppat_entry *entry = &ppat->entries[index];
3086
3087        GEM_BUG_ON(index >= ppat->max_entries);
3088        GEM_BUG_ON(test_bit(index, ppat->used));
3089
3090        entry->ppat = ppat;
3091        entry->value = value;
3092        kref_init(&entry->ref);
3093        set_bit(index, ppat->used);
3094        set_bit(index, ppat->dirty);
3095
3096        return entry;
3097}
3098
3099static void __free_ppat_entry(struct intel_ppat_entry *entry)
3100{
3101        struct intel_ppat *ppat = entry->ppat;
3102        unsigned int index = entry - ppat->entries;
3103
3104        GEM_BUG_ON(index >= ppat->max_entries);
3105        GEM_BUG_ON(!test_bit(index, ppat->used));
3106
3107        entry->value = ppat->clear_value;
3108        clear_bit(index, ppat->used);
3109        set_bit(index, ppat->dirty);
3110}
3111
3112/**
3113 * intel_ppat_get - get a usable PPAT entry
3114 * @i915: i915 device instance
3115 * @value: the PPAT value required by the caller
3116 *
3117 * The function searches for an existing PPAT entry that matches the
3118 * required value. On a perfect match, the existing entry is reused. On a
3119 * partial match, it checks whether a free PPAT index is available. If one
3120 * is, a new PPAT index is allocated for the required value and the
3121 * hardware is updated accordingly. If no index is free, the partially
3122 * matching entry is used instead.
3123 */
3124const struct intel_ppat_entry *
3125intel_ppat_get(struct drm_i915_private *i915, u8 value)
3126{
3127        struct intel_ppat *ppat = &i915->ppat;
3128        struct intel_ppat_entry *entry = NULL;
3129        unsigned int scanned, best_score;
3130        int i;
3131
3132        GEM_BUG_ON(!ppat->max_entries);
3133
3134        scanned = best_score = 0;
3135        for_each_set_bit(i, ppat->used, ppat->max_entries) {
3136                unsigned int score;
3137
3138                score = ppat->match(ppat->entries[i].value, value);
3139                if (score > best_score) {
3140                        entry = &ppat->entries[i];
3141                        if (score == INTEL_PPAT_PERFECT_MATCH) {
3142                                kref_get(&entry->ref);
3143                                return entry;
3144                        }
3145                        best_score = score;
3146                }
3147                scanned++;
3148        }
3149
3150        if (scanned == ppat->max_entries) {
3151                if (!entry)
3152                        return ERR_PTR(-ENOSPC);
3153
3154                kref_get(&entry->ref);
3155                return entry;
3156        }
3157
3158        i = find_first_zero_bit(ppat->used, ppat->max_entries);
3159        entry = __alloc_ppat_entry(ppat, i, value);
3160        ppat->update_hw(i915);
3161        return entry;
3162}
3163
3164static void release_ppat(struct kref *kref)
3165{
3166        struct intel_ppat_entry *entry =
3167                container_of(kref, struct intel_ppat_entry, ref);
3168        struct drm_i915_private *i915 = entry->ppat->i915;
3169
3170        __free_ppat_entry(entry);
3171        entry->ppat->update_hw(i915);
3172}
3173
3174/**
3175 * intel_ppat_put - put back the PPAT entry obtained from intel_ppat_get()
3176 * @entry: an intel PPAT entry
3177 *
3178 * Put back the PPAT entry obtained from intel_ppat_get(). If the PPAT index
3179 * of the entry was dynamically allocated, its reference count is decreased.
3180 * Once the reference count drops to zero, the PPAT index becomes free again.
3181 */
3182void intel_ppat_put(const struct intel_ppat_entry *entry)
3183{
3184        struct intel_ppat *ppat = entry->ppat;
3185        unsigned int index = entry - ppat->entries;
3186
3187        GEM_BUG_ON(!ppat->max_entries);
3188
3189        kref_put(&ppat->entries[index].ref, release_ppat);
3190}
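
/*
 * Example usage (an illustrative sketch, not part of the driver): on a
 * gen8+ device, request a PPAT entry for a write-back, aged value, encode
 * its index into the relevant PTEs, and drop the reference when done.
 * intel_ppat_get() returns an ERR_PTR on failure, e.g. -ENOSPC when all
 * entries are in use and none shares the requested cache attribute.
 */
static int __maybe_unused example_use_ppat(struct drm_i915_private *i915)
{
        const struct intel_ppat_entry *entry;
        unsigned int index;

        entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC |
                                     GEN8_PPAT_AGE(3));
        if (IS_ERR(entry))
                return PTR_ERR(entry);

        index = entry - entry->ppat->entries;
        /* ... program @index as pat_sel in the PTEs using this entry ... */

        intel_ppat_put(entry);
        return 0;
}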
3191
3192static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3193{
3194        struct intel_ppat *ppat = &dev_priv->ppat;
3195        int i;
3196
3197        for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3198                I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3199                clear_bit(i, ppat->dirty);
3200        }
3201}
3202
3203static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3204{
3205        struct intel_ppat *ppat = &dev_priv->ppat;
3206        u64 pat = 0;
3207        int i;
3208
3209        for (i = 0; i < ppat->max_entries; i++)
3210                pat |= GEN8_PPAT(i, ppat->entries[i].value);
3211
3212        bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3213
3214        I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3215        I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3216}
3217
3218static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3219{
3220        unsigned int score = 0;
3221        enum {
3222                AGE_MATCH = BIT(0),
3223                TC_MATCH = BIT(1),
3224                CA_MATCH = BIT(2),
3225        };
3226
3227        /* Cache attribute has to be matched. */
3228        if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
3229                return 0;
3230
3231        score |= CA_MATCH;
3232
3233        if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3234                score |= TC_MATCH;
3235
3236        if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3237                score |= AGE_MATCH;
3238
3239        if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3240                return INTEL_PPAT_PERFECT_MATCH;
3241
3242        return score;
3243}
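
/*
 * Example (illustrative): matching src = WB|LLC|AGE(0) against
 * dst = WB|LLC|AGE(3) yields CA_MATCH | TC_MATCH but not AGE_MATCH, i.e.
 * a partial score. A cache-attribute mismatch (e.g. WB vs UC) scores 0
 * and is never picked by intel_ppat_get(), since only a score strictly
 * greater than the current best is considered.
 */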
3244
3245static unsigned int chv_private_pat_match(u8 src, u8 dst)
3246{
3247        return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3248                INTEL_PPAT_PERFECT_MATCH : 0;
3249}
3250
3251static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3252{
3253        ppat->max_entries = 8;
3254        ppat->update_hw = cnl_private_pat_update_hw;
3255        ppat->match = bdw_private_pat_match;
3256        ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3257
3258        __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3259        __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3260        __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3261        __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3262        __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3263        __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3264        __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3265        __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3266}
3267
3268/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3269 * bits. When using advanced contexts each context stores its own PAT, but
3270 * writing this data shouldn't be harmful even in those cases. */
3271static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3272{
3273        ppat->max_entries = 8;
3274        ppat->update_hw = bdw_private_pat_update_hw;
3275        ppat->match = bdw_private_pat_match;
3276        ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3277
3278        if (!USES_PPGTT(ppat->i915)) {
3279                /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3280                 * so RTL will always use the value corresponding to
3281                 * pat_sel = 000".
3282                 * So let's disable cache for GGTT to avoid screen corruptions.
3283                 * MOCS still can be used though.
3284                 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3285                 * before this patch, i.e. the same uncached + snooping access
3286                 * like on gen6/7 seems to be in effect.
3287                 * - So this just fixes blitter/render access. Again it looks
3288                 * like it's not just uncached access, but uncached + snooping.
3289                 * So we can still hold onto all our assumptions wrt cpu
3290                 * clflushing on LLC machines.
3291                 */
3292                __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3293                return;
3294        }
3295
3296        __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);      /* for normal objects, no eLLC */
3297        __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);  /* for something pointing to ptes? */
3298        __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);  /* for scanout with eLLC */
3299        __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);                      /* Uncached objects, mostly for scanout */
3300        __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3301        __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3302        __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3303        __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3304}
3305
3306static void chv_setup_private_ppat(struct intel_ppat *ppat)
3307{
3308        ppat->max_entries = 8;
3309        ppat->update_hw = bdw_private_pat_update_hw;
3310        ppat->match = chv_private_pat_match;
3311        ppat->clear_value = CHV_PPAT_SNOOP;
3312
3313        /*
3314         * Map WB on BDW to snooped on CHV.
3315         *
3316         * Only the snoop bit has meaning for CHV, the rest is
3317         * ignored.
3318         *
3319         * The hardware will never snoop for certain types of accesses:
3320         * - CPU GTT (GMADR->GGTT->no snoop->memory)
3321         * - PPGTT page tables
3322         * - some other special cycles
3323         *
3324         * As with BDW, we also need to consider the following for GT accesses:
3325         * "For GGTT, there is NO pat_sel[2:0] from the entry,
3326         * so RTL will always use the value corresponding to
3327         * pat_sel = 000".
3328         * Which means we must set the snoop bit in PAT entry 0
3329         * in order to keep the global status page working.
3330         */
3331
3332        __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3333        __alloc_ppat_entry(ppat, 1, 0);
3334        __alloc_ppat_entry(ppat, 2, 0);
3335        __alloc_ppat_entry(ppat, 3, 0);
3336        __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3337        __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3338        __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3339        __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3340}
3341
3342static void gen6_gmch_remove(struct i915_address_space *vm)
3343{
3344        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3345
3346        iounmap(ggtt->gsm);
3347        cleanup_scratch_page(vm);
3348}
3349
3350static void setup_private_pat(struct drm_i915_private *dev_priv)
3351{
3352        struct intel_ppat *ppat = &dev_priv->ppat;
3353        int i;
3354
3355        ppat->i915 = dev_priv;
3356
3357        if (INTEL_GEN(dev_priv) >= 10)
3358                cnl_setup_private_ppat(ppat);
3359        else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3360                chv_setup_private_ppat(ppat);
3361        else
3362                bdw_setup_private_ppat(ppat);
3363
3364        GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3365
3366        for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3367                ppat->entries[i].value = ppat->clear_value;
3368                ppat->entries[i].ppat = ppat;
3369                set_bit(i, ppat->dirty);
3370        }
3371
3372        ppat->update_hw(dev_priv);
3373}
3374
3375static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3376{
3377        struct drm_i915_private *dev_priv = ggtt->vm.i915;
3378        struct pci_dev *pdev = dev_priv->drm.pdev;
3379        unsigned int size;
3380        u16 snb_gmch_ctl;
3381        int err;
3382
3383        /* TODO: We're not aware of mappable constraints on gen8 yet */
3384        ggtt->gmadr =
3385                (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3386                                                 pci_resource_len(pdev, 2));
3387        ggtt->mappable_end = resource_size(&ggtt->gmadr);
3388
3389        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3390        if (!err)
3391                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3392        if (err)
3393                DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3394
3395        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3396        if (IS_CHERRYVIEW(dev_priv))
3397                size = chv_get_total_gtt_size(snb_gmch_ctl);
3398        else
3399                size = gen8_get_total_gtt_size(snb_gmch_ctl);
3400
3401        ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
3402        ggtt->vm.cleanup = gen6_gmch_remove;
3403        ggtt->vm.insert_page = gen8_ggtt_insert_page;
3404        ggtt->vm.clear_range = nop_clear_range;
3405        if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3406                ggtt->vm.clear_range = gen8_ggtt_clear_range;
3407
3408        ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3409
3410        /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3411        if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
3412                ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3413                ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
3414                if (ggtt->vm.clear_range != nop_clear_range)
3415                        ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3416
3417                /* Prevent recursively calling stop_machine() and deadlocks. */
3418                dev_info(dev_priv->drm.dev,
3419                         "Disabling error capture for VT-d workaround\n");
3420                i915_disable_error_state(dev_priv, -ENODEV);
3421        }
3422
3423        ggtt->invalidate = gen6_ggtt_invalidate;
3424
3425        ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3426        ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3427        ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3428        ggtt->vm.vma_ops.clear_pages = clear_pages;
3429
3430        setup_private_pat(dev_priv);
3431
3432        return ggtt_probe_common(ggtt, size);
3433}
3434
3435static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3436{
3437        struct drm_i915_private *dev_priv = ggtt->vm.i915;
3438        struct pci_dev *pdev = dev_priv->drm.pdev;
3439        unsigned int size;
3440        u16 snb_gmch_ctl;
3441        int err;
3442
3443        ggtt->gmadr =
3444                (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3445                                                 pci_resource_len(pdev, 2));
3446        ggtt->mappable_end = resource_size(&ggtt->gmadr);
3447
3448        /* 64/512MB is the current min/max we actually know of, but this is just
3449         * a coarse sanity check.
3450         */
3451        if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3452                DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3453                return -ENXIO;
3454        }
3455
3456        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3457        if (!err)
3458                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3459        if (err)
3460                DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3461        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3462
3463        size = gen6_get_total_gtt_size(snb_gmch_ctl);
3464        ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
3465
3466        ggtt->vm.clear_range = gen6_ggtt_clear_range;
3467        ggtt->vm.insert_page = gen6_ggtt_insert_page;
3468        ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3469        ggtt->vm.cleanup = gen6_gmch_remove;
3470
3471        ggtt->invalidate = gen6_ggtt_invalidate;
3472
3473        if (HAS_EDRAM(dev_priv))
3474                ggtt->vm.pte_encode = iris_pte_encode;
3475        else if (IS_HASWELL(dev_priv))
3476                ggtt->vm.pte_encode = hsw_pte_encode;
3477        else if (IS_VALLEYVIEW(dev_priv))
3478                ggtt->vm.pte_encode = byt_pte_encode;
3479        else if (INTEL_GEN(dev_priv) >= 7)
3480                ggtt->vm.pte_encode = ivb_pte_encode;
3481        else
3482                ggtt->vm.pte_encode = snb_pte_encode;
3483
3484        ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3485        ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3486        ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3487        ggtt->vm.vma_ops.clear_pages = clear_pages;
3488
3489        return ggtt_probe_common(ggtt, size);
3490}
3491
3492static void i915_gmch_remove(struct i915_address_space *vm)
3493{
3494        intel_gmch_remove();
3495}
3496
3497static int i915_gmch_probe(struct i915_ggtt *ggtt)
3498{
3499        struct drm_i915_private *dev_priv = ggtt->vm.i915;
3500        phys_addr_t gmadr_base;
3501        int ret;
3502
3503        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3504        if (!ret) {
3505                DRM_ERROR("failed to set up gmch\n");
3506                return -EIO;
3507        }
3508
3509        intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3510
3511        ggtt->gmadr =
3512                (struct resource) DEFINE_RES_MEM(gmadr_base,
3513                                                 ggtt->mappable_end);
3514
3515        ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3516        ggtt->vm.insert_page = i915_ggtt_insert_page;
3517        ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3518        ggtt->vm.clear_range = i915_ggtt_clear_range;
3519        ggtt->vm.cleanup = i915_gmch_remove;
3520
3521        ggtt->invalidate = gmch_ggtt_invalidate;
3522
3523        ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3524        ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3525        ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3526        ggtt->vm.vma_ops.clear_pages = clear_pages;
3527
3528        if (unlikely(ggtt->do_idle_maps))
3529                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3530
3531        return 0;
3532}
3533
3534/**
3535 * i915_ggtt_probe_hw - Probe GGTT hardware location
3536 * @dev_priv: i915 device
3537 */
3538int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3539{
3540        struct i915_ggtt *ggtt = &dev_priv->ggtt;
3541        int ret;
3542
3543        ggtt->vm.i915 = dev_priv;
3544        ggtt->vm.dma = &dev_priv->drm.pdev->dev;
3545
3546        if (INTEL_GEN(dev_priv) <= 5)
3547                ret = i915_gmch_probe(ggtt);
3548        else if (INTEL_GEN(dev_priv) < 8)
3549                ret = gen6_gmch_probe(ggtt);
3550        else
3551                ret = gen8_gmch_probe(ggtt);
3552        if (ret)
3553                return ret;
3554
3555        /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3556         * This is easier than doing range restriction on the fly, as we
3557         * currently don't have any bits spare to pass in this upper
3558         * restriction!
3559         */
3560        if (USES_GUC(dev_priv)) {
3561                ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
3562                ggtt->mappable_end =
3563                        min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3564        }
3565
3566        if ((ggtt->vm.total - 1) >> 32) {
3567                DRM_ERROR("We never expected a Global GTT with more than 32bits"
3568                          " of address space! Found %lldM!\n",
3569                          ggtt->vm.total >> 20);
3570                ggtt->vm.total = 1ULL << 32;
3571                ggtt->mappable_end =
3572                        min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3573        }
3574
3575        if (ggtt->mappable_end > ggtt->vm.total) {
3576                DRM_ERROR("mappable aperture extends past end of GGTT,"
3577                          " aperture=%pa, total=%llx\n",
3578                          &ggtt->mappable_end, ggtt->vm.total);
3579                ggtt->mappable_end = ggtt->vm.total;
3580        }
3581
3582        /* GMADR is the PCI mmio aperture into the global GTT. */
3583        DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3584        DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3585        DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3586                         (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3587        if (intel_vtd_active())
3588                DRM_INFO("VT-d active for gfx access\n");
3589
3590        return 0;
3591}
3592
3593/**
3594 * i915_ggtt_init_hw - Initialize GGTT hardware
3595 * @dev_priv: i915 device
3596 */
3597int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3598{
3599        struct i915_ggtt *ggtt = &dev_priv->ggtt;
3600        int ret;
3601
3602        stash_init(&dev_priv->mm.wc_stash);
3603
3604        /* Note that we use page colouring to enforce a guard page at the
3605         * end of the address space. This is required as the CS may prefetch
3606         * beyond the end of the batch buffer, across the page boundary,
3607         * and beyond the end of the GTT if we do not provide a guard.
3608         */
3609        mutex_lock(&dev_priv->drm.struct_mutex);
3610        i915_address_space_init(&ggtt->vm, dev_priv);
3611
3612        ggtt->vm.is_ggtt = true;
3613
3614        /* Only VLV supports read-only GGTT mappings */
3615        ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
3616
3617        if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
3618                ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3619        mutex_unlock(&dev_priv->drm.struct_mutex);
3620
3621        if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
3622                                dev_priv->ggtt.gmadr.start,
3623                                dev_priv->ggtt.mappable_end)) {
3624                ret = -EIO;
3625                goto out_gtt_cleanup;
3626        }
3627
3628        ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3629
3630        /*
3631         * Initialise stolen early so that we may reserve preallocated
3632         * objects for the BIOS to KMS transition.
3633         */
3634        ret = i915_gem_init_stolen(dev_priv);
3635        if (ret)
3636                goto out_gtt_cleanup;
3637
3638        return 0;
3639
3640out_gtt_cleanup:
3641        ggtt->vm.cleanup(&ggtt->vm);
3642        return ret;
3643}
3644
3645int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3646{
3647        if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3648                return -EIO;
3649
3650        return 0;
3651}
3652
3653void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3654{
3655        GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3656
3657        i915->ggtt.invalidate = guc_ggtt_invalidate;
3658
3659        i915_ggtt_invalidate(i915);
3660}
3661
3662void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3663{
3664        /* XXX Temporary pardon for error unload */
3665        if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
3666                return;
3667
3668        /* We should only be called after i915_ggtt_enable_guc() */
3669        GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3670
3671        i915->ggtt.invalidate = gen6_ggtt_invalidate;
3672
3673        i915_ggtt_invalidate(i915);
3674}
3675
3676void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3677{
3678        struct i915_ggtt *ggtt = &dev_priv->ggtt;
3679        struct i915_vma *vma, *vn;
3680
3681        i915_check_and_clear_faults(dev_priv);
3682
3683        /* First fill our portion of the GTT with scratch pages */
3684        ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3685
3686        ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3687
3688        /* clflush objects bound into the GGTT and rebind them. */
3689        GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
3690        list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
3691                struct drm_i915_gem_object *obj = vma->obj;
3692
3693                if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3694                        continue;
3695
3696                if (!i915_vma_unbind(vma))
3697                        continue;
3698
3699                WARN_ON(i915_vma_bind(vma,
3700                                      obj ? obj->cache_level : 0,
3701                                      PIN_UPDATE));
3702                if (obj)
3703                        WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3704        }
3705
3706        ggtt->vm.closed = false;
3707        i915_ggtt_invalidate(dev_priv);
3708
3709        if (INTEL_GEN(dev_priv) >= 8) {
3710                struct intel_ppat *ppat = &dev_priv->ppat;
3711
3712                bitmap_set(ppat->dirty, 0, ppat->max_entries);
3713                dev_priv->ppat.update_hw(dev_priv);
3714                return;
3715        }
3716}
3717
3718static struct scatterlist *
3719rotate_pages(const dma_addr_t *in, unsigned int offset,
3720             unsigned int width, unsigned int height,
3721             unsigned int stride,
3722             struct sg_table *st, struct scatterlist *sg)
3723{
3724        unsigned int column, row;
3725        unsigned int src_idx;
3726
3727        for (column = 0; column < width; column++) {
3728                src_idx = stride * (height - 1) + column;
3729                for (row = 0; row < height; row++) {
3730                        st->nents++;
3731                        /* We don't need the pages, but need to initialize
3732                         * the entries so the sg list can be happily traversed.
3733                         * The only things we need are the DMA addresses.
3734                         */
3735                        sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3736                        sg_dma_address(sg) = in[offset + src_idx];
3737                        sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3738                        sg = sg_next(sg);
3739                        src_idx -= stride;
3740                }
3741        }
3742
3743        return sg;
3744}
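
/*
 * Worked example (illustrative): for a single 2x2 plane (width = height =
 * stride = 2, offset = 0), the column-major walk above emits source page
 * indices 2, 0, 3, 1: column 0 starts at src_idx = 2 * (2 - 1) + 0 = 2 and
 * steps down by stride to 0, then column 1 starts at 3 and steps to 1.
 * This produces the rotated layout consumed by the display engine.
 */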
3745
3746static noinline struct sg_table *
3747intel_rotate_pages(struct intel_rotation_info *rot_info,
3748                   struct drm_i915_gem_object *obj)
3749{
3750        const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
3751        unsigned int size = intel_rotation_info_size(rot_info);
3752        struct sgt_iter sgt_iter;
3753        dma_addr_t dma_addr;
3754        unsigned long i;
3755        dma_addr_t *page_addr_list;
3756        struct sg_table *st;
3757        struct scatterlist *sg;
3758        int ret = -ENOMEM;
3759
3760        /* Allocate a temporary list of source pages for random access. */
3761        page_addr_list = kvmalloc_array(n_pages,
3762                                        sizeof(dma_addr_t),
3763                                        GFP_KERNEL);
3764        if (!page_addr_list)
3765                return ERR_PTR(ret);
3766
3767        /* Allocate target SG list. */
3768        st = kmalloc(sizeof(*st), GFP_KERNEL);
3769        if (!st)
3770                goto err_st_alloc;
3771
3772        ret = sg_alloc_table(st, size, GFP_KERNEL);
3773        if (ret)
3774                goto err_sg_alloc;
3775
3776        /* Populate source page list from the object. */
3777        i = 0;
3778        for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3779                page_addr_list[i++] = dma_addr;
3780
3781        GEM_BUG_ON(i != n_pages);
3782        st->nents = 0;
3783        sg = st->sgl;
3784
3785        for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) {
3786                sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3787                                  rot_info->plane[i].width, rot_info->plane[i].height,
3788                                  rot_info->plane[i].stride, st, sg);
3789        }
3790
3791        kvfree(page_addr_list);
3792
3793        return st;
3794
3795err_sg_alloc:
3796        kfree(st);
3797err_st_alloc:
3798        kvfree(page_addr_list);
3799
3800        DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3801                         obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3802
3803        return ERR_PTR(ret);
3804}
3805
3806static noinline struct sg_table *
3807intel_partial_pages(const struct i915_ggtt_view *view,
3808                    struct drm_i915_gem_object *obj)
3809{
3810        struct sg_table *st;
3811        struct scatterlist *sg, *iter;
3812        unsigned int count = view->partial.size;
3813        unsigned int offset;
3814        int ret = -ENOMEM;
3815
3816        st = kmalloc(sizeof(*st), GFP_KERNEL);
3817        if (!st)
3818                goto err_st_alloc;
3819
3820        ret = sg_alloc_table(st, count, GFP_KERNEL);
3821        if (ret)
3822                goto err_sg_alloc;
3823
3824        iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3825        GEM_BUG_ON(!iter);
3826
3827        sg = st->sgl;
3828        st->nents = 0;
3829        do {
3830                unsigned int len;
3831
3832                len = min(iter->length - (offset << PAGE_SHIFT),
3833                          count << PAGE_SHIFT);
3834                sg_set_page(sg, NULL, len, 0);
3835                sg_dma_address(sg) =
3836                        sg_dma_address(iter) + (offset << PAGE_SHIFT);
3837                sg_dma_len(sg) = len;
3838
3839                st->nents++;
3840                count -= len >> PAGE_SHIFT;
3841                if (count == 0) {
3842                        sg_mark_end(sg);
3843                        return st;
3844                }
3845
3846                sg = __sg_next(sg);
3847                iter = __sg_next(iter);
3848                offset = 0;
3849        } while (1);
3850
3851err_sg_alloc:
3852        kfree(st);
3853err_st_alloc:
3854        return ERR_PTR(ret);
3855}
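
/*
 * Worked example (illustrative): a partial view with partial.offset = 1
 * and partial.size = 2 maps object pages 1 and 2 only. If the source sg
 * entry covering page 1 is four pages long, the loop above emits a single
 * two-page chunk starting one page into that entry and immediately marks
 * it as the end of the table.
 */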
3856
3857static int
3858i915_get_ggtt_vma_pages(struct i915_vma *vma)
3859{
3860        int ret;
3861
3862        /* The vma->pages are only valid within the lifespan of the borrowed
3863         * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3864         * must be the vma->pages. A simple rule is that vma->pages must only
3865         * be accessed when the obj->mm.pages are pinned.
3866         */
3867        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3868
3869        switch (vma->ggtt_view.type) {
3870        default:
3871                GEM_BUG_ON(vma->ggtt_view.type);
3872                /* fall through */
3873        case I915_GGTT_VIEW_NORMAL:
3874                vma->pages = vma->obj->mm.pages;
3875                return 0;
3876
3877        case I915_GGTT_VIEW_ROTATED:
3878                vma->pages =
3879                        intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3880                break;
3881
3882        case I915_GGTT_VIEW_PARTIAL:
3883                vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3884                break;
3885        }
3886
3887        ret = 0;
3888        if (unlikely(IS_ERR(vma->pages))) {
3889                ret = PTR_ERR(vma->pages);
3890                vma->pages = NULL;
3891                DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3892                          vma->ggtt_view.type, ret);
3893        }
3894        return ret;
3895}
3896
3897/**
3898 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3899 * @vm: the &struct i915_address_space
3900 * @node: the &struct drm_mm_node (typically i915_vma.mode)
3901 * @size: how much space to allocate inside the GTT,
3902 *        must be #I915_GTT_PAGE_SIZE aligned
3903 * @offset: where to insert inside the GTT,
3904 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3905 *          (@offset + @size) must fit within the address space
3906 * @color: color to apply to node, if this node is not from a VMA,
3907 *         color must be #I915_COLOR_UNEVICTABLE
3908 * @flags: control search and eviction behaviour
3909 *
3910 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3911 * the address space (using @size and @color). If the @node does not fit, it
3912 * tries to evict any overlapping nodes from the GTT, including any
3913 * neighbouring nodes if the colors do not match (to ensure guard pages between
3914 * differing domains). See i915_gem_evict_for_node() for the gory details
3915 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3916 * evicting active overlapping objects, and any overlapping node that is pinned
3917 * or marked as unevictable will also result in failure.
3918 *
3919 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3920 * asked to wait for eviction and interrupted.
3921 */
3922int i915_gem_gtt_reserve(struct i915_address_space *vm,
3923                         struct drm_mm_node *node,
3924                         u64 size, u64 offset, unsigned long color,
3925                         unsigned int flags)
3926{
3927        int err;
3928
3929        GEM_BUG_ON(!size);
3930        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3931        GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3932        GEM_BUG_ON(range_overflows(offset, size, vm->total));
3933        GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3934        GEM_BUG_ON(drm_mm_node_allocated(node));
3935
3936        node->size = size;
3937        node->start = offset;
3938        node->color = color;
3939
3940        err = drm_mm_reserve_node(&vm->mm, node);
3941        if (err != -ENOSPC)
3942                return err;
3943
3944        if (flags & PIN_NOEVICT)
3945                return -ENOSPC;
3946
3947        err = i915_gem_evict_for_node(vm, node, flags);
3948        if (err == 0)
3949                err = drm_mm_reserve_node(&vm->mm, node);
3950
3951        return err;
3952}
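
/*
 * Example usage (an illustrative sketch; node, size and offset are
 * arbitrary page-aligned values): pin a node at a fixed GGTT offset,
 * evicting any idle overlapping nodes, then release it again. The caller
 * must hold struct_mutex across the eviction path.
 */
static int __maybe_unused example_reserve_fixed(struct i915_ggtt *ggtt,
                                                struct drm_mm_node *node)
{
        int err;

        err = i915_gem_gtt_reserve(&ggtt->vm, node,
                                   16 * I915_GTT_PAGE_SIZE,  /* size */
                                   256 * I915_GTT_PAGE_SIZE, /* offset */
                                   I915_COLOR_UNEVICTABLE,
                                   PIN_NONBLOCK);
        if (err)
                return err;

        /* ... use the reserved range ... */

        drm_mm_remove_node(node);
        return 0;
}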
3953
3954static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3955{
3956        u64 range, addr;
3957
3958        GEM_BUG_ON(range_overflows(start, len, end));
3959        GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3960
3961        range = round_down(end - len, align) - round_up(start, align);
3962        if (range) {
3963                if (sizeof(unsigned long) == sizeof(u64)) {
3964                        addr = get_random_long();
3965                } else {
3966                        addr = get_random_int();
3967                        if (range > U32_MAX) {
3968                                addr <<= 32;
3969                                addr |= get_random_int();
3970                        }
3971                }
3972                div64_u64_rem(addr, range, &addr);
3973                start += addr;
3974        }
3975
3976        return round_up(start, align);
3977}
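
/*
 * Worked example (illustrative): with start = 0, end = 1MiB, len = 64KiB
 * and align = 4KiB, the usable span is round_down(1M - 64K, 4K) -
 * round_up(0, 4K) = 960KiB. A random value is reduced modulo that span,
 * added to start and rounded up to the alignment, yielding a 4KiB-aligned
 * offset in [0, 960KiB] that always leaves room for @len bytes below @end.
 */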
3978
3979/**
3980 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3981 * @vm: the &struct i915_address_space
3982 * @node: the &struct drm_mm_node (typically i915_vma.node)
3983 * @size: how much space to allocate inside the GTT,
3984 *        must be #I915_GTT_PAGE_SIZE aligned
3985 * @alignment: required alignment of starting offset, may be 0 but
3986 *             if specified, this must be a power-of-two and at least
3987 *             #I915_GTT_MIN_ALIGNMENT
3988 * @color: color to apply to node
3989 * @start: start of any range restriction inside GTT (0 for all),
3990 *         must be #I915_GTT_PAGE_SIZE aligned
3991 * @end: end of any range restriction inside GTT (U64_MAX for all),
3992 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3993 * @flags: control search and eviction behaviour
3994 *
3995 * i915_gem_gtt_insert() first searches for an available hole into which
3996 * it can insert the node. The hole address is aligned to @alignment and
3997 * its @size must then fit entirely within the [@start, @end] bounds. The
3998 * nodes on either side of the hole must match @color, or else a guard page
3999 * will be inserted between the two nodes (or the node evicted). If no
4000 * suitable hole is found, first a victim is randomly selected and tested
4001 * for eviction; if that fails, the LRU list of objects within the GTT
4002 * is scanned to find the first set of replacement nodes to create the hole.
4003 * Those old overlapping nodes are evicted from the GTT (and so must be
4004 * rebound before any future use). Any node that is currently pinned cannot
4005 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
4006 * active and #PIN_NONBLOCK is specified, that node is also skipped when
4007 * searching for an eviction candidate. See i915_gem_evict_something() for
4008 * the gory details on the eviction algorithm.
4009 *
4010 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
4011 * asked to wait for eviction and interrupted.
4012 */
4013int i915_gem_gtt_insert(struct i915_address_space *vm,
4014                        struct drm_mm_node *node,
4015                        u64 size, u64 alignment, unsigned long color,
4016                        u64 start, u64 end, unsigned int flags)
4017{
4018        enum drm_mm_insert_mode mode;
4019        u64 offset;
4020        int err;
4021
4022        lockdep_assert_held(&vm->i915->drm.struct_mutex);
4023        GEM_BUG_ON(!size);
4024        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
4025        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
4026        GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
4027        GEM_BUG_ON(start >= end);
4028        GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
4029        GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
4030        GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
4031        GEM_BUG_ON(drm_mm_node_allocated(node));
4032
4033        if (unlikely(range_overflows(start, size, end)))
4034                return -ENOSPC;
4035
4036        if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
4037                return -ENOSPC;
4038
4039        mode = DRM_MM_INSERT_BEST;
4040        if (flags & PIN_HIGH)
4041                mode = DRM_MM_INSERT_HIGHEST;
4042        if (flags & PIN_MAPPABLE)
4043                mode = DRM_MM_INSERT_LOW;
4044
4045        /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
4046         * so we know that we always have a minimum alignment of 4096.
4047         * The drm_mm range manager is optimised to return results
4048         * with zero alignment, so where possible use the optimal
4049         * path.
4050         */
4051        BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
4052        if (alignment <= I915_GTT_MIN_ALIGNMENT)
4053                alignment = 0;
4054
4055        err = drm_mm_insert_node_in_range(&vm->mm, node,
4056                                          size, alignment, color,
4057                                          start, end, mode);
4058        if (err != -ENOSPC)
4059                return err;
4060
4061        if (mode & DRM_MM_INSERT_ONCE) {
4062                err = drm_mm_insert_node_in_range(&vm->mm, node,
4063                                                  size, alignment, color,
4064                                                  start, end,
4065                                                  DRM_MM_INSERT_BEST);
4066                if (err != -ENOSPC)
4067                        return err;
4068        }
4069
4070        if (flags & PIN_NOEVICT)
4071                return -ENOSPC;
4072
4073        /* No free space, pick a slot at random.
4074         *
4075         * There is a pathological case here using a GTT shared between
4076         * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
4077         *
4078         *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
4079         *         (64k objects)             (448k objects)
4080         *
4081         * Now imagine that the eviction LRU is ordered top-down (just because
4082         * pathology meets real life), and that we need to evict an object to
4083         * make room inside the aperture. The eviction scan then has to walk
4084         * the 448k list before it finds one within range. And now imagine that
4085         * it has to search for a new hole between every byte inside the memcpy,
4086         * for several simultaneous clients.
4087         *
4088         * On a full-ppgtt system, if we have run out of available space, there
4089         * will be lots and lots of objects in the eviction list! Again,
4090         * searching that LRU list may be slow if we are also applying any
4091         * range restrictions (e.g. restriction to low 4GiB) and so, for
4092 * simplicity and similarity between different GTTs, try the single
4093         * random replacement first.
4094         */
4095        offset = random_offset(start, end,
4096                               size, alignment ?: I915_GTT_MIN_ALIGNMENT);
4097        err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
4098        if (err != -ENOSPC)
4099                return err;
4100
4101        /* Randomly selected placement is pinned, do a search */
4102        err = i915_gem_evict_something(vm, size, alignment, color,
4103                                       start, end, flags);
4104        if (err)
4105                return err;
4106
4107        return drm_mm_insert_node_in_range(&vm->mm, node,
4108                                           size, alignment, color,
4109                                           start, end, DRM_MM_INSERT_EVICT);
4110}
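
/*
 * Example usage (an illustrative sketch, not part of the driver): search
 * the whole GGTT for a 2MiB hole, preferring high addresses and falling
 * back to the eviction strategies described above. struct_mutex must be
 * held, per the lockdep assertion in i915_gem_gtt_insert().
 */
static int __maybe_unused example_insert_high(struct i915_ggtt *ggtt,
                                              struct drm_mm_node *node)
{
        return i915_gem_gtt_insert(&ggtt->vm, node,
                                   512 * I915_GTT_PAGE_SIZE, /* 2MiB */
                                   0, /* any I915_GTT_MIN_ALIGNMENT offset */
                                   I915_COLOR_UNEVICTABLE,
                                   0, ggtt->vm.total, /* full range */
                                   PIN_HIGH);
}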
4111
4112#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4113#include "selftests/mock_gtt.c"
4114#include "selftests/i915_gem_gtt.c"
4115#endif
4116