linux/drivers/gpu/drm/i915/display/intel_dpt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "gt/gen8_ppgtt.h"

struct i915_dpt {
        struct i915_address_space vm;

        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

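/*
 * Upcast from the embedded address space to its containing i915_dpt.
 * The BUILD_BUG_ON() requires vm to remain the first member.
 */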
static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
        BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
        GEM_BUG_ON(!i915_is_dpt(vm));
        return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
        writeq(pte, addr);
}

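/* Write the PTE for a single page at @offset into the DPT. */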
static void dpt_insert_page(struct i915_address_space *vm,
                            dma_addr_t addr,
                            u64 offset,
                            enum i915_cache_level level,
                            u32 flags)
{
        struct i915_dpt *dpt = i915_vm_to_dpt(vm);
        gen8_pte_t __iomem *base = dpt->iomem;

        gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
                     vm->pte_encode(addr, level, flags));
}

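/*
 * Write a PTE for each page backing @vma, starting at the entry that
 * corresponds to the vma's offset within the DPT.
 */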
static void dpt_insert_entries(struct i915_address_space *vm,
                               struct i915_vma *vma,
                               enum i915_cache_level level,
                               u32 flags)
{
        struct i915_dpt *dpt = i915_vm_to_dpt(vm);
        gen8_pte_t __iomem *base = dpt->iomem;
        const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
        struct sgt_iter sgt_iter;
        dma_addr_t addr;
        int i;

        /*
         * Note that we ignore PTE_READ_ONLY here. The caller must be careful
         * not to allow the user to override access to a read only page.
         */

        i = vma->node.start / I915_GTT_PAGE_SIZE;
        for_each_sgt_daddr(addr, sgt_iter, vma->pages)
                gen8_set_pte(&base[i++], pte_encode | addr);
}

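/* No PTE clearing is done for the DPT. */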
static void dpt_clear_range(struct i915_address_space *vm,
                            u64 start, u64 length)
{
}

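/* Compute the PTE flags for @vma and write its entries into the DPT. */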
static void dpt_bind_vma(struct i915_address_space *vm,
                         struct i915_vm_pt_stash *stash,
                         struct i915_vma *vma,
                         enum i915_cache_level cache_level,
                         u32 flags)
{
        struct drm_i915_gem_object *obj = vma->obj;
        u32 pte_flags;

        /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
        pte_flags = 0;
        if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
        if (i915_gem_object_is_lmem(obj))
                pte_flags |= PTE_LM;

        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

        /*
         * Without aliasing PPGTT there's no difference between
         * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
         * upgrade to both bound if we bind either to avoid double-binding.
         */
        atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
}

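/* Clear the vma's PTE range (currently a no-op, see dpt_clear_range()). */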
static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
        vm->clear_range(vm, vma->node.start, vma->size);
}

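/* Drop the reference on the page table object when the vm goes away. */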
static void dpt_cleanup(struct i915_address_space *vm)
{
        struct i915_dpt *dpt = i915_vm_to_dpt(vm);

        i915_gem_object_put(dpt->obj);
}

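/**
 * intel_dpt_pin - pin the DPT page table object and map it for PTE writes
 * @vm: the DPT address space
 *
 * Pins the page table object into the GGTT (through the mappable aperture
 * unless the device has local memory) and sets up an iomapping so that PTEs
 * can be written with gen8_set_pte(). A runtime PM wakeref is held for the
 * duration of the pinning.
 *
 * Returns the pinned vma on success, or an ERR_PTR() on failure.
 */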
struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
{
        struct drm_i915_private *i915 = vm->i915;
        struct i915_dpt *dpt = i915_vm_to_dpt(vm);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        void __iomem *iomem;
        struct i915_gem_ww_ctx ww;
        int err;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        atomic_inc(&i915->gpu_error.pending_fb_pin);

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(dpt->obj, &ww);
                if (err)
                        continue;

                vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
                                                  HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        continue;
                }

                iomem = i915_vma_pin_iomap(vma);
                i915_vma_unpin(vma);

                if (IS_ERR(iomem)) {
                        err = PTR_ERR(iomem);
                        continue;
                }

                dpt->vma = vma;
                dpt->iomem = iomem;

                i915_vma_get(vma);
        }

        atomic_dec(&i915->gpu_error.pending_fb_pin);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);

        return err ? ERR_PTR(err) : vma;
}

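/**
 * intel_dpt_unpin - release the DPT iomapping and the vma reference
 * @vm: the DPT address space
 */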
void intel_dpt_unpin(struct i915_address_space *vm)
{
        struct i915_dpt *dpt = i915_vm_to_dpt(vm);

        i915_vma_unpin_iomap(dpt->vma);
        i915_vma_put(dpt->vma);
}

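/**
 * intel_dpt_create - create a DPT address space for a framebuffer
 * @fb: the framebuffer to be mapped through the DPT
 *
 * Allocates a page table object (in local memory when available, otherwise
 * in stolen memory) sized to hold the PTEs for the framebuffer's GTT view,
 * and initializes a VM_CLASS_DPT address space on top of it.
 *
 * Returns the new address space, or an ERR_PTR() on failure.
 */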
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
        struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
        struct drm_i915_private *i915 = to_i915(obj->dev);
        struct drm_i915_gem_object *dpt_obj;
        struct i915_address_space *vm;
        struct i915_dpt *dpt;
        size_t size;
        int ret;

        if (intel_fb_needs_pot_stride_remap(fb))
                size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
        else
                size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

        size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

        if (HAS_LMEM(i915))
                dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
        else
                dpt_obj = i915_gem_object_create_stolen(i915, size);
        if (IS_ERR(dpt_obj))
                return ERR_CAST(dpt_obj);

        ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
        if (ret) {
                i915_gem_object_put(dpt_obj);
                return ERR_PTR(ret);
        }

        dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
        if (!dpt) {
                i915_gem_object_put(dpt_obj);
                return ERR_PTR(-ENOMEM);
        }

        vm = &dpt->vm;

        vm->gt = &i915->gt;
        vm->i915 = i915;
        vm->dma = i915->drm.dev;
        vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
        vm->is_dpt = true;

        i915_address_space_init(vm, VM_CLASS_DPT);

        vm->insert_page = dpt_insert_page;
        vm->clear_range = dpt_clear_range;
        vm->insert_entries = dpt_insert_entries;
        vm->cleanup = dpt_cleanup;

        vm->vma_ops.bind_vma    = dpt_bind_vma;
        vm->vma_ops.unbind_vma  = dpt_unbind_vma;
        vm->vma_ops.set_pages   = ggtt_set_pages;
        vm->vma_ops.clear_pages = clear_pages;

        vm->pte_encode = gen8_ggtt_pte_encode;

        dpt->obj = dpt_obj;

        return &dpt->vm;
}

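/**
 * intel_dpt_destroy - tear down a DPT address space
 * @vm: the DPT address space created by intel_dpt_create()
 */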
void intel_dpt_destroy(struct i915_address_space *vm)
{
        struct i915_dpt *dpt = i915_vm_to_dpt(vm);

        i915_vm_close(&dpt->vm);
}