linux/drivers/gpu/drm/msm/msm_gem_vma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"

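/* Final teardown of the address space, called once the last reference is dropped */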
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}

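/* Drop a reference to the address space, destroying it when the last reference is released */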
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

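/* Take a reference to the address space, tolerating NULL or error pointers */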
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}

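/*
 * Check whether the vma is still in use, either because it is pinned or
 * because a fence on it has not yet signalled.  Slots whose fences have
 * completed are cleared from fence_mask as a side effect.
 */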
bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
{
	if (vma->inuse > 0)
		return true;

	while (vma->fence_mask) {
		unsigned idx = ffs(vma->fence_mask) - 1;

		if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
			return true;

		vma->fence_mask &= ~BIT(idx);
	}

	return false;
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size;

	/* Print a message if we try to purge a vma in use */
	GEM_WARN_ON(msm_gem_vma_inuse(vma));

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unpin_vma(struct msm_gem_vma *vma)
{
	if (GEM_WARN_ON(!vma->inuse))
		return;
	if (!GEM_WARN_ON(!vma->iova))
		vma->inuse--;
}

/* Replace pin reference with fence: */
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
	vma->fctx[fctx->index] = fctx;
	vma->fence[fctx->index] = fctx->last_fence;
	vma->fence_mask |= BIT(fctx->index);
	msm_gem_unpin_vma(vma);
}

/* Map and pin vma: */
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	int ret = 0;

	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret) {
		vma->mapped = false;
		vma->inuse--;
	}

	return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	int ret;

	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}

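/* Allocate and initialize a new address space covering [va_start, va_start + size) */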
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;
	aspace->va_start = va_start;
	aspace->va_size  = size;

	drm_mm_init(&aspace->mm, va_start, size);

	kref_init(&aspace->kref);

	return aspace;
}