linux/drivers/gpu/drm/msm/msm_gem_vma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

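/* Final teardown, called via kref_put() once the last reference is gone */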
static void
msm_gem_address_space_destroy(struct kref *kref)
{
        struct msm_gem_address_space *aspace = container_of(kref,
                        struct msm_gem_address_space, kref);

        drm_mm_takedown(&aspace->mm);
        if (aspace->mmu)
                aspace->mmu->funcs->destroy(aspace->mmu);
        put_pid(aspace->pid);
        kfree(aspace);
}

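/* Drop a reference; the address space is destroyed when the last one goes */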
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
        if (aspace)
                kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

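/* Take a reference, tolerating NULL and ERR_PTR values for convenience */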
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
        if (!IS_ERR_OR_NULL(aspace))
                kref_get(&aspace->kref);

        return aspace;
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
{
        unsigned size = vma->node.size << PAGE_SHIFT;

        /* A vma that is still in use must not be purged; warn and bail */
        if (WARN_ON(vma->inuse > 0))
                return;

        /* Don't do anything if the memory isn't mapped */
        if (!vma->mapped)
                return;

        if (aspace->mmu)
                aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

        vma->mapped = false;
}

/* Drop the usage count for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
{
        if (!WARN_ON(!vma->iova))
                vma->inuse--;
}

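/*
 * Map the backing pages into the iova range reserved by msm_gem_init_vma(),
 * bumping the usage count.  A vma that is already mapped just gains another
 * reference.
 */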
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma, int prot,
                struct sg_table *sgt, int npages)
{
        unsigned size = npages << PAGE_SHIFT;
        int ret = 0;

        if (WARN_ON(!vma->iova))
                return -EINVAL;

        /* Increase the usage counter */
        vma->inuse++;

        if (vma->mapped)
                return 0;

        vma->mapped = true;

        if (aspace && aspace->mmu)
                ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
                                size, prot);

        if (ret) {
                vma->mapped = false;
                vma->inuse--;
        }

        return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
{
        if (WARN_ON(vma->inuse > 0 || vma->mapped))
                return;

        spin_lock(&aspace->lock);
        if (vma->iova)
                drm_mm_remove_node(&vma->node);
        spin_unlock(&aspace->lock);

        vma->iova = 0;

        msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma, int npages,
                u64 range_start, u64 range_end)
{
        int ret;

        if (WARN_ON(vma->iova))
                return -EBUSY;

        spin_lock(&aspace->lock);
        ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
                0, range_start, range_end, 0);
        spin_unlock(&aspace->lock);

        if (ret)
                return ret;

        vma->iova = vma->node.start << PAGE_SHIFT;
        vma->mapped = false;

        kref_get(&aspace->kref);

        return 0;
}

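/*
 * Allocate an address space spanning [va_start, va_start + size), taking
 * ownership of the given MMU, which is destroyed along with the address
 * space on the final put.
 */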
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
                u64 va_start, u64 size)
{
        struct msm_gem_address_space *aspace;

        if (IS_ERR(mmu))
                return ERR_CAST(mmu);

        aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
        if (!aspace)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&aspace->lock);
        aspace->name = name;
        aspace->mmu = mmu;

        drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);

        kref_init(&aspace->kref);

        return aspace;
}
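
/*
 * Usage sketch (illustration only, not part of the driver): one plausible
 * way a caller such as msm_gem.c might drive this API.  The aspace, sgt,
 * npages, the 0..U64_MAX range, and the IOMMU_READ/IOMMU_WRITE prot flags
 * are assumptions for the example, not taken from this file.
 *
 *	struct msm_gem_vma vma = {};
 *	int ret;
 *
 *	ret = msm_gem_init_vma(aspace, &vma, npages, 0, U64_MAX);
 *	if (ret)
 *		return ret;
 *
 *	ret = msm_gem_map_vma(aspace, &vma, IOMMU_READ | IOMMU_WRITE,
 *			sgt, npages);
 *	if (ret)
 *		return ret;
 *
 *	... use vma.iova for GPU access ...
 *
 *	msm_gem_unmap_vma(aspace, &vma);	// drop the usage count
 *	msm_gem_purge_vma(aspace, &vma);	// actually unmap the pages
 *	msm_gem_close_vma(aspace, &vma);	// release the iova range
 */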