/* linux/drivers/gpu/drm/msm/msm_gem_vma.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2016 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include "msm_drv.h"
   8#include "msm_gem.h"
   9#include "msm_mmu.h"
  10
  11static void
  12msm_gem_address_space_destroy(struct kref *kref)
  13{
  14        struct msm_gem_address_space *aspace = container_of(kref,
  15                        struct msm_gem_address_space, kref);
  16
  17        drm_mm_takedown(&aspace->mm);
  18        if (aspace->mmu)
  19                aspace->mmu->funcs->destroy(aspace->mmu);
  20        kfree(aspace);
  21}
  22
  23
  24void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
  25{
  26        if (aspace)
  27                kref_put(&aspace->kref, msm_gem_address_space_destroy);
  28}
  29
  30/* Actually unmap memory for the vma */
  31void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
  32                struct msm_gem_vma *vma)
  33{
  34        unsigned size = vma->node.size << PAGE_SHIFT;
  35
  36        /* Print a message if we try to purge a vma in use */
  37        if (WARN_ON(vma->inuse > 0))
  38                return;
  39
  40        /* Don't do anything if the memory isn't mapped */
  41        if (!vma->mapped)
  42                return;
  43
  44        if (aspace->mmu)
  45                aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
  46
  47        vma->mapped = false;
  48}
  49
  50/* Remove reference counts for the mapping */
  51void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
  52                struct msm_gem_vma *vma)
  53{
  54        if (!WARN_ON(!vma->iova))
  55                vma->inuse--;
  56}
  57
  58int
  59msm_gem_map_vma(struct msm_gem_address_space *aspace,
  60                struct msm_gem_vma *vma, int prot,
  61                struct sg_table *sgt, int npages)
  62{
  63        unsigned size = npages << PAGE_SHIFT;
  64        int ret = 0;
  65
  66        if (WARN_ON(!vma->iova))
  67                return -EINVAL;
  68
  69        /* Increase the usage counter */
  70        vma->inuse++;
  71
  72        if (vma->mapped)
  73                return 0;
  74
  75        vma->mapped = true;
  76
  77        if (aspace && aspace->mmu)
  78                ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
  79                                size, prot);
  80
  81        if (ret)
  82                vma->mapped = false;
  83
  84        return ret;
  85}
  86
/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	/* Refuse to tear down a vma that is still pinned or mapped;
	 * the caller must unmap/purge first. */
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	/* aspace->lock serializes all drm_mm access; the iova check is
	 * done under the lock together with the node removal. */
	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	/* Clearing iova marks the vma as uninitialized so it can be
	 * re-used by msm_gem_init_vma(). */
	vma->iova = 0;

	/* Drop the reference taken in msm_gem_init_vma() */
	msm_gem_address_space_put(aspace);
}
 103
 104/* Initialize a new vma and allocate an iova for it */
 105int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 106                struct msm_gem_vma *vma, int npages,
 107                u64 range_start, u64 range_end)
 108{
 109        int ret;
 110
 111        if (WARN_ON(vma->iova))
 112                return -EBUSY;
 113
 114        spin_lock(&aspace->lock);
 115        ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
 116                0, range_start, range_end, 0);
 117        spin_unlock(&aspace->lock);
 118
 119        if (ret)
 120                return ret;
 121
 122        vma->iova = vma->node.start << PAGE_SHIFT;
 123        vma->mapped = false;
 124
 125        kref_get(&aspace->kref);
 126
 127        return 0;
 128}
 129
 130struct msm_gem_address_space *
 131msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
 132                u64 va_start, u64 size)
 133{
 134        struct msm_gem_address_space *aspace;
 135
 136        if (IS_ERR(mmu))
 137                return ERR_CAST(mmu);
 138
 139        aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
 140        if (!aspace)
 141                return ERR_PTR(-ENOMEM);
 142
 143        spin_lock_init(&aspace->lock);
 144        aspace->name = name;
 145        aspace->mmu = mmu;
 146
 147        drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
 148
 149        kref_init(&aspace->kref);
 150
 151        return aspace;
 152}
 153