linux/drivers/staging/media/tegra-vde/dmabuf-cache.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2019 GRATE-DRIVER project
 */

#include <linux/dma-buf.h>
#include <linux/iova.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vde.h"

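/*
 * A cached dma-buf mapping.  Entries live on vde->map_list, are looked up
 * by their dma-buf and are reference-counted; an entry that drops to zero
 * references is torn down by the delayed work after a grace period.
 */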
struct tegra_vde_cache_entry {
        enum dma_data_direction dma_dir;
        struct dma_buf_attachment *a;
        struct delayed_work dwork;
        struct tegra_vde *vde;
        struct list_head list;
        struct sg_table *sgt;
        struct iova *iova;
        unsigned int refcnt;
};

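/*
 * Tear down a cache entry: drop its IOVA (when an IOMMU domain is used),
 * unmap and detach the dma-buf, drop the dma-buf reference held by the
 * cache and free the entry.  Called with vde->map_lock held and only for
 * entries that are no longer referenced.
 */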
static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)
{
        struct dma_buf *dmabuf = entry->a->dmabuf;

        WARN_ON_ONCE(entry->refcnt);

        if (entry->vde->domain)
                tegra_vde_iommu_unmap(entry->vde, entry->iova);

        dma_buf_unmap_attachment(entry->a, entry->sgt, entry->dma_dir);
        dma_buf_detach(dmabuf, entry->a);
        dma_buf_put(dmabuf);

        list_del(&entry->list);
        kfree(entry);
}

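/* Delayed-work handler that releases an entry once its grace period expires. */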
static void tegra_vde_delayed_unmap(struct work_struct *work)
{
        struct tegra_vde_cache_entry *entry;
        struct tegra_vde *vde;

        entry = container_of(work, struct tegra_vde_cache_entry,
                             dwork.work);
        vde = entry->vde;

        mutex_lock(&vde->map_lock);
        tegra_vde_release_entry(entry);
        mutex_unlock(&vde->map_lock);
}

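/*
 * Map a dma-buf for DMA, reusing a cached mapping when one exists.  On
 * success the attachment and the DMA address are returned through @ap and
 * @addrp; the mapping stays cached until tegra_vde_dmabuf_cache_unmap()
 * drops the last reference.
 */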
int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
                               struct dma_buf *dmabuf,
                               enum dma_data_direction dma_dir,
                               struct dma_buf_attachment **ap,
                               dma_addr_t *addrp)
{
        struct device *dev = vde->miscdev.parent;
        struct dma_buf_attachment *attachment;
        struct tegra_vde_cache_entry *entry;
        struct sg_table *sgt;
        struct iova *iova;
        int err;

        mutex_lock(&vde->map_lock);

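        /*
         * Reuse a cached mapping if its delayed teardown can still be
         * cancelled.  A direction mismatch widens the cached mapping to
         * bidirectional, and the caller's duplicate dma-buf reference is
         * dropped because the cache already holds one.
         */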
        list_for_each_entry(entry, &vde->map_list, list) {
                if (entry->a->dmabuf != dmabuf)
                        continue;

                if (!cancel_delayed_work(&entry->dwork))
                        continue;

                if (entry->dma_dir != dma_dir)
                        entry->dma_dir = DMA_BIDIRECTIONAL;

                dma_buf_put(dmabuf);

                if (vde->domain)
                        *addrp = iova_dma_addr(&vde->iova, entry->iova);
                else
                        *addrp = sg_dma_address(entry->sgt->sgl);

                goto ref;
        }

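        /* No cached entry: attach and map the dma-buf from scratch. */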
        attachment = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attachment)) {
                dev_err(dev, "Failed to attach dmabuf\n");
                err = PTR_ERR(attachment);
                goto err_unlock;
        }

        sgt = dma_buf_map_attachment(attachment, dma_dir);
        if (IS_ERR(sgt)) {
                dev_err(dev, "Failed to get dmabufs sg_table\n");
                err = PTR_ERR(sgt);
                goto err_detach;
        }

        if (!vde->domain && sgt->nents > 1) {
                dev_err(dev, "Sparse DMA region is unsupported, please enable IOMMU\n");
                err = -EINVAL;
                goto err_unmap;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                err = -ENOMEM;
                goto err_unmap;
        }

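        /*
         * Behind an IOMMU the scatterlist is mapped into a contiguous IOVA
         * range; without one the (single-entry) scatterlist's DMA address
         * is used directly.
         */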
        if (vde->domain) {
                err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
                if (err)
                        goto err_free;

                *addrp = iova_dma_addr(&vde->iova, iova);
        } else {
                *addrp = sg_dma_address(sgt->sgl);
                iova = NULL;
        }

        INIT_DELAYED_WORK(&entry->dwork, tegra_vde_delayed_unmap);
        list_add(&entry->list, &vde->map_list);

        entry->dma_dir = dma_dir;
        entry->iova = iova;
        entry->vde = vde;
        entry->sgt = sgt;
        entry->a = attachment;
ref:
        entry->refcnt++;

        *ap = entry->a;

        mutex_unlock(&vde->map_lock);

        return 0;

err_free:
        kfree(entry);
err_unmap:
        dma_buf_unmap_attachment(attachment, sgt, dma_dir);
err_detach:
        dma_buf_detach(dmabuf, attachment);
err_unlock:
        mutex_unlock(&vde->map_lock);

        return err;
}

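/*
 * Drop a reference on the cache entry owning @a.  When the last reference
 * goes away the entry is released immediately if @release is set, otherwise
 * its teardown is deferred by five seconds so the mapping can be reused.
 */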
void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
                                  struct dma_buf_attachment *a,
                                  bool release)
{
        struct tegra_vde_cache_entry *entry;

        mutex_lock(&vde->map_lock);

        list_for_each_entry(entry, &vde->map_list, list) {
                if (entry->a != a)
                        continue;

                WARN_ON_ONCE(!entry->refcnt);

                if (--entry->refcnt == 0) {
                        if (release)
                                tegra_vde_release_entry(entry);
                        else
                                schedule_delayed_work(&entry->dwork, 5 * HZ);
                }
                break;
        }

        mutex_unlock(&vde->map_lock);
}

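/* Release every unreferenced entry whose delayed teardown hasn't started. */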
void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde)
{
        struct tegra_vde_cache_entry *entry, *tmp;

        mutex_lock(&vde->map_lock);

        list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
                if (entry->refcnt)
                        continue;

                if (!cancel_delayed_work(&entry->dwork))
                        continue;

                tegra_vde_release_entry(entry);
        }

        mutex_unlock(&vde->map_lock);
}

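/* Release all entries, waiting for any teardown work already in flight. */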
void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde)
{
        struct tegra_vde_cache_entry *entry, *tmp;

        mutex_lock(&vde->map_lock);

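        /*
         * Entries can't be released here while their delayed work is still
         * executing (or while they are still referenced), so drop the lock,
         * let the scheduler run the pending work and retry until the list
         * drains.
         */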
        while (!list_empty(&vde->map_list)) {
                list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
                        if (!cancel_delayed_work(&entry->dwork))
                                continue;

                        tegra_vde_release_entry(entry);
                }

                mutex_unlock(&vde->map_lock);
                schedule();
                mutex_lock(&vde->map_lock);
        }

        mutex_unlock(&vde->map_lock);
}