linux/drivers/gpu/drm/msm/msm_gpummu.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */

#include <linux/dma-mapping.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
#include "adreno/a2xx.xml.h"

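/*
 * Flat-pagetable "GPU MMU" used with a2xx Adreno GPUs: a single
 * physically contiguous table holds one 32-bit entry per 4K page of
 * GPU virtual address space, and the GPU's MH MMU registers (see
 * adreno/a2xx.xml.h) are pointed at it via msm_gpummu_params().
 */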
struct msm_gpummu {
        struct msm_mmu base;
        struct msm_gpu *gpu;
        dma_addr_t pt_base;
        uint32_t *table;
};
#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)

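/*
 * The GPU virtual address window starts at 16MB and covers 0xfff 64K
 * chunks (just under 256MB).  TABLE_SIZE is the size of the flat
 * pagetable for that window: one 32-bit entry per 4K page.
 */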
#define GPUMMU_VA_START SZ_16M
#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)

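/* Nothing to tear down on detach; the pagetable lives until destroy(). */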
static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}

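/*
 * Map a scatterlist into the flat pagetable: one entry is written per
 * 4K page, with bit 0 granting write access and bit 1 granting read
 * access, then the MH MMU's cached translations are invalidated so the
 * GPU picks up the change.
 */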
static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
                struct sg_table *sgt, size_t len, int prot)
{
        struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
        unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
        struct scatterlist *sg;
        unsigned prot_bits = 0;
        unsigned i, j;

        if (prot & IOMMU_WRITE)
                prot_bits |= 1;
        if (prot & IOMMU_READ)
                prot_bits |= 2;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                dma_addr_t addr = sg->dma_address;
                for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
                        gpummu->table[idx] = addr | prot_bits;
                        addr += GPUMMU_PAGE_SIZE;
                }
        }

        /* we can improve by deferring flush for multiple map() */
        gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
                A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
                A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
        return 0;
}

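/* Clear the pagetable entries covering [iova, iova + len) and flush. */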
static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
        struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
        unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
        unsigned i;

        for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
                gpummu->table[idx] = 0;

        gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
                A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
                A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
        return 0;
}

static void msm_gpummu_destroy(struct msm_mmu *mmu)
{
        struct msm_gpummu *gpummu = to_msm_gpummu(mmu);

        dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
                DMA_ATTR_FORCE_CONTIGUOUS);

        kfree(gpummu);
}

static const struct msm_mmu_funcs funcs = {
                .detach = msm_gpummu_detach,
                .map = msm_gpummu_map,
                .unmap = msm_gpummu_unmap,
                .destroy = msm_gpummu_destroy,
};

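/*
 * Allocate the MMU object and its physically contiguous pagetable.  The
 * 32 bytes allocated past TABLE_SIZE leave room for the translation-
 * error address handed out by msm_gpummu_params() below (presumably the
 * hardware wants that pointing at valid, 32-byte aligned memory).
 */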
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
{
        struct msm_gpummu *gpummu;

        gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
        if (!gpummu)
                return ERR_PTR(-ENOMEM);

        gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
                GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
        if (!gpummu->table) {
                kfree(gpummu);
                return ERR_PTR(-ENOMEM);
        }

        gpummu->gpu = gpu;
        msm_mmu_init(&gpummu->base, dev, &funcs);

        return &gpummu->base;
}

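/*
 * Report the physical pagetable base and the translation-error address
 * so the GPU-specific code can program the MH MMU registers (PT_BASE
 * and TRAN_ERROR on a2xx).
 */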
void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
                dma_addr_t *tran_error)
{
        dma_addr_t base = to_msm_gpummu(mmu)->pt_base;

        *pt_base = base;
        *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
}