linux/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state_hi.xml.h"

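/*
 * MMUv1 uses a single flat page table: 2 MiB of u32 entries, each mapping
 * one 4 KiB page, covering a 2 GiB linear window of GPU virtual address
 * space that starts at GPU_MEM_START.
 */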
#define PT_SIZE         SZ_2M
#define PT_ENTRIES      (PT_SIZE / sizeof(u32))

#define GPU_MEM_START   0x80000000

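/* Per-context state: CPU mapping and DMA address of the flat page table. */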
struct etnaviv_iommuv1_context {
        struct etnaviv_iommu_context base;
        u32 *pgtable_cpu;
        dma_addr_t pgtable_dma;
};

static struct etnaviv_iommuv1_context *
to_v1_context(struct etnaviv_iommu_context *context)
{
        return container_of(context, struct etnaviv_iommuv1_context, base);
}

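/*
 * Tear down the address space manager, free the page table backing this
 * context and clear the global shared-context pointer before freeing.
 */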
static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

        drm_mm_takedown(&context->mm);

        dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
                    v1_context->pgtable_dma);

        context->global->v1.shared_context = NULL;

        kfree(v1_context);
}

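/*
 * Install a single 4 KiB mapping. The table is one level deep, so the
 * entry index is simply the page offset into the 2 GiB window and the
 * entry value is the physical address itself; e.g. iova 0x80001000 lands
 * at index 1.
 */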
static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

        if (size != SZ_4K)
                return -EINVAL;

        v1_context->pgtable_cpu[index] = paddr;

        return 0;
}

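/*
 * Remove a 4 KiB mapping by pointing the entry back at the scratch bad
 * page, so stray GPU accesses hit a known page instead of random memory.
 * Returns the number of bytes unmapped, 0 on failure.
 */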
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
        unsigned long iova, size_t size)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

        if (size != SZ_4K)
                return 0;

        v1_context->pgtable_cpu[index] = context->global->bad_page_dma;

        return SZ_4K;
}

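/* Coredump support: the dump is a verbatim copy of the page table. */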
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
        return PT_SIZE;
}

static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
                                 void *buf)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

        memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}

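/*
 * Make this context current on the GPU: take a reference, program the
 * memory window base into each engine's base address register and point
 * all five engine MMUs at the page table.
 */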
static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
                             struct etnaviv_iommu_context *context)
{
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        u32 pgtable;

        if (gpu->mmu_context)
                etnaviv_iommu_context_put(gpu->mmu_context);
        gpu->mmu_context = etnaviv_iommu_context_get(context);

        /* set base addresses */
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);

        /* set page table address in MC */
        pgtable = (u32)v1_context->pgtable_dma;

        gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
        gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}

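/* Table of MMUv1 operations used by the etnaviv MMU core. */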
const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
        .free = etnaviv_iommuv1_free,
        .map = etnaviv_iommuv1_map,
        .unmap = etnaviv_iommuv1_unmap,
        .dump_size = etnaviv_iommuv1_dump_size,
        .dump = etnaviv_iommuv1_dump,
        .restore = etnaviv_iommuv1_restore,
};

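/* Return the single shared MMUv1 context, creating it on first use. */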
struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
        struct etnaviv_iommuv1_context *v1_context;
        struct etnaviv_iommu_context *context;

        mutex_lock(&global->lock);

        /*
         * MMUv1 does not support switching between different contexts without
         * a stop the world operation, so we only support a single shared
         * context with this version.
         */
        if (global->v1.shared_context) {
                context = global->v1.shared_context;
                etnaviv_iommu_context_get(context);
                mutex_unlock(&global->lock);
                return context;
        }

        v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
        if (!v1_context) {
                mutex_unlock(&global->lock);
                return NULL;
        }

        v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
                                               &v1_context->pgtable_dma,
                                               GFP_KERNEL);
        if (!v1_context->pgtable_cpu)
                goto out_free;

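        /* Point every entry at the scratch page until real mappings arrive. */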
        memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);

        context = &v1_context->base;
        context->global = global;
        kref_init(&context->refcount);
        mutex_init(&context->lock);
        INIT_LIST_HEAD(&context->mappings);
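        /* Hand the 2 GiB window covered by the table to the DRM MM allocator. */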
        drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
        context->global->v1.shared_context = context;

        mutex_unlock(&global->lock);

        return context;

out_free:
        mutex_unlock(&global->lock);
        kfree(v1_context);
        return NULL;
}