linux/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT               BIT(0)
#define MMUv2_PTE_EXCEPTION             BIT(1)
#define MMUv2_PTE_WRITEABLE             BIT(2)

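/*
 * A 32-bit GPU virtual address is decoded as:
 *
 *   31         22 21         12 11          0
 *  +-------------+-------------+-------------+
 *  | MTLB index  | STLB index  | page offset |
 *  +-------------+-------------+-------------+
 *
 * Ten bits of MTLB index and ten bits of STLB index select one of 1024
 * entries in each table level; the low twelve bits address a byte
 * within a 4 KiB page.
 */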
#define MMUv2_MTLB_MASK                 0xffc00000
#define MMUv2_MTLB_SHIFT                22
#define MMUv2_STLB_MASK                 0x003ff000
#define MMUv2_STLB_SHIFT                12

#define MMUv2_MAX_STLB_ENTRIES          1024

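/*
 * The MMUv2 page table is two levels deep: a single 4 KiB master TLB
 * (MTLB) page whose 1024 entries each point at one 4 KiB slave TLB
 * (STLB) page holding 1024 page table entries. Both the kernel CPU
 * pointers and the DMA addresses of every table page are kept, as the
 * tables are written by the CPU and walked by the GPU.
 */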
struct etnaviv_iommuv2_domain {
        struct etnaviv_iommu_domain base;
        /* M(aster) TLB aka first level pagetable */
        u32 *mtlb_cpu;
        dma_addr_t mtlb_dma;
        /* S(lave) TLB aka second level pagetable */
        u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
        dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
        return container_of(domain, struct etnaviv_iommuv2_domain, base);
}

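/*
 * Map a single 4 KiB page: locate the PTE slot from the two index
 * fields of the iova and store the physical address with the PRESENT
 * (and, for writable mappings, WRITEABLE) bit set. Only 4 KiB
 * granularity is supported, so any other size is rejected.
 */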
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;
        u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

        if (size != SZ_4K)
                return -EINVAL;

        if (prot & ETNAVIV_PROT_WRITE)
                entry |= MMUv2_PTE_WRITEABLE;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

        return 0;
}

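/*
 * Unmapping does not simply clear the PTE; it writes the EXCEPTION
 * marker instead, so a stale GPU access to the torn-down mapping
 * raises an MMU exception rather than silently hitting the old page.
 */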
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
                                    unsigned long iova, size_t size)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;

        /*
         * The return value is the number of bytes unmapped, so signal
         * an unsupported size with 0 rather than a negative errno,
         * which would read as a huge size_t to the caller.
         */
        if (size != SZ_4K)
                return 0;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

        return SZ_4K;
}

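/*
 * Allocate and initialize the page tables: one scratch page (handed to
 * the hardware as the bad-page fault target in etnaviv_iommuv2_restore()
 * and filled with a recognizable pattern), the MTLB page, and all 1024
 * STLB pages up front, with every PTE preset to the EXCEPTION marker.
 */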
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
        u32 *p;
        int ret, i, j;

        /* allocate scratch page */
        etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
                                                etnaviv_domain->base.dev,
                                                SZ_4K,
                                                &etnaviv_domain->base.bad_page_dma,
                                                GFP_KERNEL);
        if (!etnaviv_domain->base.bad_page_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }
        /* fill the scratch page with an easily recognizable pattern */
        p = etnaviv_domain->base.bad_page_cpu;
        for (i = 0; i < SZ_4K / 4; i++)
                *p++ = 0xdead55aa;

        etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
                                                  SZ_4K,
                                                  &etnaviv_domain->mtlb_dma,
                                                  GFP_KERNEL);
        if (!etnaviv_domain->mtlb_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }

        /* pre-populate STLB pages (may want to switch to on-demand later) */
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                etnaviv_domain->stlb_cpu[i] =
                                dma_alloc_coherent(etnaviv_domain->base.dev,
                                                   SZ_4K,
                                                   &etnaviv_domain->stlb_dma[i],
                                                   GFP_KERNEL);
                if (!etnaviv_domain->stlb_cpu[i]) {
                        ret = -ENOMEM;
                        goto fail_mem;
                }
                /* mark every PTE as unmapped so stray accesses fault */
                p = etnaviv_domain->stlb_cpu[i];
                for (j = 0; j < SZ_4K / 4; j++)
                        *p++ = MMUv2_PTE_EXCEPTION;

                /* hook the STLB into the corresponding MTLB entry */
                etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
                                              MMUv2_PTE_PRESENT;
        }

        return 0;

fail_mem:
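        /*
         * The domain struct was vzalloc'ed in etnaviv_iommuv2_domain_alloc(),
         * so any table not yet allocated is still NULL and the checks
         * below are safe on a partially initialized domain.
         */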
        if (etnaviv_domain->base.bad_page_cpu)
                dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                  etnaviv_domain->base.bad_page_cpu,
                                  etnaviv_domain->base.bad_page_dma);

        if (etnaviv_domain->mtlb_cpu)
                dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                  etnaviv_domain->mtlb_cpu,
                                  etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                          etnaviv_domain->stlb_cpu[i],
                                          etnaviv_domain->stlb_dma[i]);
        }

        return ret;
}

static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                          etnaviv_domain->base.bad_page_cpu,
                          etnaviv_domain->base.bad_page_dma);

        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                          etnaviv_domain->mtlb_cpu,
                          etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                          etnaviv_domain->stlb_cpu[i],
                                          etnaviv_domain->stlb_dma[i]);
        }

        vfree(etnaviv_domain);
}

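/*
 * The page table dump layout is the MTLB page followed by every STLB
 * page whose MTLB entry is marked present, in index order. dump_size()
 * and dump() below must stay in sync on this layout.
 */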
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        size_t dump_size = SZ_4K;
        int i;

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
                        dump_size += SZ_4K;

        return dump_size;
}

static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
        /* advance buf only for pages counted in dump_size() above */
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
                        memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
                        buf += SZ_4K;
                }
        }
}

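/*
 * (Re)program the MMU after power-up or reset. The MTLB base and the
 * bad-page (fault) address are handed to the hardware through a short
 * command stream built by etnaviv_buffer_config_mmuv2() and executed
 * by the FE; only once the GPU has gone idle again is the MMU enable
 * bit set via MMIO.
 */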
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(gpu->mmu->domain);
        u16 prefetch;

        /* If the MMU is already enabled the state is still there. */
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;

        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)etnaviv_domain->mtlb_dma,
                                (u32)etnaviv_domain->base.bad_page_dma);
        etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
                             prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);

        gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
        .free = etnaviv_iommuv2_domain_free,
        .map = etnaviv_iommuv2_map,
        .unmap = etnaviv_iommuv2_unmap,
        .dump_size = etnaviv_iommuv2_dump_size,
        .dump = etnaviv_iommuv2_dump,
};

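/*
 * With 1024 MTLB entries, 1024 PTEs per STLB and 4 KiB pages, the
 * domain spans the full 32-bit GPU address space: 1024 * 1024 * 4 KiB
 * = 4 GiB, hence size is set to (u64)SZ_1G * 4 below.
 */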
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain;
        struct etnaviv_iommu_domain *domain;
        int ret;

        etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
        if (!etnaviv_domain)
                return NULL;

        domain = &etnaviv_domain->base;

        domain->dev = gpu->dev;
        domain->base = 0;
        domain->size = (u64)SZ_1G * 4;
        domain->ops = &etnaviv_iommuv2_ops;

        ret = etnaviv_iommuv2_init(etnaviv_domain);
        if (ret)
                goto out_free;

        return &etnaviv_domain->base;

out_free:
        vfree(etnaviv_domain);
        return NULL;
}