linux/drivers/iommu/ipmmu-vmsa.c
/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>

#include "io-pgtable.h"

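/*
 * Per-instance IPMMU device data. Each instance also owns the ARM DMA mapping
 * shared by all master devices attached to it.
 */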
struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct list_head list;

	unsigned int num_utlbs;

	struct dma_iommu_mapping *mapping;
};

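/*
 * Per-domain data. A domain is bound to a single IPMMU instance and owns one
 * hardware context and the io-pgtable page table backing it.
 */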
struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	spinlock_t lock;			/* Protects mappings */
};

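/*
 * Per-master data stored in dev->archdata.iommu, recording which IPMMU
 * instance and microTLBs the master device is connected to.
 */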
struct ipmmu_vmsa_archdata {
	struct ipmmu_vmsa_device *mmu;
	unsigned int *utlbs;
	unsigned int num_utlbs;
};

static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			(0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			(0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

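/*
 * Context registers are banked per context, IM_CTX_SIZE bytes apart, starting
 * at the beginning of the register space.
 */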
static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
{
	return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
			    u32 data)
{
	ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
			"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
	struct ipmmu_vmsa_domain *domain = cookie;

	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling.
	 */
	dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
		     DMA_TO_DEVICE);
}

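/*
 * The hardware can only invalidate the TLB as a whole, so tlb_add_flush is a
 * no-op and both tlb_flush_all and tlb_sync perform a full invalidation.
 */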
static struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
	.flush_pgtable = ipmmu_flush_pgtable,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

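/*
 * Initialize the hardware context backing @domain: allocate the LPAE page
 * table through io-pgtable, program TTBR0, TTBCR, MAIR0 and IMBUSCR, then
 * enable translation and interrupt generation.
 */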
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	phys_addr_t ttbr;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop)
		return -EINVAL;

	/*
	 * TODO: When adding support for multiple contexts, find an unused
	 * context.
	 */
	domain->context_id = 0;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
			IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

	/* MAIR0 */
	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	ipmmu_ctx_write(domain, IMBUSCR,
			ipmmu_ctx_read(domain, IMBUSCR) &
			~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}

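/*
 * Device IRQ handler. Only a single context (and thus a single domain) per
 * IPMMU instance is supported for now, so the interrupt is forwarded to the
 * domain backing the ARM DMA mapping.
 */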
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	struct iommu_domain *io_domain;
	struct ipmmu_vmsa_domain *domain;

	if (!mmu->mapping)
		return IRQ_NONE;

	io_domain = mmu->mapping->domain;
	domain = to_vmsa_domain(io_domain);

	return ipmmu_domain_irq(domain);
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
	struct ipmmu_vmsa_device *mmu = archdata->mmu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	for (i = 0; i < archdata->num_utlbs; ++i)
		ipmmu_utlb_enable(domain, archdata->utlbs[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < archdata->num_utlbs; ++i)
		ipmmu_utlb_disable(domain, archdata->utlbs[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

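/* Mapping and unmapping are delegated entirely to the io-pgtable operations. */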
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

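/*
 * Verify that the "iommus" phandles of @dev all reference @mmu and collect the
 * microTLB numbers they specify in @utlbs.
 */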
static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
			    unsigned int *utlbs, unsigned int num_utlbs)
{
	unsigned int i;

	for (i = 0; i < num_utlbs; ++i) {
		struct of_phandle_args args;
		int ret;

		ret = of_parse_phandle_with_args(dev->of_node, "iommus",
						 "#iommu-cells", i, &args);
		if (ret < 0)
			return ret;

		of_node_put(args.np);

		if (args.np != mmu->dev->of_node || args.args_count != 1)
			return -EINVAL;

		utlbs[i] = args.args[0];
	}

	return 0;
}

static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata;
	struct ipmmu_vmsa_device *mmu;
	struct iommu_group *group = NULL;
	unsigned int *utlbs;
	unsigned int i;
	int num_utlbs;
	int ret = -ENODEV;

	if (dev->archdata.iommu) {
		dev_warn(dev, "IOMMU driver already assigned to device %s\n",
			 dev_name(dev));
		return -EINVAL;
	}

	/* Find the master corresponding to the device. */

	num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
					       "#iommu-cells");
	if (num_utlbs < 0)
		return -ENODEV;

	utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
	if (!utlbs)
		return -ENOMEM;

	spin_lock(&ipmmu_devices_lock);

	list_for_each_entry(mmu, &ipmmu_devices, list) {
		ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
		if (!ret) {
			/*
			 * TODO Take a reference to the MMU to protect
			 * against device removal.
			 */
			break;
		}
	}

	spin_unlock(&ipmmu_devices_lock);

	if (ret < 0)
		return -ENODEV;

	for (i = 0; i < num_utlbs; ++i) {
		if (utlbs[i] >= mmu->num_utlbs) {
			ret = -EINVAL;
			goto error;
		}
	}

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		ret = PTR_ERR(group);
		goto error;
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		group = NULL;
		goto error;
	}

	archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
	if (!archdata) {
		ret = -ENOMEM;
		goto error;
	}

	archdata->mmu = mmu;
	archdata->utlbs = utlbs;
	archdata->num_utlbs = num_utlbs;
	dev->archdata.iommu = archdata;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	arm_iommu_release_mapping(mmu->mapping);

	kfree(dev->archdata.iommu);
	kfree(utlbs);

	dev->archdata.iommu = NULL;

	if (!IS_ERR_OR_NULL(group))
		iommu_group_remove_device(dev);

	return ret;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;

	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);

	kfree(archdata->utlbs);
	kfree(archdata);

	dev->archdata.iommu = NULL;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

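/* Disable all translation contexts (the driver currently assumes four). */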
static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < 4; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -EINVAL;
	}

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 32;

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	mmu->base += IM_NS_ALIAS_OFFSET;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ found\n");
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
			       dev_name(&pdev->dev), mmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
		return ret;
	}

	ipmmu_device_reset(mmu);

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	spin_lock(&ipmmu_devices_lock);
	list_add(&mmu->list, &ipmmu_devices);
	spin_unlock(&ipmmu_devices_lock);

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	spin_lock(&ipmmu_devices_lock);
	list_del(&mmu->list);
	spin_unlock(&ipmmu_devices_lock);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

static const struct of_device_id ipmmu_of_ids[] = {
	{ .compatible = "renesas,ipmmu-vmsa", },
	{ }
};

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	int ret;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);

	return 0;
}

static void __exit ipmmu_exit(void)
{
	platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");