linux/drivers/iommu/ipmmu-vmsa.c
/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>

#include "io-pgtable.h"

struct ipmmu_vmsa_device {
        struct device *dev;
        void __iomem *base;
        struct list_head list;

        unsigned int num_utlbs;

        struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
        struct ipmmu_vmsa_device *mmu;
        struct iommu_domain io_domain;

        struct io_pgtable_cfg cfg;
        struct io_pgtable_ops *iop;

        unsigned int context_id;
        spinlock_t lock;                        /* Protects mappings */
};

struct ipmmu_vmsa_archdata {
        struct ipmmu_vmsa_device *mmu;
        unsigned int *utlbs;
        unsigned int num_utlbs;
};

static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

#define TLB_LOOP_TIMEOUT                100     /* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET              0x800

#define IM_CTX_SIZE                     0x40

#define IMCTR                           0x0000
#define IMCTR_TRE                       (1 << 17)
#define IMCTR_AFE                       (1 << 16)
#define IMCTR_RTSEL_MASK                (3 << 4)
#define IMCTR_RTSEL_SHIFT               4
#define IMCTR_TREN                      (1 << 3)
#define IMCTR_INTEN                     (1 << 2)
#define IMCTR_FLUSH                     (1 << 1)
#define IMCTR_MMUEN                     (1 << 0)

#define IMCAAR                          0x0004

#define IMTTBCR                         0x0008
#define IMTTBCR_EAE                     (1 << 31)
#define IMTTBCR_PMB                     (1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE       (0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE     (2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE     (3 << 28)
#define IMTTBCR_SH1_MASK                (3 << 28)
#define IMTTBCR_ORGN1_NC                (0 << 26)
#define IMTTBCR_ORGN1_WB_WA             (1 << 26)
#define IMTTBCR_ORGN1_WT                (2 << 26)
#define IMTTBCR_ORGN1_WB                (3 << 26)
#define IMTTBCR_ORGN1_MASK              (3 << 26)
#define IMTTBCR_IRGN1_NC                (0 << 24)
#define IMTTBCR_IRGN1_WB_WA             (1 << 24)
#define IMTTBCR_IRGN1_WT                (2 << 24)
#define IMTTBCR_IRGN1_WB                (3 << 24)
#define IMTTBCR_IRGN1_MASK              (3 << 24)
#define IMTTBCR_TSZ1_MASK               (7 << 16)
#define IMTTBCR_TSZ1_SHIFT              16
#define IMTTBCR_SH0_NON_SHAREABLE       (0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE     (2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE     (3 << 12)
#define IMTTBCR_SH0_MASK                (3 << 12)
#define IMTTBCR_ORGN0_NC                (0 << 10)
#define IMTTBCR_ORGN0_WB_WA             (1 << 10)
#define IMTTBCR_ORGN0_WT                (2 << 10)
#define IMTTBCR_ORGN0_WB                (3 << 10)
#define IMTTBCR_ORGN0_MASK              (3 << 10)
#define IMTTBCR_IRGN0_NC                (0 << 8)
#define IMTTBCR_IRGN0_WB_WA             (1 << 8)
#define IMTTBCR_IRGN0_WT                (2 << 8)
#define IMTTBCR_IRGN0_WB                (3 << 8)
#define IMTTBCR_IRGN0_MASK              (3 << 8)
#define IMTTBCR_SL0_LVL_2               (0 << 4)
#define IMTTBCR_SL0_LVL_1               (1 << 4)
#define IMTTBCR_TSZ0_MASK               (7 << 0)
#define IMTTBCR_TSZ0_SHIFT              0

#define IMBUSCR                         0x000c
#define IMBUSCR_DVM                     (1 << 2)
#define IMBUSCR_BUSSEL_SYS              (0 << 0)
#define IMBUSCR_BUSSEL_CCI              (1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR           (2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR       (3 << 0)
#define IMBUSCR_BUSSEL_MASK             (3 << 0)

#define IMTTLBR0                        0x0010
#define IMTTUBR0                        0x0014
#define IMTTLBR1                        0x0018
#define IMTTUBR1                        0x001c

#define IMSTR                           0x0020
#define IMSTR_ERRLVL_MASK               (3 << 12)
#define IMSTR_ERRLVL_SHIFT              12
#define IMSTR_ERRCODE_TLB_FORMAT        (1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM       (4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS     (5 << 8)
#define IMSTR_ERRCODE_MASK              (7 << 8)
#define IMSTR_MHIT                      (1 << 4)
#define IMSTR_ABORT                     (1 << 2)
#define IMSTR_PF                        (1 << 1)
#define IMSTR_TF                        (1 << 0)

#define IMMAIR0                         0x0028
#define IMMAIR1                         0x002c
#define IMMAIR_ATTR_MASK                0xff
#define IMMAIR_ATTR_DEVICE              0x04
#define IMMAIR_ATTR_NC                  0x44
#define IMMAIR_ATTR_WBRWA               0xff
#define IMMAIR_ATTR_SHIFT(n)            ((n) << 3)
#define IMMAIR_ATTR_IDX_NC              0
#define IMMAIR_ATTR_IDX_WBRWA           1
#define IMMAIR_ATTR_IDX_DEV             2

#define IMEAR                           0x0030

#define IMPCTR                          0x0200
#define IMPSTR                          0x0208
#define IMPEAR                          0x020c
#define IMPMBA(n)                       (0x0280 + ((n) * 4))
#define IMPMBD(n)                       (0x02c0 + ((n) * 4))

#define IMUCTR(n)                       (0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN                 (1 << 31)
#define IMUCTR_FIXADD_MASK              (0xff << 16)
#define IMUCTR_FIXADD_SHIFT             16
#define IMUCTR_TTSEL_MMU(n)             ((n) << 4)
#define IMUCTR_TTSEL_PMB                (8 << 4)
#define IMUCTR_TTSEL_MASK               (15 << 4)
#define IMUCTR_FLUSH                    (1 << 1)
#define IMUCTR_MMUEN                    (1 << 0)

#define IMUASID(n)                      (0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK              (0xff << 8)
#define IMUASID_ASID8_SHIFT             8
#define IMUASID_ASID0_MASK              (0xff << 0)
#define IMUASID_ASID0_SHIFT             0

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
        return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
                        u32 data)
{
        iowrite32(data, mmu->base + offset);
}

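/*
 * Context registers are banked: each translation context occupies an
 * IM_CTX_SIZE-byte window, so the helpers below offset the register address
 * by the domain's context ID.
 */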
static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
{
        return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
                            u32 data)
{
        ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
        unsigned int count = 0;

        while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
                cpu_relax();
                if (++count == TLB_LOOP_TIMEOUT) {
                        dev_err_ratelimited(domain->mmu->dev,
                        "TLB sync timed out -- MMU may be deadlocked\n");
                        return;
                }
                udelay(1);
        }
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
        u32 reg;

        reg = ipmmu_ctx_read(domain, IMCTR);
        reg |= IMCTR_FLUSH;
        ipmmu_ctx_write(domain, IMCTR, reg);

        ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
                              unsigned int utlb)
{
        struct ipmmu_vmsa_device *mmu = domain->mmu;

        /*
         * TODO: Reference-count the microTLB as several bus masters can be
         * connected to the same microTLB.
         */

        /* TODO: What should we set the ASID to ? */
        ipmmu_write(mmu, IMUASID(utlb), 0);
        /* TODO: Do we need to flush the microTLB ? */
        ipmmu_write(mmu, IMUCTR(utlb),
                    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
                    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
                               unsigned int utlb)
{
        struct ipmmu_vmsa_device *mmu = domain->mmu;

        ipmmu_write(mmu, IMUCTR(utlb), 0);
}

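/*
 * io-pgtable TLB callbacks. The hardware only supports whole-context TLB
 * invalidation, so both the flush_all and sync callbacks perform a full,
 * synchronous flush and add_flush is a no-op.
 */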
static void ipmmu_tlb_flush_all(void *cookie)
{
        struct ipmmu_vmsa_domain *domain = cookie;

        ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        /* The hardware doesn't support selective TLB flush. */
}

static struct iommu_gather_ops ipmmu_gather_ops = {
        .tlb_flush_all = ipmmu_tlb_flush_all,
        .tlb_add_flush = ipmmu_tlb_add_flush,
        .tlb_sync = ipmmu_tlb_flush_all,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
        u64 ttbr;

        /*
         * Allocate the page table operations.
         *
         * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
         * access, Long-descriptor format" that the NStable bit being set in a
         * table descriptor will result in the NStable and NS bits of all child
         * entries being ignored and considered as being set. The IPMMU seems
         * not to comply with this, as it generates a secure access page fault
         * if any of the NStable and NS bits isn't set when running in
         * non-secure mode.
         */
        domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
        domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.tlb = &ipmmu_gather_ops;
        /*
         * TODO: Add support for coherent walk through CCI with DVM and remove
         * cache handling. For now, delegate it to the io-pgtable code.
         */
        domain->cfg.iommu_dev = domain->mmu->dev;

        domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                           domain);
        if (!domain->iop)
                return -EINVAL;

        /*
         * TODO: When adding support for multiple contexts, find an unused
         * context.
         */
        domain->context_id = 0;

        /* TTBR0 */
        ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
        ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
        ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

        /*
         * TTBCR
         * We use long descriptors with inner-shareable WBWA tables and allocate
         * the whole 32-bit VA space to TTBR0.
         */
        ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
                        IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
                        IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

        /* MAIR0 */
        ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

        /* IMBUSCR */
        ipmmu_ctx_write(domain, IMBUSCR,
                        ipmmu_ctx_read(domain, IMBUSCR) &
                        ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

        /*
         * IMSTR
         * Clear all interrupt flags.
         */
        ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

        /*
         * IMCTR
         * Enable the MMU and interrupt generation. The long-descriptor
         * translation table format doesn't use TEX remapping. Don't enable AF
         * software management as we have no use for it. Flush the TLB as
         * required when modifying the context registers.
         */
        ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

        return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
        /*
         * Disable the context. Flush the TLB as required when modifying the
         * context registers.
         *
         * TODO: Is TLB flush really needed ?
         */
        ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
        ipmmu_tlb_sync(domain);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
        const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
        struct ipmmu_vmsa_device *mmu = domain->mmu;
        u32 status;
        u32 iova;

        status = ipmmu_ctx_read(domain, IMSTR);
        if (!(status & err_mask))
                return IRQ_NONE;

        iova = ipmmu_ctx_read(domain, IMEAR);

        /*
         * Clear the error status flags. Unlike traditional interrupt flag
         * registers that must be cleared by writing 1, this status register
         * seems to require 0. The error address register must be read before,
         * otherwise its value will be 0.
         */
        ipmmu_ctx_write(domain, IMSTR, 0);

        /* Log fatal errors. */
        if (status & IMSTR_MHIT)
                dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
                                    iova);
        if (status & IMSTR_ABORT)
                dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
                                    iova);

        if (!(status & (IMSTR_PF | IMSTR_TF)))
                return IRQ_NONE;

        /*
         * Try to handle page faults and translation faults.
         *
         * TODO: We need to look up the faulty device based on the I/O VA. Use
         * the IOMMU device for now.
         */
        if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
                return IRQ_HANDLED;

        dev_err_ratelimited(mmu->dev,
                            "Unhandled fault: status 0x%08x iova 0x%08x\n",
                            status, iova);

        return IRQ_HANDLED;
}

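/*
 * The driver currently uses a single translation context per IPMMU instance,
 * shared through the ARM DMA mapping, so the top-level IRQ handler simply
 * forwards to that context's domain.
 */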
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
        struct ipmmu_vmsa_device *mmu = dev;
        struct iommu_domain *io_domain;
        struct ipmmu_vmsa_domain *domain;

        if (!mmu->mapping)
                return IRQ_NONE;

        io_domain = mmu->mapping->domain;
        domain = to_vmsa_domain(io_domain);

        return ipmmu_domain_irq(domain);
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
        struct ipmmu_vmsa_domain *domain;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        spin_lock_init(&domain->lock);

        return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        /*
         * Free the domain resources. We assume that all devices have already
         * been detached.
         */
        ipmmu_domain_destroy_context(domain);
        free_io_pgtable_ops(domain->iop);
        kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
                               struct device *dev)
{
        struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
        struct ipmmu_vmsa_device *mmu = archdata->mmu;
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
        unsigned long flags;
        unsigned int i;
        int ret = 0;

        if (!mmu) {
                dev_err(dev, "Cannot attach to IPMMU\n");
                return -ENXIO;
        }

        spin_lock_irqsave(&domain->lock, flags);

        if (!domain->mmu) {
                /* The domain hasn't been used yet, initialize it. */
                domain->mmu = mmu;
                ret = ipmmu_domain_init_context(domain);
        } else if (domain->mmu != mmu) {
                /*
                 * Something is wrong, we can't attach two devices using
                 * different IOMMUs to the same domain.
                 */
                dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
                        dev_name(mmu->dev), dev_name(domain->mmu->dev));
                ret = -EINVAL;
        }

        spin_unlock_irqrestore(&domain->lock, flags);

        if (ret < 0)
                return ret;

        for (i = 0; i < archdata->num_utlbs; ++i)
                ipmmu_utlb_enable(domain, archdata->utlbs[i]);

        return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
                                struct device *dev)
{
        struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
        unsigned int i;

        for (i = 0; i < archdata->num_utlbs; ++i)
                ipmmu_utlb_disable(domain, archdata->utlbs[i]);

        /*
         * TODO: Optimize by disabling the context when no device is attached.
         */
}

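/*
 * Map, unmap and iova_to_phys delegate to the io-pgtable library. The IPMMU
 * walks the page tables in memory, so no extra register programming is needed
 * here; TLB maintenance on unmap goes through ipmmu_gather_ops.
 */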
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        if (!domain)
                return -ENODEV;

        return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
                          size_t size)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        return domain->iop->unmap(domain->iop, iova, size);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
                                      dma_addr_t iova)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        /* TODO: Is locking needed ? */

        return domain->iop->iova_to_phys(domain->iop, iova);
}

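/*
 * Parse the "iommus" phandles of a master device and collect the microTLB
 * indices that point at this IPMMU instance. With "#iommu-cells = <1>" the
 * single cell is the microTLB number, for example (illustrative snippet, not
 * taken from a real board file):
 *
 *     iommus = <&ipmmu 13>;
 */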
static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
                            unsigned int *utlbs, unsigned int num_utlbs)
{
        unsigned int i;

        for (i = 0; i < num_utlbs; ++i) {
                struct of_phandle_args args;
                int ret;

                ret = of_parse_phandle_with_args(dev->of_node, "iommus",
                                                 "#iommu-cells", i, &args);
                if (ret < 0)
                        return ret;

                of_node_put(args.np);

                if (args.np != mmu->dev->of_node || args.args_count != 1)
                        return -EINVAL;

                utlbs[i] = args.args[0];
        }

        return 0;
}

static int ipmmu_add_device(struct device *dev)
{
        struct ipmmu_vmsa_archdata *archdata;
        struct ipmmu_vmsa_device *mmu;
        struct iommu_group *group = NULL;
        unsigned int *utlbs;
        unsigned int i;
        int num_utlbs;
        int ret = -ENODEV;

        if (dev->archdata.iommu) {
                dev_warn(dev, "IOMMU driver already assigned to device %s\n",
                         dev_name(dev));
                return -EINVAL;
        }

        /* Find the master corresponding to the device. */

        num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
                                               "#iommu-cells");
        if (num_utlbs < 0)
                return -ENODEV;

        utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
        if (!utlbs)
                return -ENOMEM;

        spin_lock(&ipmmu_devices_lock);

        list_for_each_entry(mmu, &ipmmu_devices, list) {
                ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
                if (!ret) {
                        /*
                         * TODO Take a reference to the MMU to protect
                         * against device removal.
                         */
                        break;
                }
        }

        spin_unlock(&ipmmu_devices_lock);

        if (ret < 0)
                goto error;

        for (i = 0; i < num_utlbs; ++i) {
                if (utlbs[i] >= mmu->num_utlbs) {
                        ret = -EINVAL;
                        goto error;
                }
        }

        /* Create a device group and add the device to it. */
        group = iommu_group_alloc();
        if (IS_ERR(group)) {
                dev_err(dev, "Failed to allocate IOMMU group\n");
                ret = PTR_ERR(group);
                goto error;
        }

        ret = iommu_group_add_device(group, dev);
        iommu_group_put(group);

        if (ret < 0) {
                dev_err(dev, "Failed to add device to IPMMU group\n");
                group = NULL;
                goto error;
        }

        archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
        if (!archdata) {
                ret = -ENOMEM;
                goto error;
        }

        archdata->mmu = mmu;
        archdata->utlbs = utlbs;
        archdata->num_utlbs = num_utlbs;
        dev->archdata.iommu = archdata;

        /*
         * Create the ARM mapping, used by the ARM DMA mapping core to allocate
         * VAs. This will allocate a corresponding IOMMU domain.
         *
         * TODO:
         * - Create one mapping per context (TLB).
         * - Make the mapping size configurable ? We currently use a 2GB mapping
         *   at a 1GB offset to ensure that NULL VAs will fault.
         */
        if (!mmu->mapping) {
                struct dma_iommu_mapping *mapping;

                mapping = arm_iommu_create_mapping(&platform_bus_type,
                                                   SZ_1G, SZ_2G);
                if (IS_ERR(mapping)) {
                        dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
                        ret = PTR_ERR(mapping);
                        goto error;
                }

                mmu->mapping = mapping;
        }

        /* Attach the ARM VA mapping to the device. */
        ret = arm_iommu_attach_device(dev, mmu->mapping);
        if (ret < 0) {
                dev_err(dev, "Failed to attach device to VA mapping\n");
                goto error;
        }

        return 0;

error:
        arm_iommu_release_mapping(mmu->mapping);

        kfree(dev->archdata.iommu);
        kfree(utlbs);

        dev->archdata.iommu = NULL;

        if (!IS_ERR_OR_NULL(group))
                iommu_group_remove_device(dev);

        return ret;
}

static void ipmmu_remove_device(struct device *dev)
{
        struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;

        arm_iommu_detach_device(dev);
        iommu_group_remove_device(dev);

        kfree(archdata->utlbs);
        kfree(archdata);

        dev->archdata.iommu = NULL;
}

static const struct iommu_ops ipmmu_ops = {
        .domain_alloc = ipmmu_domain_alloc,
        .domain_free = ipmmu_domain_free,
        .attach_dev = ipmmu_attach_device,
        .detach_dev = ipmmu_detach_device,
        .map = ipmmu_map,
        .unmap = ipmmu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = ipmmu_iova_to_phys,
        .add_device = ipmmu_add_device,
        .remove_device = ipmmu_remove_device,
        .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

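/*
 * Clearing IMCTR for each of the four contexts disables translation and
 * interrupt generation, leaving the IPMMU in a known state at probe and
 * remove time.
 */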
static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
        unsigned int i;

        /* Disable all contexts. */
        for (i = 0; i < 4; ++i)
                ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static int ipmmu_probe(struct platform_device *pdev)
{
        struct ipmmu_vmsa_device *mmu;
        struct resource *res;
        int irq;
        int ret;

        if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
                dev_err(&pdev->dev, "missing platform data\n");
                return -EINVAL;
        }

        mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
        if (!mmu) {
                dev_err(&pdev->dev, "cannot allocate device data\n");
                return -ENOMEM;
        }

        mmu->dev = &pdev->dev;
        mmu->num_utlbs = 32;

        /* Map I/O memory and request IRQ. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mmu->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mmu->base))
                return PTR_ERR(mmu->base);

        /*
         * The IPMMU has two register banks, for secure and non-secure modes.
         * The bank mapped at the beginning of the IPMMU address space
         * corresponds to the running mode of the CPU. When running in secure
         * mode the non-secure register bank is also available at an offset.
         *
         * Secure mode operation isn't clearly documented and is thus currently
         * not implemented in the driver. Furthermore, preliminary tests of
         * non-secure operation with the main register bank were not successful.
         * Offset the registers base unconditionally to point to the non-secure
         * alias space for now.
         */
        mmu->base += IM_NS_ALIAS_OFFSET;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no IRQ found\n");
                return irq;
        }

        ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
                               dev_name(&pdev->dev), mmu);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
                return ret;
        }

        ipmmu_device_reset(mmu);

        /*
         * We can't create the ARM mapping here as it requires the bus to have
         * an IOMMU, which only happens when bus_set_iommu() is called in
         * ipmmu_init() after the probe function returns.
         */

        spin_lock(&ipmmu_devices_lock);
        list_add(&mmu->list, &ipmmu_devices);
        spin_unlock(&ipmmu_devices_lock);

        platform_set_drvdata(pdev, mmu);

        return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
        struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

        spin_lock(&ipmmu_devices_lock);
        list_del(&mmu->list);
        spin_unlock(&ipmmu_devices_lock);

        arm_iommu_release_mapping(mmu->mapping);

        ipmmu_device_reset(mmu);

        return 0;
}

static const struct of_device_id ipmmu_of_ids[] = {
        { .compatible = "renesas,ipmmu-vmsa", },
        { }
};

static struct platform_driver ipmmu_driver = {
        .driver = {
                .name = "ipmmu-vmsa",
                .of_match_table = of_match_ptr(ipmmu_of_ids),
        },
        .probe = ipmmu_probe,
        .remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
        int ret;

        ret = platform_driver_register(&ipmmu_driver);
        if (ret < 0)
                return ret;

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &ipmmu_ops);

        return 0;
}

static void __exit ipmmu_exit(void)
{
        return platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");
 887