linux/drivers/iommu/omap-iommu.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * omap iommu: tlb and pagetable primitives
   4 *
   5 * Copyright (C) 2008-2010 Nokia Corporation
   6 * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
   7 *
   8 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
   9 *              Paul Mundt and Toshihiro Kobayashi
  10 */
  11
  12#include <linux/dma-mapping.h>
  13#include <linux/err.h>
  14#include <linux/slab.h>
  15#include <linux/interrupt.h>
  16#include <linux/ioport.h>
  17#include <linux/platform_device.h>
  18#include <linux/iommu.h>
  19#include <linux/omap-iommu.h>
  20#include <linux/mutex.h>
  21#include <linux/spinlock.h>
  22#include <linux/io.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/of.h>
  25#include <linux/of_iommu.h>
  26#include <linux/of_irq.h>
  27#include <linux/of_platform.h>
  28#include <linux/regmap.h>
  29#include <linux/mfd/syscon.h>
  30
  31#include <linux/platform_data/iommu-omap.h>
  32
  33#include "omap-iopgtable.h"
  34#include "omap-iommu.h"
  35
  36static const struct iommu_ops omap_iommu_ops;
  37
  38#define to_iommu(dev)   ((struct omap_iommu *)dev_get_drvdata(dev))
  39
  40/* bitmap of the page sizes currently supported */
  41#define OMAP_IOMMU_PGSIZES      (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
  42
  43#define MMU_LOCK_BASE_SHIFT     10
  44#define MMU_LOCK_BASE_MASK      (0x1f << MMU_LOCK_BASE_SHIFT)
  45#define MMU_LOCK_BASE(x)        \
  46        ((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
  47
  48#define MMU_LOCK_VICT_SHIFT     4
  49#define MMU_LOCK_VICT_MASK      (0x1f << MMU_LOCK_VICT_SHIFT)
  50#define MMU_LOCK_VICT(x)        \
  51        ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
  52
  53static struct platform_driver omap_iommu_driver;
  54static struct kmem_cache *iopte_cachep;
  55
  56/**
  57 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
  58 * @dom:        generic iommu domain handle
  59 **/
  60static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
  61{
  62        return container_of(dom, struct omap_iommu_domain, domain);
  63}
  64
  65/**
  66 * omap_iommu_save_ctx - Save registers for pm off-mode support
  67 * @dev:        client device
  68 **/
  69void omap_iommu_save_ctx(struct device *dev)
  70{
  71        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
  72        struct omap_iommu *obj;
  73        u32 *p;
  74        int i;
  75
  76        if (!arch_data)
  77                return;
  78
  79        while (arch_data->iommu_dev) {
  80                obj = arch_data->iommu_dev;
  81                p = obj->ctx;
  82                for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
  83                        p[i] = iommu_read_reg(obj, i * sizeof(u32));
  84                        dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
  85                                p[i]);
  86                }
  87                arch_data++;
  88        }
  89}
  90EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
  91
  92/**
  93 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
  94 * @dev:        client device
  95 **/
  96void omap_iommu_restore_ctx(struct device *dev)
  97{
  98        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
  99        struct omap_iommu *obj;
 100        u32 *p;
 101        int i;
 102
 103        if (!arch_data)
 104                return;
 105
 106        while (arch_data->iommu_dev) {
 107                obj = arch_data->iommu_dev;
 108                p = obj->ctx;
 109                for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
 110                        iommu_write_reg(obj, p[i], i * sizeof(u32));
 111                        dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
 112                                p[i]);
 113                }
 114                arch_data++;
 115        }
 116}
 117EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
 118
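/*
 * dra7_cfg_dspsys_mmu - enable or disable a DRA7 DSP MMU through the
 * DSP_SYSTEM syscon register DSP_SYS_MMU_CONFIG. obj->syscfg is only
 * populated for "ti,dra7-dsp-iommu" instances, so this is a no-op on every
 * other OMAP IOMMU.
 */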
 119static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
 120{
 121        u32 val, mask;
 122
 123        if (!obj->syscfg)
 124                return;
 125
 126        mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
 127        val = enable ? mask : 0;
 128        regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
 129}
 130
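/*
 * __iommu_set_twl - configure the table walking logic (TWL). With TWL on,
 * the MMU resolves TLB misses by walking the in-memory pagetable; with it
 * off, only TLB-miss interrupts are enabled and entries must be loaded by
 * software.
 */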
 131static void __iommu_set_twl(struct omap_iommu *obj, bool on)
 132{
 133        u32 l = iommu_read_reg(obj, MMU_CNTL);
 134
 135        if (on)
 136                iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
 137        else
 138                iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
 139
 140        l &= ~MMU_CNTL_MASK;
 141        if (on)
 142                l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
 143        else
 144                l |= (MMU_CNTL_MMU_EN);
 145
 146        iommu_write_reg(obj, l, MMU_CNTL);
 147}
 148
 149static int omap2_iommu_enable(struct omap_iommu *obj)
 150{
 151        u32 l, pa;
 152
 153        if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd,  SZ_16K))
 154                return -EINVAL;
 155
 156        pa = virt_to_phys(obj->iopgd);
 157        if (!IS_ALIGNED(pa, SZ_16K))
 158                return -EINVAL;
 159
 160        l = iommu_read_reg(obj, MMU_REVISION);
 161        dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
 162                 (l >> 4) & 0xf, l & 0xf);
 163
 164        iommu_write_reg(obj, pa, MMU_TTB);
 165
 166        dra7_cfg_dspsys_mmu(obj, true);
 167
 168        if (obj->has_bus_err_back)
 169                iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
 170
 171        __iommu_set_twl(obj, true);
 172
 173        return 0;
 174}
 175
 176static void omap2_iommu_disable(struct omap_iommu *obj)
 177{
 178        u32 l = iommu_read_reg(obj, MMU_CNTL);
 179
 180        l &= ~MMU_CNTL_MASK;
 181        iommu_write_reg(obj, l, MMU_CNTL);
 182        dra7_cfg_dspsys_mmu(obj, false);
 183
 184        dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
 185}
 186
 187static int iommu_enable(struct omap_iommu *obj)
 188{
 189        int err;
 190        struct platform_device *pdev = to_platform_device(obj->dev);
 191        struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
 192
 193        if (pdata && pdata->deassert_reset) {
 194                err = pdata->deassert_reset(pdev, pdata->reset_name);
 195                if (err) {
 196                        dev_err(obj->dev, "deassert_reset failed: %d\n", err);
 197                        return err;
 198                }
 199        }
 200
 201        pm_runtime_get_sync(obj->dev);
 202
 203        err = omap2_iommu_enable(obj);
 204
 205        return err;
 206}
 207
 208static void iommu_disable(struct omap_iommu *obj)
 209{
 210        struct platform_device *pdev = to_platform_device(obj->dev);
 211        struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
 212
 213        omap2_iommu_disable(obj);
 214
 215        pm_runtime_put_sync(obj->dev);
 216
 217        if (pdata && pdata->assert_reset)
 218                pdata->assert_reset(pdev, pdata->reset_name);
 219}
 220
 221/*
 222 *      TLB operations
 223 */
 224static u32 iotlb_cr_to_virt(struct cr_regs *cr)
 225{
 226        u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
 227        u32 mask = get_cam_va_mask(cr->cam & page_size);
 228
 229        return cr->cam & mask;
 230}
 231
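/*
 * get_iopte_attr - pack the endianness, element size and mixed-page
 * attributes of an iotlb_entry into descriptor attribute bits. For 1MB/16MB
 * entries the attributes sit 6 bits higher than for 4KB/64KB entries, hence
 * the conditional shift.
 */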
 232static u32 get_iopte_attr(struct iotlb_entry *e)
 233{
 234        u32 attr;
 235
 236        attr = e->mixed << 5;
 237        attr |= e->endian;
 238        attr |= e->elsz >> 3;
 239        attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
 240                        (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
 241        return attr;
 242}
 243
 244static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
 245{
 246        u32 status, fault_addr;
 247
 248        status = iommu_read_reg(obj, MMU_IRQSTATUS);
 249        status &= MMU_IRQ_MASK;
 250        if (!status) {
 251                *da = 0;
 252                return 0;
 253        }
 254
 255        fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
 256        *da = fault_addr;
 257
 258        iommu_write_reg(obj, status, MMU_IRQSTATUS);
 259
 260        return status;
 261}
 262
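/*
 * The MMU_LOCK register holds two indices: 'base' is the number of preserved
 * (locked) entries at the bottom of the TLB, and 'vict' selects the victim
 * entry that the next load or read of a CAM/RAM pair operates on.
 */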
 263void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
 264{
 265        u32 val;
 266
 267        val = iommu_read_reg(obj, MMU_LOCK);
 268
 269        l->base = MMU_LOCK_BASE(val);
 270        l->vict = MMU_LOCK_VICT(val);
 271}
 272
 273void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
 274{
 275        u32 val;
 276
 277        val = (l->base << MMU_LOCK_BASE_SHIFT);
 278        val |= (l->vict << MMU_LOCK_VICT_SHIFT);
 279
 280        iommu_write_reg(obj, val, MMU_LOCK);
 281}
 282
 283static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
 284{
 285        cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
 286        cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
 287}
 288
 289static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
 290{
 291        iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
 292        iommu_write_reg(obj, cr->ram, MMU_RAM);
 293
 294        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
 295        iommu_write_reg(obj, 1, MMU_LD_TLB);
 296}
 297
 298/* only used in iotlb iteration for-loop */
 299struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
 300{
 301        struct cr_regs cr;
 302        struct iotlb_lock l;
 303
 304        iotlb_lock_get(obj, &l);
 305        l.vict = n;
 306        iotlb_lock_set(obj, &l);
 307        iotlb_read_cr(obj, &cr);
 308
 309        return cr;
 310}
 311
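/*
 * TLB entries are only preloaded by software when PREFETCH_IOTLB is defined;
 * otherwise load_iotlb_entry() is a stub and the table walker fills the TLB
 * on demand.
 */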
 312#ifdef PREFETCH_IOTLB
 313static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
 314                                      struct iotlb_entry *e)
 315{
 316        struct cr_regs *cr;
 317
 318        if (!e)
 319                return NULL;
 320
 321        if (e->da & ~(get_cam_va_mask(e->pgsz))) {
 322                dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
 323                        e->da);
 324                return ERR_PTR(-EINVAL);
 325        }
 326
 327        cr = kmalloc(sizeof(*cr), GFP_KERNEL);
 328        if (!cr)
 329                return ERR_PTR(-ENOMEM);
 330
 331        cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
 332        cr->ram = e->pa | e->endian | e->elsz | e->mixed;
 333
 334        return cr;
 335}
 336
 337/**
 338 * load_iotlb_entry - Set an iommu tlb entry
 339 * @obj:        target iommu
 340 * @e:          an iommu tlb entry info
 341 **/
 342static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 343{
 344        int err = 0;
 345        struct iotlb_lock l;
 346        struct cr_regs *cr;
 347
 348        if (!obj || !obj->nr_tlb_entries || !e)
 349                return -EINVAL;
 350
 351        pm_runtime_get_sync(obj->dev);
 352
 353        iotlb_lock_get(obj, &l);
 354        if (l.base == obj->nr_tlb_entries) {
 355                dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
 356                err = -EBUSY;
 357                goto out;
 358        }
 359        if (!e->prsvd) {
 360                int i;
 361                struct cr_regs tmp;
 362
 363                for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
 364                        if (!iotlb_cr_valid(&tmp))
 365                                break;
 366
 367                if (i == obj->nr_tlb_entries) {
 368                        dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
 369                        err = -EBUSY;
 370                        goto out;
 371                }
 372
 373                iotlb_lock_get(obj, &l);
 374        } else {
 375                l.vict = l.base;
 376                iotlb_lock_set(obj, &l);
 377        }
 378
 379        cr = iotlb_alloc_cr(obj, e);
 380        if (IS_ERR(cr)) {
 381                pm_runtime_put_sync(obj->dev);
 382                return PTR_ERR(cr);
 383        }
 384
 385        iotlb_load_cr(obj, cr);
 386        kfree(cr);
 387
 388        if (e->prsvd)
 389                l.base++;
 390        /* increment victim for next tlb load */
 391        if (++l.vict == obj->nr_tlb_entries)
 392                l.vict = l.base;
 393        iotlb_lock_set(obj, &l);
 394out:
 395        pm_runtime_put_sync(obj->dev);
 396        return err;
 397}
 398
 399#else /* !PREFETCH_IOTLB */
 400
 401static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 402{
 403        return 0;
 404}
 405
 406#endif /* !PREFETCH_IOTLB */
 407
 408static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 409{
 410        return load_iotlb_entry(obj, e);
 411}
 412
 413/**
 414 * flush_iotlb_page - Clear an iommu tlb entry
 415 * @obj:        target iommu
 416 * @da:         iommu device virtual address
 417 *
 418 * Clear an iommu tlb entry which includes 'da' address.
 419 **/
 420static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
 421{
 422        int i;
 423        struct cr_regs cr;
 424
 425        pm_runtime_get_sync(obj->dev);
 426
 427        for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 428                u32 start;
 429                size_t bytes;
 430
 431                if (!iotlb_cr_valid(&cr))
 432                        continue;
 433
 434                start = iotlb_cr_to_virt(&cr);
 435                bytes = iopgsz_to_bytes(cr.cam & 3);
 436
 437                if ((start <= da) && (da < start + bytes)) {
  438                        dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
 439                                __func__, start, da, bytes);
 440                        iotlb_load_cr(obj, &cr);
 441                        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
 442                        break;
 443                }
 444        }
 445        pm_runtime_put_sync(obj->dev);
 446
 447        if (i == obj->nr_tlb_entries)
 448                dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
 449}
 450
 451/**
 452 * flush_iotlb_all - Clear all iommu tlb entries
 453 * @obj:        target iommu
 454 **/
 455static void flush_iotlb_all(struct omap_iommu *obj)
 456{
 457        struct iotlb_lock l;
 458
 459        pm_runtime_get_sync(obj->dev);
 460
 461        l.base = 0;
 462        l.vict = 0;
 463        iotlb_lock_set(obj, &l);
 464
 465        iommu_write_reg(obj, 1, MMU_GFLUSH);
 466
 467        pm_runtime_put_sync(obj->dev);
 468}
 469
 470/*
 471 *      H/W pagetable operations
 472 */
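/*
 * flush_iopte_range - make @num_entries 32-bit descriptors at @offset within
 * the table mapped at @dma visible to the MMU. The tables are mapped
 * DMA_TO_DEVICE, so a sync-for-device is all that is needed.
 */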
 473static void flush_iopte_range(struct device *dev, dma_addr_t dma,
 474                              unsigned long offset, int num_entries)
 475{
 476        size_t size = num_entries * sizeof(u32);
 477
 478        dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
 479}
 480
 481static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
 482{
 483        dma_addr_t pt_dma;
 484
  485        /* Note: freed ioptes must be clean and ready for re-use */
 486        if (iopte) {
 487                if (dma_valid) {
 488                        pt_dma = virt_to_phys(iopte);
 489                        dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
 490                                         DMA_TO_DEVICE);
 491                }
 492
 493                kmem_cache_free(iopte_cachep, iopte);
 494        }
 495}
 496
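/*
 * iopte_alloc - find or allocate the L2 table for @da. The allocation happens
 * with obj->page_table_lock dropped, so a racing mapper may install a table
 * first; in that case the freshly allocated one is freed again. The L2 table
 * must map to a DMA address equal to its physical address, because the L1
 * descriptor stores the physical address of the table.
 */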
 497static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
 498                        dma_addr_t *pt_dma, u32 da)
 499{
 500        u32 *iopte;
 501        unsigned long offset = iopgd_index(da) * sizeof(da);
 502
  503        /* a table already exists */
 504        if (*iopgd)
 505                goto pte_ready;
 506
 507        /*
 508         * do the allocation outside the page table lock
 509         */
 510        spin_unlock(&obj->page_table_lock);
 511        iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
 512        spin_lock(&obj->page_table_lock);
 513
 514        if (!*iopgd) {
 515                if (!iopte)
 516                        return ERR_PTR(-ENOMEM);
 517
 518                *pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
 519                                         DMA_TO_DEVICE);
 520                if (dma_mapping_error(obj->dev, *pt_dma)) {
 521                        dev_err(obj->dev, "DMA map error for L2 table\n");
 522                        iopte_free(obj, iopte, false);
 523                        return ERR_PTR(-ENOMEM);
 524                }
 525
 526                /*
 527                 * we rely on dma address and the physical address to be
 528                 * the same for mapping the L2 table
 529                 */
 530                if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
 531                        dev_err(obj->dev, "DMA translation error for L2 table\n");
 532                        dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
 533                                         DMA_TO_DEVICE);
 534                        iopte_free(obj, iopte, false);
 535                        return ERR_PTR(-ENOMEM);
 536                }
 537
 538                *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
 539
 540                flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
 541                dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
 542        } else {
  543                /* We raced, free the redundant table */
 544                iopte_free(obj, iopte, false);
 545        }
 546
 547pte_ready:
 548        iopte = iopte_offset(iopgd, da);
 549        *pt_dma = iopgd_page_paddr(iopgd);
 550        dev_vdbg(obj->dev,
 551                 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
 552                 __func__, da, iopgd, *iopgd, iopte, *iopte);
 553
 554        return iopte;
 555}
 556
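/*
 * Four mapping sizes are supported, similar to the ARM short-descriptor
 * format: 4KB small pages and 64KB large pages live in an L2 table, 1MB
 * sections and 16MB supersections in the L1 table. Large pages and
 * supersections are written as 16 identical consecutive descriptors.
 */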
 557static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 558{
 559        u32 *iopgd = iopgd_offset(obj, da);
 560        unsigned long offset = iopgd_index(da) * sizeof(da);
 561
 562        if ((da | pa) & ~IOSECTION_MASK) {
  563                dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
 564                        __func__, da, pa, IOSECTION_SIZE);
 565                return -EINVAL;
 566        }
 567
 568        *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
 569        flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
 570        return 0;
 571}
 572
 573static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 574{
 575        u32 *iopgd = iopgd_offset(obj, da);
 576        unsigned long offset = iopgd_index(da) * sizeof(da);
 577        int i;
 578
 579        if ((da | pa) & ~IOSUPER_MASK) {
  580                dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
 581                        __func__, da, pa, IOSUPER_SIZE);
 582                return -EINVAL;
 583        }
 584
 585        for (i = 0; i < 16; i++)
 586                *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
 587        flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
 588        return 0;
 589}
 590
 591static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 592{
 593        u32 *iopgd = iopgd_offset(obj, da);
 594        dma_addr_t pt_dma;
 595        u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
 596        unsigned long offset = iopte_index(da) * sizeof(da);
 597
 598        if (IS_ERR(iopte))
 599                return PTR_ERR(iopte);
 600
 601        *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
 602        flush_iopte_range(obj->dev, pt_dma, offset, 1);
 603
 604        dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
 605                 __func__, da, pa, iopte, *iopte);
 606
 607        return 0;
 608}
 609
 610static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 611{
 612        u32 *iopgd = iopgd_offset(obj, da);
 613        dma_addr_t pt_dma;
 614        u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
 615        unsigned long offset = iopte_index(da) * sizeof(da);
 616        int i;
 617
 618        if ((da | pa) & ~IOLARGE_MASK) {
  619                dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
 620                        __func__, da, pa, IOLARGE_SIZE);
 621                return -EINVAL;
 622        }
 623
 624        if (IS_ERR(iopte))
 625                return PTR_ERR(iopte);
 626
 627        for (i = 0; i < 16; i++)
 628                *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
 629        flush_iopte_range(obj->dev, pt_dma, offset, 16);
 630        return 0;
 631}
 632
 633static int
 634iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
 635{
 636        int (*fn)(struct omap_iommu *, u32, u32, u32);
 637        u32 prot;
 638        int err;
 639
 640        if (!obj || !e)
 641                return -EINVAL;
 642
 643        switch (e->pgsz) {
 644        case MMU_CAM_PGSZ_16M:
 645                fn = iopgd_alloc_super;
 646                break;
 647        case MMU_CAM_PGSZ_1M:
 648                fn = iopgd_alloc_section;
 649                break;
 650        case MMU_CAM_PGSZ_64K:
 651                fn = iopte_alloc_large;
 652                break;
 653        case MMU_CAM_PGSZ_4K:
 654                fn = iopte_alloc_page;
 655                break;
 656        default:
 657                fn = NULL;
 658                break;
 659        }
 660
 661        if (WARN_ON(!fn))
 662                return -EINVAL;
 663
 664        prot = get_iopte_attr(e);
 665
 666        spin_lock(&obj->page_table_lock);
 667        err = fn(obj, e->da, e->pa, prot);
 668        spin_unlock(&obj->page_table_lock);
 669
 670        return err;
 671}
 672
 673/**
 674 * omap_iopgtable_store_entry - Make an iommu pte entry
 675 * @obj:        target iommu
 676 * @e:          an iommu tlb entry info
 677 **/
 678static int
 679omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 680{
 681        int err;
 682
 683        flush_iotlb_page(obj, e->da);
 684        err = iopgtable_store_entry_core(obj, e);
 685        if (!err)
 686                prefetch_iotlb_entry(obj, e);
 687        return err;
 688}
 689
 690/**
 691 * iopgtable_lookup_entry - Lookup an iommu pte entry
 692 * @obj:        target iommu
 693 * @da:         iommu device virtual address
 694 * @ppgd:       iommu pgd entry pointer to be returned
 695 * @ppte:       iommu pte entry pointer to be returned
 696 **/
 697static void
 698iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
 699{
 700        u32 *iopgd, *iopte = NULL;
 701
 702        iopgd = iopgd_offset(obj, da);
 703        if (!*iopgd)
 704                goto out;
 705
 706        if (iopgd_is_table(*iopgd))
 707                iopte = iopte_offset(iopgd, da);
 708out:
 709        *ppgd = iopgd;
 710        *ppte = iopte;
 711}
 712
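/*
 * iopgtable_clear_entry_core - tear down the mapping covering @da and return
 * the number of bytes unmapped. If clearing a small/large page leaves its L2
 * table empty, the table is freed and the L1 entry cleared as well.
 */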
 713static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
 714{
 715        size_t bytes;
 716        u32 *iopgd = iopgd_offset(obj, da);
 717        int nent = 1;
 718        dma_addr_t pt_dma;
 719        unsigned long pd_offset = iopgd_index(da) * sizeof(da);
 720        unsigned long pt_offset = iopte_index(da) * sizeof(da);
 721
 722        if (!*iopgd)
 723                return 0;
 724
 725        if (iopgd_is_table(*iopgd)) {
 726                int i;
 727                u32 *iopte = iopte_offset(iopgd, da);
 728
 729                bytes = IOPTE_SIZE;
 730                if (*iopte & IOPTE_LARGE) {
 731                        nent *= 16;
 732                        /* rewind to the 1st entry */
 733                        iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
 734                }
 735                bytes *= nent;
 736                memset(iopte, 0, nent * sizeof(*iopte));
 737                pt_dma = iopgd_page_paddr(iopgd);
 738                flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
 739
 740                /*
  741                 * walk the L2 table to check whether it still holds any entries
 742                 */
 743                iopte = iopte_offset(iopgd, 0);
 744                for (i = 0; i < PTRS_PER_IOPTE; i++)
 745                        if (iopte[i])
 746                                goto out;
 747
 748                iopte_free(obj, iopte, true);
 749                nent = 1; /* for the next L1 entry */
 750        } else {
 751                bytes = IOPGD_SIZE;
 752                if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
 753                        nent *= 16;
 754                        /* rewind to the 1st entry */
 755                        iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
 756                }
 757                bytes *= nent;
 758        }
 759        memset(iopgd, 0, nent * sizeof(*iopgd));
 760        flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
 761out:
 762        return bytes;
 763}
 764
 765/**
 766 * iopgtable_clear_entry - Remove an iommu pte entry
 767 * @obj:        target iommu
 768 * @da:         iommu device virtual address
 769 **/
 770static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
 771{
 772        size_t bytes;
 773
 774        spin_lock(&obj->page_table_lock);
 775
 776        bytes = iopgtable_clear_entry_core(obj, da);
 777        flush_iotlb_page(obj, da);
 778
 779        spin_unlock(&obj->page_table_lock);
 780
 781        return bytes;
 782}
 783
 784static void iopgtable_clear_entry_all(struct omap_iommu *obj)
 785{
 786        unsigned long offset;
 787        int i;
 788
 789        spin_lock(&obj->page_table_lock);
 790
 791        for (i = 0; i < PTRS_PER_IOPGD; i++) {
 792                u32 da;
 793                u32 *iopgd;
 794
 795                da = i << IOPGD_SHIFT;
 796                iopgd = iopgd_offset(obj, da);
 797                offset = iopgd_index(da) * sizeof(da);
 798
 799                if (!*iopgd)
 800                        continue;
 801
 802                if (iopgd_is_table(*iopgd))
 803                        iopte_free(obj, iopte_offset(iopgd, 0), true);
 804
 805                *iopgd = 0;
 806                flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
 807        }
 808
 809        flush_iotlb_all(obj);
 810
 811        spin_unlock(&obj->page_table_lock);
 812}
 813
 814/*
 815 *      Device IOMMU generic operations
 816 */
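/*
 * iommu_fault_handler - MMU fault ISR. A fault is first offered to the domain
 * owner via report_iommu_fault(); only when that reports it as unhandled is
 * the MMU interrupt masked and the offending pagetable state dumped.
 */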
 817static irqreturn_t iommu_fault_handler(int irq, void *data)
 818{
 819        u32 da, errs;
 820        u32 *iopgd, *iopte;
 821        struct omap_iommu *obj = data;
 822        struct iommu_domain *domain = obj->domain;
 823        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 824
 825        if (!omap_domain->dev)
 826                return IRQ_NONE;
 827
 828        errs = iommu_report_fault(obj, &da);
 829        if (errs == 0)
 830                return IRQ_HANDLED;
 831
 832        /* Fault callback or TLB/PTE Dynamic loading */
 833        if (!report_iommu_fault(domain, obj->dev, da, 0))
 834                return IRQ_HANDLED;
 835
 836        iommu_write_reg(obj, 0, MMU_IRQENABLE);
 837
 838        iopgd = iopgd_offset(obj, da);
 839
 840        if (!iopgd_is_table(*iopgd)) {
  841                dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
 842                        obj->name, errs, da, iopgd, *iopgd);
 843                return IRQ_NONE;
 844        }
 845
 846        iopte = iopte_offset(iopgd, da);
 847
 848        dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
 849                obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
 850
 851        return IRQ_NONE;
 852}
 853
 854/**
 855 * omap_iommu_attach() - attach iommu device to an iommu domain
 856 * @obj:        target omap iommu device
 857 * @iopgd:      page table
 858 **/
 859static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
 860{
 861        int err;
 862
 863        spin_lock(&obj->iommu_lock);
 864
 865        obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
 866                                     DMA_TO_DEVICE);
 867        if (dma_mapping_error(obj->dev, obj->pd_dma)) {
 868                dev_err(obj->dev, "DMA map error for L1 table\n");
 869                err = -ENOMEM;
 870                goto out_err;
 871        }
 872
 873        obj->iopgd = iopgd;
 874        err = iommu_enable(obj);
 875        if (err)
 876                goto out_err;
 877        flush_iotlb_all(obj);
 878
 879        spin_unlock(&obj->iommu_lock);
 880
 881        dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 882
 883        return 0;
 884
 885out_err:
 886        spin_unlock(&obj->iommu_lock);
 887
 888        return err;
 889}
 890
 891/**
 892 * omap_iommu_detach - release iommu device
 893 * @obj:        target iommu
 894 **/
 895static void omap_iommu_detach(struct omap_iommu *obj)
 896{
 897        if (!obj || IS_ERR(obj))
 898                return;
 899
 900        spin_lock(&obj->iommu_lock);
 901
 902        dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
 903                         DMA_TO_DEVICE);
 904        iommu_disable(obj);
 905        obj->pd_dma = 0;
 906        obj->iopgd = NULL;
 907
 908        spin_unlock(&obj->iommu_lock);
 909
 910        dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 911}
 912
 913static bool omap_iommu_can_register(struct platform_device *pdev)
 914{
 915        struct device_node *np = pdev->dev.of_node;
 916
 917        if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
 918                return true;
 919
 920        /*
 921         * restrict IOMMU core registration only for processor-port MDMA MMUs
 922         * on DRA7 DSPs
 923         */
 924        if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
 925            (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
 926                return true;
 927
 928        return false;
 929}
 930
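/*
 * omap_iommu_dra7_get_dsp_system_cfg - look up the DSP_SYSTEM syscon regmap
 * and the MMU instance id (0 or 1) for "ti,dra7-dsp-iommu" nodes. As an
 * illustration only (node and label names are not taken from any particular
 * dts), such a node would carry something like:
 *
 *	mmu0_dsp1: mmu@40d01000 {
 *		compatible = "ti,dra7-dsp-iommu";
 *		...
 *		ti,syscon-mmuconfig = <&dsp1_system 0x0>;
 *	};
 *
 * where the second cell of ti,syscon-mmuconfig is the instance id read below.
 */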
 931static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
 932                                              struct omap_iommu *obj)
 933{
 934        struct device_node *np = pdev->dev.of_node;
 935        int ret;
 936
 937        if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
 938                return 0;
 939
 940        if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
 941                dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
 942                return -EINVAL;
 943        }
 944
 945        obj->syscfg =
 946                syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
 947        if (IS_ERR(obj->syscfg)) {
 948                /* can fail with -EPROBE_DEFER */
 949                ret = PTR_ERR(obj->syscfg);
 950                return ret;
 951        }
 952
 953        if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
 954                                       &obj->id)) {
 955                dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
 956                return -EINVAL;
 957        }
 958
 959        if (obj->id != 0 && obj->id != 1) {
 960                dev_err(&pdev->dev, "invalid IOMMU instance id\n");
 961                return -EINVAL;
 962        }
 963
 964        return 0;
 965}
 966
 967/*
 968 *      OMAP Device MMU(IOMMU) detection
 969 */
 970static int omap_iommu_probe(struct platform_device *pdev)
 971{
 972        int err = -ENODEV;
 973        int irq;
 974        struct omap_iommu *obj;
 975        struct resource *res;
 976        struct device_node *of = pdev->dev.of_node;
 977
 978        if (!of) {
 979                pr_err("%s: only DT-based devices are supported\n", __func__);
 980                return -ENODEV;
 981        }
 982
 983        obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
 984        if (!obj)
 985                return -ENOMEM;
 986
 987        obj->name = dev_name(&pdev->dev);
 988        obj->nr_tlb_entries = 32;
 989        err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
 990        if (err && err != -EINVAL)
 991                return err;
 992        if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
 993                return -EINVAL;
 994        if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
 995                obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
 996
 997        obj->dev = &pdev->dev;
 998        obj->ctx = (void *)obj + sizeof(*obj);
 999
1000        spin_lock_init(&obj->iommu_lock);
1001        spin_lock_init(&obj->page_table_lock);
1002
1003        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1004        obj->regbase = devm_ioremap_resource(obj->dev, res);
1005        if (IS_ERR(obj->regbase))
1006                return PTR_ERR(obj->regbase);
1007
1008        err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
1009        if (err)
1010                return err;
1011
1012        irq = platform_get_irq(pdev, 0);
1013        if (irq < 0)
1014                return -ENODEV;
1015
1016        err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
1017                               dev_name(obj->dev), obj);
1018        if (err < 0)
1019                return err;
1020        platform_set_drvdata(pdev, obj);
1021
1022        if (omap_iommu_can_register(pdev)) {
1023                obj->group = iommu_group_alloc();
1024                if (IS_ERR(obj->group))
1025                        return PTR_ERR(obj->group);
1026
1027                err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
1028                                             obj->name);
1029                if (err)
1030                        goto out_group;
1031
1032                iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
1033
1034                err = iommu_device_register(&obj->iommu);
1035                if (err)
1036                        goto out_sysfs;
1037        }
1038
1039        pm_runtime_irq_safe(obj->dev);
1040        pm_runtime_enable(obj->dev);
1041
1042        omap_iommu_debugfs_add(obj);
1043
1044        dev_info(&pdev->dev, "%s registered\n", obj->name);
1045
1046        return 0;
1047
1048out_sysfs:
1049        iommu_device_sysfs_remove(&obj->iommu);
1050out_group:
1051        iommu_group_put(obj->group);
1052        return err;
1053}
1054
1055static int omap_iommu_remove(struct platform_device *pdev)
1056{
1057        struct omap_iommu *obj = platform_get_drvdata(pdev);
1058
1059        if (obj->group) {
1060                iommu_group_put(obj->group);
1061                obj->group = NULL;
1062
1063                iommu_device_sysfs_remove(&obj->iommu);
1064                iommu_device_unregister(&obj->iommu);
1065        }
1066
1067        omap_iommu_debugfs_remove(obj);
1068
1069        pm_runtime_disable(obj->dev);
1070
1071        dev_info(&pdev->dev, "%s removed\n", obj->name);
1072        return 0;
1073}
1074
1075static const struct of_device_id omap_iommu_of_match[] = {
1076        { .compatible = "ti,omap2-iommu" },
1077        { .compatible = "ti,omap4-iommu" },
1078        { .compatible = "ti,dra7-iommu" },
1079        { .compatible = "ti,dra7-dsp-iommu" },
1080        {},
1081};
1082
1083static struct platform_driver omap_iommu_driver = {
1084        .probe  = omap_iommu_probe,
1085        .remove = omap_iommu_remove,
1086        .driver = {
1087                .name   = "omap-iommu",
1088                .of_match_table = of_match_ptr(omap_iommu_of_match),
1089        },
1090};
1091
1092static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
1093{
1094        memset(e, 0, sizeof(*e));
1095
1096        e->da           = da;
1097        e->pa           = pa;
1098        e->valid        = MMU_CAM_V;
1099        e->pgsz         = pgsz;
1100        e->endian       = MMU_RAM_ENDIAN_LITTLE;
1101        e->elsz         = MMU_RAM_ELSZ_8;
1102        e->mixed        = 0;
1103
1104        return iopgsz_to_bytes(e->pgsz);
1105}
1106
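/*
 * omap_iommu_map()/omap_iommu_unmap() program every IOMMU instance attached
 * to the domain with the same entry ("mirror programming"), so the multiple
 * MMUs behind a single client device (e.g. a DRA7 DSP) keep identical
 * pagetables.
 */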
1107static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1108                          phys_addr_t pa, size_t bytes, int prot)
1109{
1110        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1111        struct device *dev = omap_domain->dev;
1112        struct omap_iommu_device *iommu;
1113        struct omap_iommu *oiommu;
1114        struct iotlb_entry e;
1115        int omap_pgsz;
 1116        int ret = -EINVAL;
1117        int i;
1118
1119        omap_pgsz = bytes_to_iopgsz(bytes);
1120        if (omap_pgsz < 0) {
 1121                dev_err(dev, "invalid size to map: %zu\n", bytes);
1122                return -EINVAL;
1123        }
1124
 1125        dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
1126
1127        iotlb_init_entry(&e, da, pa, omap_pgsz);
1128
1129        iommu = omap_domain->iommus;
1130        for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
1131                oiommu = iommu->iommu_dev;
1132                ret = omap_iopgtable_store_entry(oiommu, &e);
1133                if (ret) {
1134                        dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
1135                                ret);
1136                        break;
1137                }
1138        }
1139
1140        if (ret) {
1141                while (i--) {
1142                        iommu--;
1143                        oiommu = iommu->iommu_dev;
1144                        iopgtable_clear_entry(oiommu, da);
1145                }
1146        }
1147
1148        return ret;
1149}
1150
1151static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
1152                               size_t size)
1153{
1154        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1155        struct device *dev = omap_domain->dev;
1156        struct omap_iommu_device *iommu;
1157        struct omap_iommu *oiommu;
1158        bool error = false;
1159        size_t bytes = 0;
1160        int i;
1161
 1162        dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
1163
1164        iommu = omap_domain->iommus;
1165        for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
1166                oiommu = iommu->iommu_dev;
1167                bytes = iopgtable_clear_entry(oiommu, da);
1168                if (!bytes)
1169                        error = true;
1170        }
1171
1172        /*
1173         * simplify return - we are only checking if any of the iommus
1174         * reported an error, but not if all of them are unmapping the
1175         * same number of entries. This should not occur due to the
1176         * mirror programming.
1177         */
1178        return error ? 0 : bytes;
1179}
1180
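/*
 * omap_iommu_count - number of IOMMUs behind @dev. dev->archdata.iommu is an
 * array allocated in omap_iommu_add_device() with one extra, zeroed element,
 * so an entry with iommu_dev == NULL terminates the walk.
 */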
1181static int omap_iommu_count(struct device *dev)
1182{
1183        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1184        int count = 0;
1185
1186        while (arch_data->iommu_dev) {
1187                count++;
1188                arch_data++;
1189        }
1190
1191        return count;
1192}
1193
1194/* caller should call cleanup if this function fails */
1195static int omap_iommu_attach_init(struct device *dev,
1196                                  struct omap_iommu_domain *odomain)
1197{
1198        struct omap_iommu_device *iommu;
1199        int i;
1200
1201        odomain->num_iommus = omap_iommu_count(dev);
1202        if (!odomain->num_iommus)
1203                return -EINVAL;
1204
1205        odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
1206                                  GFP_ATOMIC);
1207        if (!odomain->iommus)
1208                return -ENOMEM;
1209
1210        iommu = odomain->iommus;
1211        for (i = 0; i < odomain->num_iommus; i++, iommu++) {
1212                iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
1213                if (!iommu->pgtable)
1214                        return -ENOMEM;
1215
1216                /*
1217                 * should never fail, but please keep this around to ensure
1218                 * we keep the hardware happy
1219                 */
1220                if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
1221                                        IOPGD_TABLE_SIZE)))
1222                        return -EINVAL;
1223        }
1224
1225        return 0;
1226}
1227
1228static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
1229{
1230        int i;
1231        struct omap_iommu_device *iommu = odomain->iommus;
1232
1233        for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
1234                kfree(iommu->pgtable);
1235
1236        kfree(odomain->iommus);
1237        odomain->num_iommus = 0;
1238        odomain->iommus = NULL;
1239}
1240
1241static int
1242omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
1243{
1244        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1245        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1246        struct omap_iommu_device *iommu;
1247        struct omap_iommu *oiommu;
1248        int ret = 0;
1249        int i;
1250
1251        if (!arch_data || !arch_data->iommu_dev) {
1252                dev_err(dev, "device doesn't have an associated iommu\n");
1253                return -EINVAL;
1254        }
1255
1256        spin_lock(&omap_domain->lock);
1257
1258        /* only a single client device can be attached to a domain */
1259        if (omap_domain->dev) {
1260                dev_err(dev, "iommu domain is already attached\n");
1261                ret = -EBUSY;
1262                goto out;
1263        }
1264
1265        ret = omap_iommu_attach_init(dev, omap_domain);
1266        if (ret) {
1267                dev_err(dev, "failed to allocate required iommu data %d\n",
1268                        ret);
1269                goto init_fail;
1270        }
1271
1272        iommu = omap_domain->iommus;
1273        for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
1274                /* configure and enable the omap iommu */
1275                oiommu = arch_data->iommu_dev;
1276                ret = omap_iommu_attach(oiommu, iommu->pgtable);
1277                if (ret) {
1278                        dev_err(dev, "can't get omap iommu: %d\n", ret);
1279                        goto attach_fail;
1280                }
1281
1282                oiommu->domain = domain;
1283                iommu->iommu_dev = oiommu;
1284        }
1285
1286        omap_domain->dev = dev;
1287
1288        goto out;
1289
1290attach_fail:
1291        while (i--) {
1292                iommu--;
1293                arch_data--;
1294                oiommu = iommu->iommu_dev;
1295                omap_iommu_detach(oiommu);
1296                iommu->iommu_dev = NULL;
1297                oiommu->domain = NULL;
1298        }
1299init_fail:
1300        omap_iommu_detach_fini(omap_domain);
1301out:
1302        spin_unlock(&omap_domain->lock);
1303        return ret;
1304}
1305
1306static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
1307                                   struct device *dev)
1308{
1309        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1310        struct omap_iommu_device *iommu = omap_domain->iommus;
1311        struct omap_iommu *oiommu;
1312        int i;
1313
1314        if (!omap_domain->dev) {
1315                dev_err(dev, "domain has no attached device\n");
1316                return;
1317        }
1318
1319        /* only a single device is supported per domain for now */
1320        if (omap_domain->dev != dev) {
1321                dev_err(dev, "invalid attached device\n");
1322                return;
1323        }
1324
1325        /*
1326         * cleanup in the reverse order of attachment - this addresses
1327         * any h/w dependencies between multiple instances, if any
1328         */
1329        iommu += (omap_domain->num_iommus - 1);
1330        arch_data += (omap_domain->num_iommus - 1);
1331        for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
1332                oiommu = iommu->iommu_dev;
1333                iopgtable_clear_entry_all(oiommu);
1334
1335                omap_iommu_detach(oiommu);
1336                iommu->iommu_dev = NULL;
1337                oiommu->domain = NULL;
1338        }
1339
1340        omap_iommu_detach_fini(omap_domain);
1341
1342        omap_domain->dev = NULL;
1343}
1344
1345static void omap_iommu_detach_dev(struct iommu_domain *domain,
1346                                  struct device *dev)
1347{
1348        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1349
1350        spin_lock(&omap_domain->lock);
1351        _omap_iommu_detach_dev(omap_domain, dev);
1352        spin_unlock(&omap_domain->lock);
1353}
1354
1355static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
1356{
1357        struct omap_iommu_domain *omap_domain;
1358
1359        if (type != IOMMU_DOMAIN_UNMANAGED)
1360                return NULL;
1361
1362        omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
1363        if (!omap_domain)
1364                return NULL;
1365
1366        spin_lock_init(&omap_domain->lock);
1367
1368        omap_domain->domain.geometry.aperture_start = 0;
1369        omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
1370        omap_domain->domain.geometry.force_aperture = true;
1371
1372        return &omap_domain->domain;
1373}
1374
1375static void omap_iommu_domain_free(struct iommu_domain *domain)
1376{
1377        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1378
1379        /*
 1380         * Is an iommu device still attached?
 1381         * (currently, only one device can be attached)
1382         */
1383        if (omap_domain->dev)
1384                _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
1385
1386        kfree(omap_domain);
1387}
1388
1389static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1390                                           dma_addr_t da)
1391{
1392        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1393        struct omap_iommu_device *iommu = omap_domain->iommus;
1394        struct omap_iommu *oiommu = iommu->iommu_dev;
1395        struct device *dev = oiommu->dev;
1396        u32 *pgd, *pte;
1397        phys_addr_t ret = 0;
1398
1399        /*
1400         * all the iommus within the domain will have identical programming,
1401         * so perform the lookup using just the first iommu
1402         */
1403        iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
1404
1405        if (pte) {
1406                if (iopte_is_small(*pte))
1407                        ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
1408                else if (iopte_is_large(*pte))
1409                        ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
1410                else
1411                        dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
1412                                (unsigned long long)da);
1413        } else {
1414                if (iopgd_is_section(*pgd))
1415                        ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
1416                else if (iopgd_is_super(*pgd))
1417                        ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
1418                else
1419                        dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
1420                                (unsigned long long)da);
1421        }
1422
1423        return ret;
1424}
1425
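/*
 * omap_iommu_add_device - build the per-client archdata array from the
 * "iommus" phandles. Since #iommu-cells = <0> on OMAP, each specifier is a
 * bare phandle; purely as an illustration (the label is hypothetical), a
 * client node would reference its MMU as:
 *
 *	iommus = <&mmu_isp>;
 */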
1426static int omap_iommu_add_device(struct device *dev)
1427{
1428        struct omap_iommu_arch_data *arch_data, *tmp;
1429        struct omap_iommu *oiommu;
1430        struct iommu_group *group;
1431        struct device_node *np;
1432        struct platform_device *pdev;
1433        int num_iommus, i;
1434        int ret;
1435
1436        /*
1437         * Allocate the archdata iommu structure for DT-based devices.
1438         *
1439         * TODO: Simplify this when removing non-DT support completely from the
1440         * IOMMU users.
1441         */
1442        if (!dev->of_node)
1443                return 0;
1444
1445        /*
1446         * retrieve the count of IOMMU nodes using phandle size as element size
1447         * since #iommu-cells = 0 for OMAP
1448         */
1449        num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
1450                                                     sizeof(phandle));
1451        if (num_iommus < 0)
1452                return 0;
1453
1454        arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
1455        if (!arch_data)
1456                return -ENOMEM;
1457
1458        for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
1459                np = of_parse_phandle(dev->of_node, "iommus", i);
1460                if (!np) {
1461                        kfree(arch_data);
1462                        return -EINVAL;
1463                }
1464
1465                pdev = of_find_device_by_node(np);
1466                if (WARN_ON(!pdev)) {
1467                        of_node_put(np);
1468                        kfree(arch_data);
1469                        return -EINVAL;
1470                }
1471
1472                oiommu = platform_get_drvdata(pdev);
1473                if (!oiommu) {
1474                        of_node_put(np);
1475                        kfree(arch_data);
1476                        return -EINVAL;
1477                }
1478
1479                tmp->iommu_dev = oiommu;
1480
1481                of_node_put(np);
1482        }
1483
1484        /*
1485         * use the first IOMMU alone for the sysfs device linking.
1486         * TODO: Evaluate if a single iommu_group needs to be
1487         * maintained for both IOMMUs
1488         */
1489        oiommu = arch_data->iommu_dev;
1490        ret = iommu_device_link(&oiommu->iommu, dev);
1491        if (ret) {
1492                kfree(arch_data);
1493                return ret;
1494        }
1495
1496        dev->archdata.iommu = arch_data;
1497
1498        /*
1499         * IOMMU group initialization calls into omap_iommu_device_group, which
1500         * needs a valid dev->archdata.iommu pointer
1501         */
1502        group = iommu_group_get_for_dev(dev);
1503        if (IS_ERR(group)) {
1504                iommu_device_unlink(&oiommu->iommu, dev);
1505                dev->archdata.iommu = NULL;
1506                kfree(arch_data);
1507                return PTR_ERR(group);
1508        }
1509        iommu_group_put(group);
1510
1511        return 0;
1512}
1513
1514static void omap_iommu_remove_device(struct device *dev)
1515{
1516        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1517
1518        if (!dev->of_node || !arch_data)
1519                return;
1520
1521        iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
1522        iommu_group_remove_device(dev);
1523
1524        dev->archdata.iommu = NULL;
1525        kfree(arch_data);
1526
1527}
1528
1529static struct iommu_group *omap_iommu_device_group(struct device *dev)
1530{
1531        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1532        struct iommu_group *group = ERR_PTR(-EINVAL);
1533
1534        if (arch_data->iommu_dev)
1535                group = iommu_group_ref_get(arch_data->iommu_dev->group);
1536
1537        return group;
1538}
1539
1540static const struct iommu_ops omap_iommu_ops = {
1541        .domain_alloc   = omap_iommu_domain_alloc,
1542        .domain_free    = omap_iommu_domain_free,
1543        .attach_dev     = omap_iommu_attach_dev,
1544        .detach_dev     = omap_iommu_detach_dev,
1545        .map            = omap_iommu_map,
1546        .unmap          = omap_iommu_unmap,
1547        .iova_to_phys   = omap_iommu_iova_to_phys,
1548        .add_device     = omap_iommu_add_device,
1549        .remove_device  = omap_iommu_remove_device,
1550        .device_group   = omap_iommu_device_group,
1551        .pgsize_bitmap  = OMAP_IOMMU_PGSIZES,
1552};
1553
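/*
 * omap_iommu_init - module init. The L2 pagetable cache below enforces the
 * 1KB alignment the hardware requires, and the driver is registered from a
 * subsys_initcall so that it is ready before its first users (see the
 * omap3isp note at the end of this file).
 */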
1554static int __init omap_iommu_init(void)
1555{
1556        struct kmem_cache *p;
1557        const unsigned long flags = SLAB_HWCACHE_ALIGN;
 1558        size_t align = 1 << 10; /* L2 pagetable alignment */
1559        struct device_node *np;
1560        int ret;
1561
1562        np = of_find_matching_node(NULL, omap_iommu_of_match);
1563        if (!np)
1564                return 0;
1565
1566        of_node_put(np);
1567
1568        p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1569                              NULL);
1570        if (!p)
1571                return -ENOMEM;
1572        iopte_cachep = p;
1573
1574        omap_iommu_debugfs_init();
1575
1576        ret = platform_driver_register(&omap_iommu_driver);
1577        if (ret) {
1578                pr_err("%s: failed to register driver\n", __func__);
1579                goto fail_driver;
1580        }
1581
1582        ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
1583        if (ret)
1584                goto fail_bus;
1585
1586        return 0;
1587
1588fail_bus:
1589        platform_driver_unregister(&omap_iommu_driver);
1590fail_driver:
1591        kmem_cache_destroy(iopte_cachep);
1592        return ret;
1593}
1594subsys_initcall(omap_iommu_init);
1595/* must be ready before omap3isp is probed */
1596