linux/drivers/iommu/msm_iommu.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   *iop;
        struct device           *dev;
        spinlock_t              pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

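/*
 * Enable the mandatory interface clock (pclk) first and then the
 * optional core clock; if the core clock fails to enable, back out the
 * pclk so the enable counts stay balanced.
 */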
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

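/*
 * Put the IOMMU's global registers and every context bank into a known
 * disabled state before any context is programmed.
 */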
static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}

static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}

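/*
 * Invalidate the TLB one granule at a time for every context bank
 * attached to this domain; each IOVA is tagged with the context's ASID
 * before being written to the invalidate-by-VA (TLBIVA) register.
 */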
static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
                               size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_flush_walk = __flush_iotlb_walk,
        .tlb_add_page = __flush_iotlb_page,
};

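/*
 * Allocate a free context bank number from the bitmap. The
 * find_next_zero_bit()/test_and_set_bit() loop retries if another CPU
 * grabs the same bit between the search and the atomic set.
 */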
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

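/*
 * Route every MID (stream ID) of this master to its context bank:
 * VMID 0, context number taken from master->num, ASID equal to the
 * context number, and the security override set to non-secure.
 */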
static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set MID associated with this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

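/*
 * Program a context bank for this domain: reset it, enable TEX remap
 * and hardware table walks, load TTBR0/TTBCR/PRRR/NMRR from the ARMv7
 * short-descriptor io-pgtable configuration, invalidate the context
 * TLB, enable (stalling) fault reporting and finally turn the MMU on.
 */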
static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
        SET_TTBR1(base, ctx, 0);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

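/*
 * Set up the domain's io-pgtable: a 32-bit VA/PA ARMv7 short-descriptor
 * table whose TLB maintenance is routed through msm_iommu_flush_ops.
 */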
static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_flush_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
        struct msm_iommu_dev *iommu, *ret = NULL;
        struct msm_iommu_ctx_dev *master;

        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }

        return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (!iommu)
                return ERR_PTR(-ENODEV);

        return &iommu->iommu;
}

static void msm_iommu_release_device(struct device *dev)
{
}

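/*
 * Attach a domain to a device: allocate and program one context bank
 * for every master registered on the matching IOMMU instance, then add
 * the instance to the domain's list_attached so TLB maintenance can
 * find it later.
 */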
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        msm_iommu_domain_config(priv);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached");
                                        ret = -EEXIST;
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct msm_priv *priv = to_msm_priv(domain);

        __flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len, struct iommu_iotlb_gather *gather)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len, gather);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}

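/*
 * Translate an IOVA by asking the hardware: issue a V2P (VA-to-PA)
 * probe on the first attached context bank and read the result back
 * from the PAR register, distinguishing supersection results from the
 * normal case.
 */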
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);
        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
                        (fsr & 0x02) ? "TF " : "",
                        (fsr & 0x04) ? "AFF " : "",
                        (fsr & 0x08) ? "APF " : "",
                        (fsr & 0x10) ? "TLBMF " : "",
                        (fsr & 0x20) ? "HTWDEEF " : "",
                        (fsr & 0x40) ? "HTWSEEF " : "",
                        (fsr & 0x80) ? "MHF " : "",
                        (fsr & 0x10000) ? "SL " : "",
                        (fsr & 0x40000000) ? "SS " : "",
                        (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

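/*
 * Record a stream ID from the device's OF iommu specifier on the master
 * attached to this IOMMU instance, allocating the per-device context
 * structure on first use and ignoring duplicate IDs.
 */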
static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev_iommu_priv_set(dev, master);
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
                                 sid);
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
                if (iommu->dev->of_node == spec->np)
                        break;

        if (!iommu || iommu->dev->of_node != spec->np) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

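/*
 * Context fault interrupt handler. Faulting transactions are stalled
 * (CFCFG is set in __program_context), so dump the fault registers for
 * every context bank that reports a non-zero FSR and then clear it.
 */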
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        /*
         * Nothing is needed here, the barrier to guarantee
         * completion of the tlb sync operation is implicitly
         * taken care when the iommu client does a writel before
         * kick starting the other master.
         */
        .iotlb_sync = NULL,
        .iotlb_sync_map = msm_iommu_sync_map,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .probe_device = msm_iommu_probe_device,
        .release_device = msm_iommu_release_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};

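/*
 * Probe: acquire clocks and MMIO, read the number of context banks from
 * the "qcom,ncb" property, sanity-check the hardware with a V2P probe of
 * the PAR register, wire up the fault IRQ and register the instance with
 * the IOMMU core.
 */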
static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENODEV;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        list_add(&iommu->dev_node, &qcom_iommu_devices);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }

        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;
fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name   = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe          = msm_iommu_probe,
        .remove         = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register IOMMU driver\n");

        return ret;
}
subsys_initcall(msm_iommu_driver_init);