linux/drivers/iommu/msm_iommu.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

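/*
 * Per-domain private data: the list of IOMMU instances the domain is
 * attached to, the embedded struct iommu_domain handed back to the core,
 * and the io-pgtable configuration/ops for the ARM short-descriptor
 * (v7s) page table backing this domain.
 */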
struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   *iop;
        struct device           *dev;
        spinlock_t              pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

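/*
 * The AHB interface clock (pclk) gates register access and must be
 * enabled before the optional core clock (clk). __disable_clocks()
 * tears the pair down in reverse order.
 */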
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

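/* Put the global registers and every context bank into a known state. */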
static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}

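/*
 * io-pgtable callback: invalidate the whole TLB for every context bank
 * of every IOMMU instance attached to the domain identified by @cookie.
 */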
static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}

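/*
 * io-pgtable callback: invalidate the TLB one granule at a time. Each
 * invalidation address is tagged with the ASID of the target context so
 * that only that context's entries are dropped.
 */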
static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

static void __flush_iotlb_sync(void *cookie)
{
        /*
         * Nothing is needed here, the barrier to guarantee
         * completion of the tlb sync operation is implicitly
         * taken care when the iommu client does a writel before
         * kick starting the other master.
         */
}

static const struct iommu_gather_ops msm_iommu_gather_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_add_flush = __flush_iotlb_range,
        .tlb_sync = __flush_iotlb_sync,
};

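/* Claim a free context bank from @map, or return -ENOSPC if all are in use. */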
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

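/*
 * Route all of @master's machine IDs (MIDs) to its context bank: clear
 * the stale MID-to-context mapping, point each MID at context
 * @master->num under VMID 0, tag the context's TLB entries with an ASID,
 * and mark the MID non-secure.
 */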
static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set MID associated with this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

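/*
 * Program a context bank for translation: enable TEX remap and hardware
 * table walks, load TTBR0/TTBR1/TTBCR and the memory-attribute registers
 * from the io-pgtable v7s configuration, invalidate the context's TLB,
 * route faults to the "secure" interrupt with stalling enabled, and
 * finally turn the MMU on.
 */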
static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
        SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

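/*
 * Only unmanaged domains are supported; the geometry spans the full
 * 32-bit IOVA space.
 */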
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_gather_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
        struct msm_iommu_dev *iommu, *ret = NULL;
        struct msm_iommu_ctx_dev *master;

        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }

        return ret;
}

static int msm_iommu_add_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        struct iommu_group *group;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (iommu)
                iommu_device_link(&iommu->iommu, dev);
        else
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void msm_iommu_remove_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (iommu)
                iommu_device_unlink(&iommu->iommu, dev);

        iommu_group_remove_device(dev);
}

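/*
 * Attach: for the IOMMU instance whose first master matches @dev,
 * allocate a context bank for each master, program MID routing and the
 * page table registers, and link the instance into the domain's attached
 * list. The io-pgtable itself is allocated here, on attach, via
 * msm_iommu_domain_config().
 */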
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        msm_iommu_domain_config(priv);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached");
                                        ret = -EEXIST;
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

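/* Map/unmap simply proxy to the io-pgtable ops under the pagetable lock. */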
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}

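/*
 * Resolve an IOVA with the hardware V2P (virtual-to-physical) engine of
 * the first attached context, distinguishing 16MB supersections from 4K
 * pages when splicing the PAR and VA bits, and returning 0 on a
 * translation fault.
 */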
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);
        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
                        (fsr & 0x02) ? "TF " : "",
                        (fsr & 0x04) ? "AFF " : "",
                        (fsr & 0x08) ? "APF " : "",
                        (fsr & 0x10) ? "TLBMF " : "",
                        (fsr & 0x20) ? "HTWDEEF " : "",
                        (fsr & 0x40) ? "HTWSEEF " : "",
                        (fsr & 0x80) ? "MHF " : "",
                        (fsr & 0x10000) ? "SL " : "",
                        (fsr & 0x40000000) ? "SS " : "",
                        (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

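/*
 * Record a stream ID (MID) from an of_xlate() specifier against the
 * master attached to @dev, allocating the master on first use. Called
 * under msm_iommu_lock, hence GFP_ATOMIC.
 */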
static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                if (!master)
                        return;
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev->archdata.iommu = master;
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
                                 spec->args[0]);
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
                if (iommu->dev->of_node == spec->np)
                        break;

        if (!iommu || iommu->dev->of_node != spec->np) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

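/*
 * Context fault handler: dump the fault status registers of every
 * context bank that reported a fault, then write the fault bits back to
 * clear them.
 */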
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .add_device = msm_iommu_add_device,
        .remove_device = msm_iommu_remove_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};

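/*
 * Probe: acquire and prepare the clocks, map the register space,
 * sanity-check the V2P engine by probing context 0 for a non-zero PAR,
 * request the context fault interrupt, and register with the IOMMU core.
 */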
static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENODEV;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                dev_err(iommu->dev, "could not get iommu irq\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        list_add(&iommu->dev_node, &qcom_iommu_devices);
        platform_set_drvdata(pdev, iommu);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&iommu->iommu);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }

        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;
fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

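/*
 * Device tree sketch for reference (illustrative only; the authoritative
 * example lives in the DT binding document, and the unit address, clock
 * phandles and stream ID below are made up):
 *
 *      iommu@7500000 {
 *              compatible = "qcom,apq8064-iommu";
 *              reg = <0x07500000 0x100000>;
 *              interrupts = <0 63 0>;
 *              clocks = <&clk_smmu_pclk>, <&clk_smmu>;
 *              clock-names = "smmu_pclk", "iommu_clk";
 *              qcom,ncb = <2>;
 *              #iommu-cells = <1>;
 *      };
 *
 * A master then references it as "iommus = <&gfx_iommu 1>;", where the
 * single cell is the stream ID (MID) passed to qcom_iommu_of_xlate().
 */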
static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name   = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe          = msm_iommu_probe,
        .remove         = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register IOMMU driver\n");

        return ret;
}
subsys_initcall(msm_iommu_driver_init);