linux/drivers/iommu/msm_iommu.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

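/*
 * Read a coprocessor register into 'reg' via an inline-asm MRC
 * ("move to register from coprocessor") instruction.
 */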
#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   *iop;
        struct device           *dev;
        spinlock_t              pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

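/*
 * Going by the clock names requested in probe, "smmu_pclk" appears to
 * be the interface clock that must be running before any register
 * access, while the optional "iommu_clk" core clock is enabled on top
 * of it and torn down in the reverse order.
 */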
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}

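/*
 * io-pgtable flush hooks. The cookie handed back by the io-pgtable
 * layer is the msm_priv passed to alloc_io_pgtable_ops(), so each hook
 * walks every IOMMU instance attached to that domain and invalidates
 * the TLB of every context bank the domain owns.
 */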
static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}

static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

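/*
 * Thin adapters between the three io-pgtable flush entry points and
 * the by-VA invalidation loop above; tlb_add_page invalidates a single
 * granule as soon as the io-pgtable code gathers it.
 */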
static void __flush_iotlb_walk(unsigned long iova, size_t size,
                               size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_leaf(unsigned long iova, size_t size,
                               size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, size, granule, true, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_flush_walk = __flush_iotlb_walk,
        .tlb_flush_leaf = __flush_iotlb_leaf,
        .tlb_add_page = __flush_iotlb_page,
};

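/*
 * Context banks are handed out from a small per-IOMMU bitmap; the
 * test_and_set_bit() retry makes the allocation safe against a racing
 * allocator without taking a separate lock.
 */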
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

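/*
 * Point every master ID (MID) of this client at the context bank that
 * was allocated for it, leaving the bank in the non-secure, VMID 0
 * configuration this driver uses.
 */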
static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set MID associated with this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
        SET_TTBR1(base, ctx, 0);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

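/*
 * Only unmanaged (caller-controlled) domains are supported; the
 * io-pgtable itself is not allocated until the first device attach,
 * once the device is known.
 */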
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

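/*
 * Build the io-pgtable configuration for this domain: a 32-bit ARMv7
 * short-descriptor table whose TLB maintenance is routed back through
 * msm_iommu_flush_ops. The bitmap of page sizes the pgtable code
 * actually accepted is propagated back into msm_iommu_ops.
 */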
static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_flush_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
        struct msm_iommu_dev *iommu, *ret = NULL;
        struct msm_iommu_ctx_dev *master;

        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }

        return ret;
}

static int msm_iommu_add_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        struct iommu_group *group;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (!iommu)
                return -ENODEV;

        iommu_device_link(&iommu->iommu, dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void msm_iommu_remove_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (iommu)
                iommu_device_unlink(&iommu->iommu, dev);

        iommu_group_remove_device(dev);
}

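/*
 * Attaching allocates the domain's pagetable, then claims a context
 * bank on every IOMMU instance whose first registered master matches
 * the client's device node, and programs that bank with the domain's
 * TTBR/ASID state.
 */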
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        ret = msm_iommu_domain_config(priv);
        if (ret)
                return ret;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached");
                                        ret = -EEXIST;
                                        __disable_clocks(iommu);
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        __disable_clocks(iommu);
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

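/*
 * map/unmap only manipulate the domain's pagetable under pgtlock; all
 * hardware TLB maintenance happens through the flush_ops callbacks
 * invoked by the io-pgtable code.
 */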
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len, struct iommu_iotlb_gather *gather)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len, gather);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}

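/*
 * Resolve an IOVA by asking the hardware: write the VA to the V2P
 * (virtual-to-physical) probe register of the first attached context
 * bank and read the translation result back out of PAR.
 */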
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);

        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
                        (fsr & 0x02) ? "TF " : "",
                        (fsr & 0x04) ? "AFF " : "",
                        (fsr & 0x08) ? "APF " : "",
                        (fsr & 0x10) ? "TLBMF " : "",
                        (fsr & 0x20) ? "HTWDEEF " : "",
                        (fsr & 0x40) ? "HTWSEEF " : "",
                        (fsr & 0x80) ? "MHF " : "",
                        (fsr & 0x10000) ? "SL " : "",
                        (fsr & 0x40000000) ? "SS " : "",
                        (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                if (!master)
                        return;
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev->archdata.iommu = master;
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
                                 spec->args[0]);
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
                if (iommu->dev->of_node == spec->np)
                        break;

        if (!iommu || iommu->dev->of_node != spec->np) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

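/*
 * Context fault handler, registered in probe as a threaded IRQ: dump
 * the fault state of every context bank and clear the recorded faults
 * so that the next fault is reported.
 */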
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        /*
         * Nothing is needed here, the barrier to guarantee
         * completion of the tlb sync operation is implicitly
         * taken care of when the iommu client does a writel before
         * kick starting the other master.
         */
        .iotlb_sync = NULL,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .add_device = msm_iommu_add_device,
        .remove_device = msm_iommu_remove_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};

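/*
 * A minimal sketch of how a client would exercise these ops through
 * the generic IOMMU API of this kernel generation (the variable names
 * below are illustrative, not taken from this file):
 *
 *      struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *      if (dom && !iommu_attach_device(dom, dev))
 *              iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 */
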
static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

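        /*
         * Sanity-check the translation hardware: run one V2P probe of
         * address 0 on context 0 and make sure PAR comes back non-zero
         * before trusting the block with real traffic.
         */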
        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        list_add(&iommu->dev_node, &qcom_iommu_devices);

        /* Stash the instance so msm_iommu_remove() can find it again. */
        platform_set_drvdata(pdev, iommu);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&iommu->iommu);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }

        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;

fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name   = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe          = msm_iommu_probe,
        .remove         = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register IOMMU driver\n");

        return ret;
}
subsys_initcall(msm_iommu_driver_init);