linux/drivers/iommu/msm_iommu.c
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
#include "io-pgtable.h"

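/* Read an ARM coprocessor register into 'reg' via the MRC instruction. */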
#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   *iop;
        struct device           *dev;
        spinlock_t              pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

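/*
 * Enable the IOMMU clocks: the interface clock (pclk) first, then the
 * optional core clock, unwinding pclk if the core clock fails to enable.
 */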
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

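/*
 * Put the IOMMU into a known disabled state: clear the global control
 * registers, then zero out every register of each context bank.
 */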
static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}

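/*
 * Invalidate the entire TLB of every context bank attached to this
 * domain. Wired up below as the io-pgtable tlb_flush_all hook.
 */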
static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}

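/*
 * Invalidate the TLB entries covering [iova, iova + size) one granule at
 * a time. Each TLBIVA write carries the context's ASID in the low bits
 * of the VA so only entries tagged for that context are dropped.
 */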
static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

static void __flush_iotlb_sync(void *cookie)
{
        /*
         * Nothing is needed here; the barrier that guarantees
         * completion of the TLB sync operation is implicitly
         * taken care of when the IOMMU client does a writel before
         * kick-starting the other master.
         */
}

static const struct iommu_gather_ops msm_iommu_gather_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_add_flush = __flush_iotlb_range,
        .tlb_sync = __flush_iotlb_sync,
};

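/*
 * Claim a free context bank from the allocation bitmap. The bit may be
 * taken by a concurrent caller between find_next_zero_bit() and
 * test_and_set_bit(), so loop until a bit is actually won.
 */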
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

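/*
 * Route each stream ID (MID) of this master to its assigned context
 * bank, and give the context an ASID (equal to the context number)
 * for TLB tagging.
 */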
static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set MID associated with this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

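/*
 * Bring a context bank up from reset: install the ARMv7 short-descriptor
 * page table (TTBR0/1, TTBCR, PRRR, NMRR), enable context fault
 * reporting and finally turn the MMU on for this context.
 */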
static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
        SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

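/*
 * Build the io-pgtable configuration for this domain and allocate the
 * ARMv7 short-descriptor page table backing it. The bitmap of page
 * sizes actually supported is propagated back into msm_iommu_ops.
 */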
static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_gather_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
        struct msm_iommu_dev *iommu, *ret = NULL;
        struct msm_iommu_ctx_dev *master;

        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }

        return ret;
}

static int msm_iommu_add_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        iommu = find_iommu_for_dev(dev);
        if (iommu)
                iommu_device_link(&iommu->iommu, dev);
        else
                ret = -ENODEV;

        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_remove_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        iommu = find_iommu_for_dev(dev);
        if (iommu)
                iommu_device_unlink(&iommu->iommu, dev);

        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

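/*
 * Attach a device to the domain: for each IOMMU instance serving it,
 * allocate one context bank per master, program the stream ID routing
 * and point the context at this domain's page table.
 */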
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        ret = msm_iommu_domain_config(priv);
        if (ret)
                return ret;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached");
                                        ret = -EEXIST;
                                        __disable_clocks(iommu);
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        __disable_clocks(iommu);
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

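/*
 * map/unmap forward to the io-pgtable ops; pgtlock serializes
 * concurrent page table updates within the domain.
 */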
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}

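/*
 * Resolve an IOVA by asking the hardware: write the address to the V2P
 * probe register and read the translation result back from PAR,
 * distinguishing supersection results from section/page ones.
 */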
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        if (__enable_clocks(iommu))
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);

        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
                        (fsr & 0x02) ? "TF " : "",
                        (fsr & 0x04) ? "AFF " : "",
                        (fsr & 0x08) ? "APF " : "",
                        (fsr & 0x10) ? "TLBMF " : "",
                        (fsr & 0x20) ? "HTWDEEF " : "",
                        (fsr & 0x40) ? "HTWSEEF " : "",
                        (fsr & 0x80) ? "MHF " : "",
                        (fsr & 0x10000) ? "SL " : "",
                        (fsr & 0x40000000) ? "SS " : "",
                        (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

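/*
 * Record a stream ID for a master device, allocating the per-device
 * context structure on first use. Duplicate stream IDs are ignored.
 */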
static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                if (!master)
                        return;
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev->archdata.iommu = master;
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
                                 spec->args[0]);
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu = NULL, *iter;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
                if (iter->dev->of_node == spec->np) {
                        iommu = iter;
                        break;
                }
        }

        if (!iommu) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

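/*
 * Context fault interrupt: scan every context bank for a pending
 * fault, dump the interesting registers and clear the fault status
 * bits.
 */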
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %p\n", iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .add_device = msm_iommu_add_device,
        .remove_device = msm_iommu_remove_device,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};

static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                dev_err(iommu->dev, "could not get iommu irq\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

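        /*
         * Reset the hardware, then sanity-check it with a dummy V2P
         * translation on context 0; a PAR value of zero afterwards
         * means the IOMMU did not respond.
         */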
        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        list_add(&iommu->dev_node, &qcom_iommu_devices);
        platform_set_drvdata(pdev, iommu);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&iommu->iommu);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;
fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name   = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe          = msm_iommu_probe,
        .remove         = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register IOMMU driver\n");

        return ret;
}

static void __exit msm_iommu_driver_exit(void)
{
        platform_driver_unregister(&msm_iommu_driver);
}

subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

static int __init msm_iommu_init(void)
{
        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
        return 0;
}

static int __init msm_iommu_of_setup(struct device_node *np)
{
        msm_iommu_init();
        return 0;
}

IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");