linux/drivers/iommu/msm_iommu.c
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
#include "io-pgtable.h"

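/*
 * Inline-asm wrapper for the ARM MRC instruction: reads the coprocessor
 * register selected by (processor, op1, crn, crm, op2) into 'reg'.
 */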
#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

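/*
 * Per-domain driver state: the list of attached IOMMU instances, the
 * generic iommu_domain, and the io-pgtable config/ops backing the
 * domain's ARMv7 short-descriptor page table.
 */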
struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   *iop;
        struct device           *dev;
        spinlock_t              pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

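/*
 * Clock handling: the interface clock (pclk) is always required; the
 * core clock (clk) is optional and is only toggled when present.
 */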
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

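/*
 * Put the IOMMU into a known state: disable the global features, then
 * clear the control, translation and TLB registers of every context bank.
 */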
static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}

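/*
 * io-pgtable tlb_flush_all hook: invalidate the whole TLB for every
 * context bank of every IOMMU attached to this domain.
 */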
static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}

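/*
 * io-pgtable tlb_add_flush hook: invalidate the range one granule at a
 * time, tagging each TLBIVA with the ASID of the target context bank.
 */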
static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

static void __flush_iotlb_sync(void *cookie)
{
        /*
         * Nothing is needed here; the barrier that guarantees
         * completion of the TLB sync operation is implicitly
         * taken care of when the IOMMU client does a writel before
         * kick-starting the other master.
         */
}

static const struct iommu_gather_ops msm_iommu_gather_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_add_flush = __flush_iotlb_range,
        .tlb_sync = __flush_iotlb_sync,
};

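/*
 * Atomically claim a free context bank number from the bitmap, retrying
 * if another CPU grabs the bit first. Returns -ENOSPC when all banks in
 * [start, end) are in use.
 */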
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

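/*
 * Route all of a master's MIDs (stream IDs) to its context bank and tag
 * the bank with a matching ASID, leaving everything non-secure.
 */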
static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set VMID associated with this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

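/* Clear all the per-context registers of one context bank. */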
static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

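/*
 * Point a context bank at the domain's page table and enable
 * translation: program TTBR0/TTBR1/TTBCR and the memory-attribute remap
 * registers from the io-pgtable v7s config, flush the context TLB,
 * enable stalling context-fault interrupts, and finally turn the MMU on
 * for this context.
 */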
static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
        SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

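/*
 * Allocate the ARMv7 short-descriptor io-pgtable for this domain. The
 * TLBI_ON_MAP quirk requests TLB maintenance on map as well as unmap
 * (for hardware that may cache invalid entries), and the supported
 * page-size bitmap is narrowed to whatever the pgtable code accepted.
 */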
static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_gather_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

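/*
 * Find every registered IOMMU instance whose first master matches this
 * device's OF node, claim a context bank for each of its masters,
 * program the banks with this domain's page table, and record the
 * instance on the domain's attached list.
 */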
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        ret = msm_iommu_domain_config(priv);
        if (ret)
                return ret;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached\n");
                                        ret = -EEXIST;
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

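/*
 * map/unmap are thin wrappers: the real work happens in the io-pgtable
 * ops, serialized by the per-domain page-table lock.
 */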
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}

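/*
 * Resolve an IOVA by asking the hardware: write the address to the V2P
 * (virtual-to-physical) probe register of the first attached context
 * bank and read the result back from PAR, handling both supersection
 * and small-page results.
 */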
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);

        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
                        (fsr & 0x02) ? "TF " : "",
                        (fsr & 0x04) ? "AFF " : "",
                        (fsr & 0x08) ? "APF " : "",
                        (fsr & 0x10) ? "TLBMF " : "",
                        (fsr & 0x20) ? "HTWDEEF " : "",
                        (fsr & 0x40) ? "HTWSEEF " : "",
                        (fsr & 0x80) ? "MHF " : "",
                        (fsr & 0x10000) ? "SL " : "",
                        (fsr & 0x40000000) ? "SS " : "",
                        (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

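/*
 * Record a stream ID from an of_xlate() lookup against a device,
 * allocating the master descriptor on first use and ignoring duplicate
 * IDs. Called under msm_iommu_lock, hence the GFP_ATOMIC allocation.
 */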
static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                if (!master)
                        return;
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev->archdata.iommu = master;
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
                                 spec->args[0]);
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
                if (iommu->dev->of_node == spec->np)
                        break;

        if (!iommu || iommu->dev->of_node != spec->np) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

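/*
 * Context-fault interrupt handler: scan every context bank's fault
 * status register, dump the fault state of any bank that reports a
 * fault, then write its FSR to clear the fault.
 */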
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};

static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                dev_err(iommu->dev, "could not get iommu irq\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

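        /*
         * Sanity-check the hardware: run a V2P translation of address 0
         * with the MMU enabled and verify that PAR reads back non-zero
         * before wiring up the fault interrupt.
         */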
        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        list_add(&iommu->dev_node, &qcom_iommu_devices);

        /* msm_iommu_remove() looks the device up via drvdata */
        platform_set_drvdata(pdev, iommu);

        of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;
fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name   = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe          = msm_iommu_probe,
        .remove         = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register IOMMU driver\n");

        return ret;
}

static void __exit msm_iommu_driver_exit(void)
{
        platform_driver_unregister(&msm_iommu_driver);
}

subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

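/*
 * Hook the IOMMU ops up to the platform bus; reached via the
 * IOMMU_OF_DECLARE early-init path below rather than via module init.
 */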
static int __init msm_iommu_init(void)
{
        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
        return 0;
}

static int __init msm_iommu_of_setup(struct device_node *np)
{
        msm_iommu_init();
        return 0;
}

IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");