linux/drivers/iommu/msm_iommu.c
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
#include "io-pgtable.h"

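/* Read coprocessor register (crn, crm, op2) of 'processor' into 'reg'. */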
#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   *iop;
        struct device           *dev;
        spinlock_t              pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

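/*
 * Enable the clocks needed to touch the IOMMU register file. The
 * interface clock (pclk) must be running first; if the optional core
 * clock fails to enable, pclk is rolled back.
 */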
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

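/* Program the global registers and every context bank to a known state. */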
static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}

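/*
 * tlb_flush_all hook: invalidate the whole TLB of every context bank
 * attached to this domain, enabling clocks around each register access.
 */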
static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}

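/*
 * tlb_add_flush hook: invalidate the IOVA range one granule at a time,
 * tagging each invalidation address with the context bank's ASID so
 * only that context's entries are dropped.
 */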
static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

static void __flush_iotlb_sync(void *cookie)
{
        /*
         * Nothing is needed here; the barrier that guarantees
         * completion of the TLB sync operation is implicitly
         * taken care of when the IOMMU client does a writel before
         * kick-starting the other master.
         */
}

static const struct iommu_gather_ops msm_iommu_gather_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_add_flush = __flush_iotlb_range,
        .tlb_sync = __flush_iotlb_sync,
};

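/*
 * Atomically claim a free context bank number from the bitmap, or
 * return -ENOSPC if every bank in [start, end) is taken.
 */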
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

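/*
 * Route each of the master's stream IDs (MIDs) to its context bank,
 * tag the bank's TLB entries with an ASID, and mark the MIDs
 * non-secure.
 */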
static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set the VMID for this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

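/*
 * Point a freshly reset context bank at the domain's ARMv7
 * short-descriptor pagetable, set up fault reporting, and enable
 * translation.
 */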
static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
        SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

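/*
 * Allocate the ARMv7 short-descriptor pagetable for the domain and
 * publish the page sizes it actually supports.
 */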
static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_gather_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
        struct msm_iommu_dev *iommu, *ret = NULL;
        struct msm_iommu_ctx_dev *master;

        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }

        return ret;
}

static int msm_iommu_add_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        struct iommu_group *group;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        iommu = find_iommu_for_dev(dev);
        if (iommu)
                iommu_device_link(&iommu->iommu, dev);
        else
                ret = -ENODEV;

        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (ret)
                return ret;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void msm_iommu_remove_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        iommu = find_iommu_for_dev(dev);
        if (iommu)
                iommu_device_unlink(&iommu->iommu, dev);

        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        iommu_group_remove_device(dev);
}

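/*
 * Attach a domain to every context bank belonging to the IOMMU that
 * serves this device: allocate a context number for each master,
 * route the master's MIDs to it, and program it with the domain's
 * pagetable.
 */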
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        msm_iommu_domain_config(priv);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached");
                                        ret = -EEXIST;
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

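/*
 * Tear down the attachment: free the pagetable, then reset and release
 * every context bank the domain had claimed.
 */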
static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

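/*
 * map/unmap simply defer to the io-pgtable code under the domain's
 * pagetable lock; TLB maintenance happens through the gather ops above.
 */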
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}

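/*
 * Resolve an IOVA by asking the hardware: write the address to the
 * V2P (VA-to-PA) register of the first attached context bank and read
 * the physical address back from PAR.
 */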
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

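/* Dump a context bank's fault registers, decoding the FSR status bits. */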
static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);

        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
                        (fsr & 0x02) ? "TF " : "",
                        (fsr & 0x04) ? "AFF " : "",
                        (fsr & 0x08) ? "APF " : "",
                        (fsr & 0x10) ? "TLBMF " : "",
                        (fsr & 0x20) ? "HTWDEEF " : "",
                        (fsr & 0x40) ? "HTWSEEF " : "",
                        (fsr & 0x80) ? "MHF " : "",
                        (fsr & 0x10000) ? "SL " : "",
                        (fsr & 0x40000000) ? "SS " : "",
                        (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

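/*
 * Record a stream ID from an of_xlate specifier against the master
 * attached to this IOMMU, creating the master entry on first use.
 * Called under msm_iommu_lock, hence the GFP_ATOMIC allocation.
 */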
static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                if (!master)
                        return;
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev->archdata.iommu = master;
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
                                 spec->args[0]);
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}

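/*
 * Look up the IOMMU instance named by the DT phandle and record the
 * stream ID this device uses on it.
 */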
static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
                if (iommu->dev->of_node == spec->np)
                        break;

        if (!iommu || iommu->dev->of_node != spec->np) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

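/*
 * Context-fault interrupt handler: scan every context bank for a
 * pending fault, print the faulting context's registers, and write the
 * status bits back to FSR to acknowledge the fault.
 */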
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %p\n", iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .add_device = msm_iommu_add_device,
        .remove_device = msm_iommu_remove_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};

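/*
 * Probe: acquire clocks, registers and the context-fault interrupt,
 * reset the hardware, and register the instance with the IOMMU core.
 */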
static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->dev = &pdev->dev;
        platform_set_drvdata(pdev, iommu);
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                dev_err(iommu->dev, "could not get iommu irq\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

        msm_iommu_reset(iommu->base, iommu->ncb);
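
        /*
         * Sanity check: run a translation of VA 0 through context bank 0
         * and make sure PAR comes back non-zero, to verify the hardware
         * responds before registering it.
         */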
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        list_add(&iommu->dev_node, &qcom_iommu_devices);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&iommu->iommu);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }

        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;
fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name   = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe          = msm_iommu_probe,
        .remove         = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register IOMMU driver\n");

        return ret;
}

static void __exit msm_iommu_driver_exit(void)
{
        platform_driver_unregister(&msm_iommu_driver);
}

subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");