linux/drivers/iommu/msm_iommu.c
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

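/*
 * Read a CP15 coprocessor register into 'reg' via inline assembly. Used
 * below to mirror the CPU's TEX remap configuration (PRRR/NMRR) into each
 * IOMMU context, so the IOMMU and the CPU interpret memory attributes the
 * same way.
 */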
#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

#define RCP15_PRRR(reg)         MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)         MRC(reg, p15, 0, c10, c2, 1)

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_SPINLOCK(msm_iommu_lock);

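/*
 * Per-domain state: a 16KB ARM short-descriptor first-level page table
 * plus the list of context banks currently attached to the domain. The
 * generic iommu_domain is embedded so to_msm_priv() can recover the
 * private data with container_of().
 */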
struct msm_priv {
        unsigned long *pgtable;
        struct list_head list_attached;
        struct iommu_domain domain;
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

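/*
 * Enable the interface clock (pclk) first, then the optional core clock.
 * If the core clock fails to enable, the interface clock is unwound so
 * the pair is never left half-enabled.
 */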
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
        int ret;

        ret = clk_enable(drvdata->pclk);
        if (ret)
                goto fail;

        if (drvdata->clk) {
                ret = clk_enable(drvdata->clk);
                if (ret)
                        clk_disable(drvdata->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
        clk_disable(drvdata->clk);
        clk_disable(drvdata->pclk);
}

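/*
 * Invalidate the TLB of every context attached to this domain. When the
 * page tables are not walked through the L2 cache (CONFIG_IOMMU_PGTABLES_L2
 * unset), the first- and second-level tables are first flushed out of the
 * CPU caches so the IOMMU's table walker sees current entries. Clocks are
 * enabled around each TLBIALL since context registers need them.
 */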
static int __flush_iotlb(struct iommu_domain *domain)
{
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_drvdata *iommu_drvdata;
        struct msm_iommu_ctx_drvdata *ctx_drvdata;
        int ret = 0;
#ifndef CONFIG_IOMMU_PGTABLES_L2
        unsigned long *fl_table = priv->pgtable;
        int i;

        if (!list_empty(&priv->list_attached)) {
                dmac_flush_range(fl_table, fl_table + SZ_16K);

                for (i = 0; i < NUM_FL_PTE; i++)
                        if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
                                void *sl_table = __va(fl_table[i] &
                                                                FL_BASE_MASK);
                                dmac_flush_range(sl_table, sl_table + SZ_4K);
                        }
        }
#endif

        list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {

                BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

                iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
                BUG_ON(!iommu_drvdata);

                ret = __enable_clocks(iommu_drvdata);
                if (ret)
                        goto fail;

                SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
                __disable_clocks(iommu_drvdata);
        }
fail:
        return ret;
}

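/* Return every register of a context bank to a known (zero) state. */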
static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
        SET_PRRR(base, ctx, 0);
        SET_NMRR(base, ctx, 0);
}

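/*
 * Program a context bank for a domain: point TTBR0 at the 16KB-aligned
 * first-level table (hence pgtable >> 14), enable hardware table walks,
 * mirror the CPU's TEX remap registers, enable fault reporting and
 * prefetch, and finally turn the MMU on.
 */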
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
        unsigned int prrr, nmrr;
        __reset_context(base, ctx);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, 0);
        SET_TTBR0_PA(base, ctx, (pgtable >> 14));

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);

        /* Set TEX remap attributes */
        RCP15_PRRR(prrr);
        RCP15_NMRR(nmrr);
        SET_PRRR(base, ctx, prrr);
        SET_NMRR(base, ctx, nmrr);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
        /* Configure page tables as inner-cacheable and shareable to reduce
         * the TLB miss penalty.
         */
        SET_TTBR0_SH(base, ctx, 1);
        SET_TTBR1_SH(base, ctx, 1);

        SET_TTBR0_NOS(base, ctx, 1);
        SET_TTBR1_NOS(base, ctx, 1);

        SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
        SET_TTBR0_IRGNL(base, ctx, 1);

        SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
        SET_TTBR1_IRGNL(base, ctx, 1);

        SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
        SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

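/*
 * Allocate an unmanaged domain backed by a zeroed 16KB first-level table
 * (4096 four-byte entries, each covering 1MB of the full 32-bit IOVA
 * aperture).
 */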
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);
        priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
                                                          get_order(SZ_16K));

        if (!priv->pgtable)
                goto fail_nomem;

        memset(priv->pgtable, 0, SZ_16K);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

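/*
 * Free the domain: release any second-level tables still referenced by
 * first-level entries, then the first-level table itself.
 */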
static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;
        unsigned long *fl_table;
        int i;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);

        fl_table = priv->pgtable;

        for (i = 0; i < NUM_FL_PTE; i++)
                if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
                        free_page((unsigned long) __va(((fl_table[i]) &
                                                        FL_BASE_MASK)));

        free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
        priv->pgtable = NULL;

        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

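/*
 * Attach a context device to a domain: refuse double attachment, program
 * the context bank with the domain's page table, add it to the domain's
 * attach list, and invalidate its TLB.
 */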
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct msm_priv *priv;
        struct msm_iommu_ctx_dev *ctx_dev;
        struct msm_iommu_drvdata *iommu_drvdata;
        struct msm_iommu_ctx_drvdata *ctx_drvdata;
        struct msm_iommu_ctx_drvdata *tmp_drvdata;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);

        if (!dev) {
                ret = -EINVAL;
                goto fail;
        }

        iommu_drvdata = dev_get_drvdata(dev->parent);
        ctx_drvdata = dev_get_drvdata(dev);
        ctx_dev = dev->platform_data;

        if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
                ret = -EINVAL;
                goto fail;
        }

        if (!list_empty(&ctx_drvdata->attached_elm)) {
                ret = -EBUSY;
                goto fail;
        }

        list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
                if (tmp_drvdata == ctx_drvdata) {
                        ret = -EBUSY;
                        goto fail;
                }

        ret = __enable_clocks(iommu_drvdata);
        if (ret)
                goto fail;

        __program_context(iommu_drvdata->base, ctx_dev->num,
                          __pa(priv->pgtable));

        __disable_clocks(iommu_drvdata);
        list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
        ret = __flush_iotlb(domain);

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

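/*
 * Detach a context device: flush its TLB, reset the context bank, and
 * drop it from the domain's attach list.
 */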
static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv;
        struct msm_iommu_ctx_dev *ctx_dev;
        struct msm_iommu_drvdata *iommu_drvdata;
        struct msm_iommu_ctx_drvdata *ctx_drvdata;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);

        if (!dev)
                goto fail;

        iommu_drvdata = dev_get_drvdata(dev->parent);
        ctx_drvdata = dev_get_drvdata(dev);
        ctx_dev = dev->platform_data;

        if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
                goto fail;

        ret = __flush_iotlb(domain);
        if (ret)
                goto fail;

        ret = __enable_clocks(iommu_drvdata);
        if (ret)
                goto fail;

        __reset_context(iommu_drvdata->base, ctx_dev->num);
        __disable_clocks(iommu_drvdata);
        list_del_init(&ctx_drvdata->attached_elm);

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

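/*
 * Install a mapping in the domain's ARM short-descriptor tables. The four
 * supported sizes map as follows: a 16M supersection is 16 identical
 * first-level entries, a 1M section is a single first-level entry, a 64K
 * large page is 16 identical second-level entries, and a 4K small page is
 * a single second-level entry. A second-level table is allocated on demand
 * the first time a 4K/64K mapping lands in a given 1M region.
 */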
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
                         phys_addr_t pa, size_t len, int prot)
{
        struct msm_priv *priv;
        unsigned long flags;
        unsigned long *fl_table;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        unsigned int pgprot;
        int ret = 0, tex, sh;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
        tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];

        if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
                ret = -EINVAL;
                goto fail;
        }

        priv = to_msm_priv(domain);

        fl_table = priv->pgtable;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad size: %zu\n", len);
                ret = -EINVAL;
                goto fail;
        }

        if (!fl_table) {
                pr_debug("Null page table\n");
                ret = -EINVAL;
                goto fail;
        }

        if (len == SZ_16M || len == SZ_1M) {
                pgprot = sh ? FL_SHARED : 0;
                pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? FL_TEX0 : 0;
        } else {
                pgprot = sh ? SL_SHARED : 0;
                pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? SL_TEX0 : 0;
        }

        fl_offset = FL_OFFSET(va);      /* Upper 12 bits */
        fl_pte = fl_table + fl_offset;  /* int pointers, 4 bytes */

        if (len == SZ_16M) {
                int i = 0;
                for (i = 0; i < 16; i++)
                        *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
                                  FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
                                  FL_SHARED | FL_NG | pgprot;
        }

        if (len == SZ_1M)
                *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
                                            FL_TYPE_SECT | FL_SHARED | pgprot;

        /* Need a 2nd level table */
        if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
                unsigned long *sl;
                sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
                                                        get_order(SZ_4K));

                if (!sl) {
                        pr_debug("Could not allocate second level table\n");
                        ret = -ENOMEM;
                        goto fail;
                }

                memset(sl, 0, SZ_4K);
                *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
        }

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_4K)
                *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
                                          SL_SHARED | SL_TYPE_SMALL | pgprot;

        if (len == SZ_64K) {
                int i;

                for (i = 0; i < 16; i++)
                        *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
                            SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
        }

        ret = __flush_iotlb(domain);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

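/*
 * Remove a mapping. Sections and supersections clear the corresponding
 * first-level entries; small and large pages clear second-level entries,
 * and a second-level table that becomes empty is freed and its first-level
 * entry cleared. Returns the number of bytes unmapped, or 0 on failure.
 */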
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
                            size_t len)
{
        struct msm_priv *priv;
        unsigned long flags;
        unsigned long *fl_table;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        int i, ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);

        fl_table = priv->pgtable;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad length: %zu\n", len);
                goto fail;
        }

        if (!fl_table) {
                pr_debug("Null page table\n");
                goto fail;
        }

        fl_offset = FL_OFFSET(va);      /* Upper 12 bits */
        fl_pte = fl_table + fl_offset;  /* int pointers, 4 bytes */

        if (*fl_pte == 0) {
                pr_debug("First level PTE is 0\n");
                goto fail;
        }

        /* Unmap supersection */
        if (len == SZ_16M)
                for (i = 0; i < 16; i++)
                        *(fl_pte+i) = 0;

        if (len == SZ_1M)
                *fl_pte = 0;

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_64K) {
                for (i = 0; i < 16; i++)
                        *(sl_pte+i) = 0;
        }

        if (len == SZ_4K)
                *sl_pte = 0;

        if (len == SZ_4K || len == SZ_64K) {
                int used = 0;

                for (i = 0; i < NUM_SL_PTE; i++)
                        if (sl_table[i])
                                used = 1;
                if (!used) {
                        free_page((unsigned long)sl_table);
                        *fl_pte = 0;
                }
        }

        ret = __flush_iotlb(domain);

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        /* the IOMMU API requires us to return how many bytes were unmapped */
        len = ret ? 0 : len;
        return len;
}

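/*
 * Translate an IOVA by asking the hardware: the V2P (virtual-to-physical)
 * engine of the first attached context performs the walk and leaves the
 * result in PAR. Returns 0 if no context is attached or the walk faulted.
 */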
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_drvdata *iommu_drvdata;
        struct msm_iommu_ctx_drvdata *ctx_drvdata;
        unsigned int par;
        unsigned long flags;
        void __iomem *base;
        phys_addr_t ret = 0;
        int ctx;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        if (list_empty(&priv->list_attached))
                goto fail;

        ctx_drvdata = list_entry(priv->list_attached.next,
                                 struct msm_iommu_ctx_drvdata, attached_elm);
        iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

        base = iommu_drvdata->base;
        ctx = ctx_drvdata->num;

        ret = __enable_clocks(iommu_drvdata);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_V2PPR(base, ctx, va & V2Pxx_VA);

        par = GET_PAR(base, ctx);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(base, ctx))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(base, ctx))
                ret = 0;

        __disable_clocks(iommu_drvdata);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);
        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
                        (fsr & 0x02) ? "TF " : "",
                        (fsr & 0x04) ? "AFF " : "",
                        (fsr & 0x08) ? "APF " : "",
                        (fsr & 0x10) ? "TLBMF " : "",
                        (fsr & 0x20) ? "HTWDEEF " : "",
                        (fsr & 0x40) ? "HTWSEEF " : "",
                        (fsr & 0x80) ? "MHF " : "",
                        (fsr & 0x10000) ? "SL " : "",
                        (fsr & 0x40000000) ? "SS " : "",
                        (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
        pr_err("PRRR   = %08x    NMRR   = %08x\n",
               GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

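/*
 * Context fault interrupt handler: scan every context bank of this IOMMU
 * for a pending fault, dump the fault-status registers, and write the FSR
 * back to acknowledge and clear the fault.
 */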
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_drvdata *drvdata = dev_id;
        void __iomem *base;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!drvdata) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        base = drvdata->base;

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %p\n", base);

        ret = __enable_clocks(drvdata);
        if (ret)
                goto fail;

        for (i = 0; i < drvdata->ncb; i++) {
                fsr = GET_FSR(base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(base, i);
                        SET_FSR(base, i, 0x4000000F);
                }
        }
        __disable_clocks(drvdata);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

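/*
 * Rough usage sketch through the generic IOMMU API (not part of this
 * driver; the context device 'ctx_dev' and the addresses are hypothetical):
 *
 *      struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *      if (dom && !iommu_attach_device(dom, ctx_dev))
 *              iommu_map(dom, 0x10000000, phys, SZ_4K,
 *                        IOMMU_READ | IOMMU_WRITE);
 *
 * Those calls end up in the callbacks below.
 */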
static const struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

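/*
 * Find the TEX remap class whose inner/outer cache policy, memory type and
 * shareability (as programmed in the CPU's PRRR/NMRR) match the requested
 * attributes. Returns the class index, or -ENODEV if no class matches.
 */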
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
        int i = 0;
        unsigned int prrr = 0;
        unsigned int nmrr = 0;
        int c_icp, c_ocp, c_mt, c_nos;

        RCP15_PRRR(prrr);
        RCP15_NMRR(nmrr);

        for (i = 0; i < NUM_TEX_CLASS; i++) {
                c_nos = PRRR_NOS(prrr, i);
                c_mt = PRRR_MT(prrr, i);
                c_icp = NMRR_ICP(nmrr, i);
                c_ocp = NMRR_OCP(nmrr, i);

                if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
                        return i;
        }

        return -ENODEV;
}

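/*
 * Resolve each MSM_IOMMU_ATTR_* cacheability attribute to its matching
 * TEX class once at init, so msm_iommu_map() can translate 'prot' bits
 * into page-table TEX/C/B encodings with a table lookup.
 */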
static void __init setup_iommu_tex_classes(void)
{
        msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
                        get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
                        get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
                        get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
                        get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
        setup_iommu_tex_classes();
        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
        return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");