linux/drivers/iommu/exynos-iommu.c
/* linux/drivers/iommu/exynos-iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <mach/sysmmu.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
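
/*
 * Worked example (arithmetic only): for iova 0x12345678 the macros above
 * give
 *	lv1ent_offset(iova) = 0x123	(index of the 1MB section entry)
 *	lv2ent_offset(iova) = 0x45	(index of the 4KB page entry)
 *	spage_offs(iova)    = 0x678	(byte offset within the small page)
 */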

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE     0x5
#define CTRL_BLOCK      0x7
#define CTRL_DISABLE    0x0

#define REG_MMU_CTRL            0x000
#define REG_MMU_CFG             0x004
#define REG_MMU_STATUS          0x008
#define REG_MMU_FLUSH           0x00C
#define REG_MMU_FLUSH_ENTRY     0x010
#define REG_PT_BASE_ADDR        0x014
#define REG_INT_STATUS          0x018
#define REG_INT_CLEAR           0x01C

#define REG_PAGE_FAULT_ADDR     0x024
#define REG_AW_FAULT_ADDR       0x028
#define REG_AR_FAULT_ADDR       0x02C
#define REG_DEFAULT_SLAVE_ADDR  0x030

#define REG_MMU_VERSION         0x034

#define REG_PB0_SADDR           0x04C
#define REG_PB0_EADDR           0x050
#define REG_PB1_SADDR           0x054
#define REG_PB1_EADDR           0x058

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
        SYSMMU_PAGEFAULT,
        SYSMMU_AR_MULTIHIT,
        SYSMMU_AW_MULTIHIT,
        SYSMMU_BUSERROR,
        SYSMMU_AR_SECURITY,
        SYSMMU_AR_ACCESS,
        SYSMMU_AW_SECURITY,
        SYSMMU_AW_PROTECTION, /* 7 */
        SYSMMU_FAULT_UNKNOWN,
        SYSMMU_FAULTS_NUM
};
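
/*
 * The fault type is computed in exynos_sysmmu_irq() as the index of the
 * lowest set bit of REG_INT_STATUS, so the order of the entries above (and
 * of fault_reg_offset[] and sysmmu_fault_name[] below) must match the
 * hardware's interrupt status bit layout.
 */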

/*
 * @itype: type of fault.
 * @pgtable_base: the physical address of the page table base. This is 0 if
 *                @itype is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
                        unsigned long pgtable_base, unsigned long fault_addr);
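
/*
 * Example (illustrative sketch; 'my_fault_handler' is hypothetical): a
 * client may install its own handler via exynos_sysmmu_set_fault_handler().
 * Returning 0 tells exynos_sysmmu_irq() that the fault was resolved, which
 * causes the interrupt to be cleared; any non-zero value leaves the fault
 * reported as unhandled:
 *
 *	static int my_fault_handler(enum exynos_sysmmu_inttype itype,
 *			unsigned long pgtable_base, unsigned long fault_addr)
 *	{
 *		pr_err("System MMU fault %d at %#lx (pgtable %#lx)\n",
 *				itype, fault_addr, pgtable_base);
 *		return -ENOSYS;	/* not resolved */
 *	}
 */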

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
        REG_PAGE_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_DEFAULT_SLAVE_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "AR MULTI-HIT FAULT",
        "AW MULTI-HIT FAULT",
        "BUS ERROR",
        "AR SECURITY PROTECTION FAULT",
        "AR ACCESS PROTECTION FAULT",
        "AW SECURITY PROTECTION FAULT",
        "AW ACCESS PROTECTION FAULT",
        "UNKNOWN FAULT"
};

struct exynos_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 16KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
        struct list_head node; /* entry of exynos_iommu_domain.clients */
        struct device *sysmmu;  /* System MMU's device descriptor */
        struct device *dev;     /* Owner of system MMU */
        char *dbgname;
        int nsfrs;
        void __iomem **sfrbases;
        struct clk *clk[2];
        int activations;
        rwlock_t lock;
        struct iommu_domain *domain;
        sysmmu_fault_handler_t fault_handler;
        unsigned long pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
        __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
        int i = 120;

        __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(sfrbase);
                return false;
        }

        return true;
}
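
/*
 * Writing CTRL_BLOCK stalls address translation while keeping the System MMU
 * enabled; sysmmu_block() then polls bit 0 of REG_MMU_STATUS until the
 * hardware reports that it is actually blocked. TLB maintenance and prefetch
 * buffer programming below are only performed while the MMU is blocked this
 * way.
 */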

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
        __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
                                                unsigned long iova)
{
        __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
                                       unsigned long pgd)
{
        __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
        __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

        __sysmmu_tlb_invalidate(sfrbase);
}

static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
                                                unsigned long size, int idx)
{
        __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
        __raw_writel(size - 1 + base,  sfrbase + REG_PB0_EADDR + idx * 8);
}

void exynos_sysmmu_set_prefbuf(struct device *dev,
                                unsigned long base0, unsigned long size0,
                                unsigned long base1, unsigned long size1)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        unsigned long flags;
        int i;

        BUG_ON((base0 + size0) <= base0);
        BUG_ON((size1 > 0) && ((base1 + size1) <= base1));

        read_lock_irqsave(&data->lock, flags);
        if (!is_sysmmu_active(data))
                goto finish;

        for (i = 0; i < data->nsfrs; i++) {
                if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
                        if (!sysmmu_block(data->sfrbases[i]))
                                continue;

                        if (size1 == 0) {
                                if (size0 <= SZ_128K) {
                                        base1 = base0;
                                        size1 = size0;
                                } else {
                                        size1 = size0 -
                                                ALIGN(size0 / 2, SZ_64K);
                                        size0 = size0 - size1;
                                        base1 = base0 + size0;
                                }
                        }

                        __sysmmu_set_prefbuf(
                                        data->sfrbases[i], base0, size0, 0);
                        __sysmmu_set_prefbuf(
                                        data->sfrbases[i], base1, size1, 1);

                        sysmmu_unblock(data->sfrbases[i]);
                }
        }
finish:
        read_unlock_irqrestore(&data->lock, flags);
}
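
/*
 * Example (illustrative; 'src_iova'/'dst_iova' are hypothetical): a driver
 * streaming between two DMA windows could cover each with one prefetch
 * buffer:
 *
 *	exynos_sysmmu_set_prefbuf(dev, src_iova, src_size,
 *					dst_iova, dst_size);
 *
 * Passing size1 == 0 makes the function split the first range over both
 * prefetch buffers. The prefetch buffers exist only on System MMU v3.x,
 * which is why REG_MMU_VERSION is checked above.
 */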

static void __set_fault_handler(struct sysmmu_drvdata *data,
                                        sysmmu_fault_handler_t handler)
{
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_set_fault_handler(struct device *dev,
                                        sysmmu_fault_handler_t handler)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        __set_fault_handler(data, handler);
}

static int default_fault_handler(enum exynos_sysmmu_inttype itype,
                     unsigned long pgtable_base, unsigned long fault_addr)
{
        unsigned long *ent;

        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;

        pr_err("%s occurred at 0x%lx (page table base: 0x%lx)\n",
                        sysmmu_fault_name[itype], fault_addr, pgtable_base);

        ent = section_entry(__va(pgtable_base), fault_addr);
        pr_err("\tLv1 entry: 0x%lx\n", *ent);

        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                pr_err("\t Lv2 entry: 0x%lx\n", *ent);
        }

        pr_err("Generating kernel oops because the fault is unrecoverable.\n");

        BUG();

        return 0;
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* The System MMU is blocked while a fault interrupt is pending. */
        struct sysmmu_drvdata *data = dev_id;
        struct resource *irqres;
        struct platform_device *pdev;
        enum exynos_sysmmu_inttype itype;
        unsigned long addr = -1;

        int i, ret = -ENOSYS;

        read_lock(&data->lock);

        WARN_ON(!is_sysmmu_active(data));

        pdev = to_platform_device(data->sysmmu);
        for (i = 0; i < (pdev->num_resources / 2); i++) {
                irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (irqres && ((int)irqres->start == irq))
                        break;
        }

        if (i == (pdev->num_resources / 2)) {
                itype = SYSMMU_FAULT_UNKNOWN;
        } else {
                itype = (enum exynos_sysmmu_inttype)
                        __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
                if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
                        itype = SYSMMU_FAULT_UNKNOWN;
                else
                        addr = __raw_readl(
                                data->sfrbases[i] + fault_reg_offset[itype]);
        }

        if (data->domain)
                ret = report_iommu_fault(data->domain, data->dev,
                                addr, itype);

        if ((ret == -ENOSYS) && data->fault_handler) {
                unsigned long base = data->pgtable;
                if (itype != SYSMMU_FAULT_UNKNOWN)
                        base = __raw_readl(
                                        data->sfrbases[i] + REG_PT_BASE_ADDR);
                ret = data->fault_handler(itype, base, addr);
        }

        if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
                __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
        else
                dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
                                data->dbgname, sysmmu_fault_name[itype]);

        if (itype != SYSMMU_FAULT_UNKNOWN)
                sysmmu_unblock(data->sfrbases[i]);

        read_unlock(&data->lock);

        return IRQ_HANDLED;
}

static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
        unsigned long flags;
        bool disabled = false;
        int i;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_inactive(data))
                goto finish;

        for (i = 0; i < data->nsfrs; i++)
                __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);

        if (data->clk[1])
                clk_disable(data->clk[1]);
        if (data->clk[0])
                clk_disable(data->clk[0]);

        disabled = true;
        data->pgtable = 0;
        data->domain = NULL;
finish:
        write_unlock_irqrestore(&data->lock, flags);

        if (disabled)
                dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
        else
                dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
                                        data->dbgname, data->activations);

        return disabled;
}

/* __exynos_sysmmu_enable: Enables the System MMU
 *
 * Returns a negative error value if an error occurred and the System MMU was
 * not enabled, 0 if the System MMU has just been enabled, and 1 if it was
 * already enabled.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
                        unsigned long pgtable, struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_active(data)) {
                if (WARN_ON(pgtable != data->pgtable)) {
                        ret = -EBUSY;
                        set_sysmmu_inactive(data);
                } else {
                        ret = 1;
                }

                dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
                goto finish;
        }

        if (data->clk[0])
                clk_enable(data->clk[0]);
        if (data->clk[1])
                clk_enable(data->clk[1]);

        data->pgtable = pgtable;

        for (i = 0; i < data->nsfrs; i++) {
                __sysmmu_set_ptbase(data->sfrbases[i], pgtable);

                if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
                        /* System MMU version is 3.x */
                        __raw_writel((1 << 12) | (2 << 28),
                                        data->sfrbases[i] + REG_MMU_CFG);
                        __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
                        __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
                }

                __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
        }

        data->domain = domain;

        dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        int ret;

        BUG_ON(!memblock_is_memory(pgtable));

        ret = pm_runtime_get_sync(data->sysmmu);
        if (ret < 0) {
                dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
                return ret;
        }

        ret = __exynos_sysmmu_enable(data, pgtable, NULL);
        if (WARN_ON(ret < 0)) {
                pm_runtime_put(data->sysmmu);
                dev_err(data->sysmmu,
                        "(%s) Already enabled with page table %#lx\n",
                        data->dbgname, data->pgtable);
        } else {
                data->dev = dev;
        }

        return ret;
}
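
/*
 * Example (illustrative; 'my_pgtable' is hypothetical): a client driver that
 * manages its own 16KB first-level page table can enable translation with
 * the physical address of that table:
 *
 *	ret = exynos_sysmmu_enable(dev, __pa(my_pgtable));
 *	if (ret < 0)
 *		return ret;
 *
 * A return value of 1 means the System MMU was already enabled with the
 * same page table.
 */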

static bool exynos_sysmmu_disable(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        bool disabled;

        disabled = __exynos_sysmmu_disable(data);
        pm_runtime_put(data->sysmmu);

        return disabled;
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (is_sysmmu_active(data)) {
                int i;
                for (i = 0; i < data->nsfrs; i++) {
                        if (sysmmu_block(data->sfrbases[i])) {
                                __sysmmu_tlb_invalidate_entry(
                                                data->sfrbases[i], iova);
                                sysmmu_unblock(data->sfrbases[i]);
                        }
                }
        } else {
                dev_dbg(data->sysmmu,
                        "(%s) Disabled. Skipping TLB invalidation.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (is_sysmmu_active(data)) {
                int i;
                for (i = 0; i < data->nsfrs; i++) {
                        if (sysmmu_block(data->sfrbases[i])) {
                                __sysmmu_tlb_invalidate(data->sfrbases[i]);
                                sysmmu_unblock(data->sfrbases[i]);
                        }
                }
        } else {
                dev_dbg(data->sysmmu,
                        "(%s) Disabled. Skipping TLB invalidation.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
        int i, ret;
        struct device *dev;
        struct sysmmu_drvdata *data;

        dev = &pdev->dev;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = dev_set_drvdata(dev, data);
        if (ret) {
                dev_dbg(dev, "Unable to initialize driver data\n");
                goto err_init;
        }

        data->nsfrs = pdev->num_resources / 2;
        data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
                                                                GFP_KERNEL);
        if (data->sfrbases == NULL) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_init;
        }

        for (i = 0; i < data->nsfrs; i++) {
                struct resource *res;
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res) {
                        dev_dbg(dev, "Unable to find IOMEM region\n");
                        ret = -ENOENT;
                        goto err_res;
                }

                data->sfrbases[i] = ioremap(res->start, resource_size(res));
                if (!data->sfrbases[i]) {
                        dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
                                                        res->start);
                        ret = -ENOENT;
                        goto err_res;
                }
        }

        for (i = 0; i < data->nsfrs; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
                        dev_dbg(dev, "Unable to find IRQ resource\n");
                        goto err_irq;
                }

                ret = request_irq(ret, exynos_sysmmu_irq, 0,
                                        dev_name(dev), data);
                if (ret) {
                        dev_dbg(dev, "Unable to register interrupt handler\n");
                        goto err_irq;
                }
        }

        if (dev_get_platdata(dev)) {
                char *deli, *beg;
                struct sysmmu_platform_data *platdata = dev_get_platdata(dev);

                beg = platdata->clockname;

                for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
                        /* NOTHING */;

                if (*deli == '\0')
                        deli = NULL;
                else
                        *deli = '\0';

                data->clk[0] = clk_get(dev, beg);
                if (IS_ERR(data->clk[0])) {
                        data->clk[0] = NULL;
                        dev_dbg(dev, "No clock descriptor registered\n");
                }

                if (data->clk[0] && deli) {
                        *deli = ',';
                        data->clk[1] = clk_get(dev, deli + 1);
                        if (IS_ERR(data->clk[1]))
                                data->clk[1] = NULL;
                }

                data->dbgname = platdata->dbgname;
        }

        data->sysmmu = dev;
        rwlock_init(&data->lock);
        INIT_LIST_HEAD(&data->node);

        __set_fault_handler(data, &default_fault_handler);

        if (dev->parent)
                pm_runtime_enable(dev);

        dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
        return 0;
err_irq:
        while (i-- > 0) {
                int irq;

                irq = platform_get_irq(pdev, i);
                free_irq(irq, data);
        }
err_res:
        while (data->nsfrs-- > 0)
                iounmap(data->sfrbases[data->nsfrs]);
        kfree(data->sfrbases);
err_init:
        kfree(data);
err_alloc:
        dev_err(dev, "Failed to initialize\n");
        return ret;
}

static struct platform_driver exynos_sysmmu_driver = {
        .probe          = exynos_sysmmu_probe,
        .driver         = {
                .owner          = THIS_MODULE,
                .name           = "exynos-sysmmu",
        }
};

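/*
 * The System MMU fetches page table entries from system memory, so every
 * update made by the CPU must be cleaned from both the inner and outer
 * caches before the hardware can observe it.
 */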
static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),
                                virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->pgtable = (unsigned long *)__get_free_pages(
                                                GFP_KERNEL | __GFP_ZERO, 2);
        if (!priv->pgtable)
                goto err_pgtable;

        priv->lv2entcnt = (short *)__get_free_pages(
                                                GFP_KERNEL | __GFP_ZERO, 1);
        if (!priv->lv2entcnt)
                goto err_counter;

        pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->pgtablelock);
        INIT_LIST_HEAD(&priv->clients);

        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end   = ~0UL;
        domain->geometry.force_aperture = true;

        domain->priv = priv;
        return 0;

err_counter:
        free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
        kfree(priv);
        return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) {
                while (!exynos_sysmmu_disable(data->dev))
                        ; /* until System MMU is actually disabled */
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kfree(__va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 2);
        free_pages((unsigned long)priv->lv2entcnt, 1);
        kfree(domain->priv);
        domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long flags;
        int ret;

        ret = pm_runtime_get_sync(data->sysmmu);
        if (ret < 0)
                return ret;

        ret = 0;

        spin_lock_irqsave(&priv->lock, flags);

        ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);

        if (ret == 0) {
                /* 'data->node' must not already be in priv->clients */
                BUG_ON(!list_empty(&data->node));
                data->dev = dev;
                list_add_tail(&data->node, &priv->clients);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
                                __func__, __pa(priv->pgtable));
                pm_runtime_put(data->sysmmu);
        } else if (ret > 0) {
                dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
                                        __func__, __pa(priv->pgtable));
        } else {
                dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
                                        __func__, __pa(priv->pgtable));
        }

        return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct exynos_iommu_domain *priv = domain->priv;
        struct list_head *pos;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each(pos, &priv->clients) {
                if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
                        found = true;
                        break;
                }
        }

        if (!found)
                goto finish;

        if (__exynos_sysmmu_disable(data)) {
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
                                        __func__, __pa(priv->pgtable));
                list_del_init(&data->node);

        } else {
                dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed\n",
                                        __func__, __pa(priv->pgtable));
        }

finish:
        spin_unlock_irqrestore(&priv->lock, flags);

        if (found)
                pm_runtime_put(data->sysmmu);
}

static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
                                        short *pgcounter)
{
        if (lv1ent_fault(sent)) {
                unsigned long *pent;

                pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
                BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
                        return NULL;

                *sent = mk_lv1ent_page(__pa(pent));
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);
        }

        return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
{
        if (lv1ent_section(sent))
                return -EADDRINUSE;

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES)
                        return -EADDRINUSE;

                kfree(page_entry(sent, 0));

                *pgcnt = 0;
        }

        *sent = mk_lv1ent_sect(paddr);

        pgtable_flush(sent, sent + 1);

        return 0;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
                                                                short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (!lv2ent_fault(pent))
                        return -EADDRINUSE;

                *pent = mk_lv2ent_spage(paddr);
                pgtable_flush(pent, pent + 1);
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (!lv2ent_fault(pent)) {
                                /* roll back the entries written so far */
                                memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr);
                }
                pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}

static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        int ret = -ENOMEM;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (size == SECT_SIZE) {
                ret = lv1set_section(entry, paddr,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                unsigned long *pent;

                pent = alloc_lv2entry(entry, iova,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);

                if (!pent)
                        ret = -ENOMEM;
                else
                        ret = lv2set_page(pent, paddr, size,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        }

        if (ret) {
                pr_debug("%s: Failed to map iova %#lx/%#zx bytes\n",
                                                        __func__, iova, size);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return ret;
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
                                               unsigned long iova, size_t size)
{
        struct exynos_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        unsigned long *ent;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = section_entry(priv->pgtable, iova);

        if (lv1ent_section(ent)) {
                BUG_ON(size < SECT_SIZE);

                *ent = 0;
                pgtable_flush(ent, ent + 1);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                *ent = 0;
                pgtable_flush(ent, ent + 1);
                size = SPAGE_SIZE;
                priv->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        BUG_ON(size < LPAGE_SIZE);

        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
        pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

        size = LPAGE_SIZE;
        priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        spin_lock_irqsave(&priv->lock, flags);
        list_for_each_entry(data, &priv->clients, node)
                sysmmu_tlb_invalidate_entry(data->dev, iova);
        spin_unlock_irqrestore(&priv->lock, flags);

        return size;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return phys;
}

static struct iommu_ops exynos_iommu_ops = {
        .domain_init = &exynos_iommu_domain_init,
        .domain_destroy = &exynos_iommu_domain_destroy,
        .attach_dev = &exynos_iommu_attach_device,
        .detach_dev = &exynos_iommu_detach_device,
        .map = &exynos_iommu_map,
        .unmap = &exynos_iommu_unmap,
        .iova_to_phys = &exynos_iommu_iova_to_phys,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
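
/*
 * Example (illustrative; 'dev', 'IOVA' and 'phys' are placeholders): once
 * these ops are registered on the platform bus, clients use the generic
 * IOMMU API rather than calling this driver directly:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, IOVA, phys, SECT_SIZE, 0);
 *
 * The pgsize_bitmap above advertises the three supported mapping granules:
 * 1MB sections, 64KB large pages and 4KB small pages.
 */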

static int __init exynos_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&exynos_sysmmu_driver);

        if (ret == 0)
                bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);

        return ret;
}
subsys_initcall(exynos_iommu_init);