linux/drivers/iommu/exynos-iommu.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
   4 *              http://www.samsung.com
   5 */
   6
   7#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
   8#define DEBUG
   9#endif
  10
  11#include <linux/clk.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/err.h>
  14#include <linux/io.h>
  15#include <linux/iommu.h>
  16#include <linux/interrupt.h>
  17#include <linux/kmemleak.h>
  18#include <linux/list.h>
  19#include <linux/of.h>
  20#include <linux/of_platform.h>
  21#include <linux/platform_device.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/slab.h>
  24
  25typedef u32 sysmmu_iova_t;
  26typedef u32 sysmmu_pte_t;
  27
  28/* We do not consider super section mapping (16MB) */
  29#define SECT_ORDER 20
  30#define LPAGE_ORDER 16
  31#define SPAGE_ORDER 12
  32
  33#define SECT_SIZE (1 << SECT_ORDER)
  34#define LPAGE_SIZE (1 << LPAGE_ORDER)
  35#define SPAGE_SIZE (1 << SPAGE_ORDER)
  36
  37#define SECT_MASK (~(SECT_SIZE - 1))
  38#define LPAGE_MASK (~(LPAGE_SIZE - 1))
  39#define SPAGE_MASK (~(SPAGE_SIZE - 1))
  40
  41#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
  42                           ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
  43#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
  44#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
  45#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
  46                          ((*(sent) & 3) == 1))
  47#define lv1ent_section(sent) ((*(sent) & 3) == 2)
  48
  49#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
  50#define lv2ent_small(pent) ((*(pent) & 2) == 2)
  51#define lv2ent_large(pent) ((*(pent) & 3) == 1)
  52
  53/*
  54 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
  55 * v5.0 introduced support for 36bit physical address space by shifting
  56 * all page entry values by 4 bits.
  57 * All SYSMMU controllers in the system support the address spaces of the same
  58 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
  59 * value (0 or 4).
  60 */
  61static short PG_ENT_SHIFT = -1;
  62#define SYSMMU_PG_ENT_SHIFT 0
  63#define SYSMMU_V5_PG_ENT_SHIFT 4
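
/*
 * Illustrative example (not part of the driver), assuming a hypothetical
 * 1MiB-aligned section at physical address 0x8_1230_0000 on v5.0 hardware:
 * the page table entry stores 0x812300000 >> 4 == 0x81230000, which fits in
 * a 32-bit sysmmu_pte_t, and sect_to_phys() below recovers the original
 * address by shifting left by PG_ENT_SHIFT again. With PG_ENT_SHIFT == 0
 * (v1.x - v3.x) the entry holds the 32-bit address bits unshifted.
 */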
  64
  65static const sysmmu_pte_t *LV1_PROT;
  66static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
  67        ((0 << 15) | (0 << 10)), /* no access */
  68        ((1 << 15) | (1 << 10)), /* IOMMU_READ only */
  69        ((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
  70        ((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
  71};
  72static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
  73        (0 << 4), /* no access */
  74        (1 << 4), /* IOMMU_READ only */
  75        (2 << 4), /* IOMMU_WRITE only */
  76        (3 << 4), /* IOMMU_READ | IOMMU_WRITE */
  77};
  78
  79static const sysmmu_pte_t *LV2_PROT;
  80static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
  81        ((0 << 9) | (0 << 4)), /* no access */
  82        ((1 << 9) | (1 << 4)), /* IOMMU_READ only */
  83        ((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
  84        ((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
  85};
  86static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
  87        (0 << 2), /* no access */
  88        (1 << 2), /* IOMMU_READ only */
  89        (2 << 2), /* IOMMU_WRITE only */
  90        (3 << 2), /* IOMMU_READ | IOMMU_WRITE */
  91};
  92
  93#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
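
/*
 * Note: IOMMU_READ is bit 0 and IOMMU_WRITE is bit 1, so a prot value masked
 * with SYSMMU_SUPPORTED_PROT_BITS is a 0..3 index into the LV1_PROT/LV2_PROT
 * tables above; e.g. IOMMU_READ | IOMMU_WRITE (3) selects
 * SYSMMU_V5_LV2_PROT[3] == (3 << 2) on v5 hardware.
 */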
  94
  95#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
  96#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
  97#define section_offs(iova) (iova & (SECT_SIZE - 1))
  98#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
  99#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
 100#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
 101#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
 102
 103#define NUM_LV1ENTRIES 4096
 104#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
 105
 106static u32 lv1ent_offset(sysmmu_iova_t iova)
 107{
 108        return iova >> SECT_ORDER;
 109}
 110
 111static u32 lv2ent_offset(sysmmu_iova_t iova)
 112{
 113        return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
 114}
 115
 116#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
 117#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
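
/*
 * Illustrative example: an IOVA such as 0x12345000 is split into a
 * first-level index of 0x123 (iova >> 20, one entry per 1MiB section;
 * 4096 entries * 4 bytes = 16KiB table) and a second-level index of 0x45
 * ((iova >> 12) & 0xff; 256 entries * 4 bytes = 1KiB table per section).
 */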
 118
 119#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
 120#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
 121
 122#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
 123#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
 124#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
 125#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
 126
 127#define CTRL_ENABLE     0x5
 128#define CTRL_BLOCK      0x7
 129#define CTRL_DISABLE    0x0
 130
 131#define CFG_LRU         0x1
 132#define CFG_EAP         (1 << 2)
 133#define CFG_QOS(n)      ((n & 0xF) << 7)
 134#define CFG_ACGEN       (1 << 24) /* System MMU 3.3 only */
 135#define CFG_SYSSEL      (1 << 22) /* System MMU 3.2 only */
 136#define CFG_FLPDCACHE   (1 << 20) /* System MMU 3.2+ only */
 137
 138/* common registers */
 139#define REG_MMU_CTRL            0x000
 140#define REG_MMU_CFG             0x004
 141#define REG_MMU_STATUS          0x008
 142#define REG_MMU_VERSION         0x034
 143
 144#define MMU_MAJ_VER(val)        ((val) >> 7)
 145#define MMU_MIN_VER(val)        ((val) & 0x7F)
 146#define MMU_RAW_VER(reg)        (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
 147
 148#define MAKE_MMU_VER(maj, min)  ((((maj) & 0xF) << 7) | ((min) & 0x7F))
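
/*
 * Worked example with a hypothetical register value: REG_MMU_VERSION ==
 * 0x30600000 gives MMU_RAW_VER() == 0x183, which MMU_MAJ_VER()/MMU_MIN_VER()
 * decode as version 3.3, and MAKE_MMU_VER(3, 3) yields the same 0x183 for
 * comparisons like "data->version <= MAKE_MMU_VER(3, 2)".
 */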
 149
 150/* v1.x - v3.x registers */
 151#define REG_MMU_FLUSH           0x00C
 152#define REG_MMU_FLUSH_ENTRY     0x010
 153#define REG_PT_BASE_ADDR        0x014
 154#define REG_INT_STATUS          0x018
 155#define REG_INT_CLEAR           0x01C
 156
 157#define REG_PAGE_FAULT_ADDR     0x024
 158#define REG_AW_FAULT_ADDR       0x028
 159#define REG_AR_FAULT_ADDR       0x02C
 160#define REG_DEFAULT_SLAVE_ADDR  0x030
 161
 162/* v5.x registers */
 163#define REG_V5_PT_BASE_PFN      0x00C
 164#define REG_V5_MMU_FLUSH_ALL    0x010
 165#define REG_V5_MMU_FLUSH_ENTRY  0x014
 166#define REG_V5_MMU_FLUSH_RANGE  0x018
 167#define REG_V5_MMU_FLUSH_START  0x020
 168#define REG_V5_MMU_FLUSH_END    0x024
 169#define REG_V5_INT_STATUS       0x060
 170#define REG_V5_INT_CLEAR        0x064
 171#define REG_V5_FAULT_AR_VA      0x070
 172#define REG_V5_FAULT_AW_VA      0x080
 173
 174#define has_sysmmu(dev)         (dev_iommu_priv_get(dev) != NULL)
 175
 176static struct device *dma_dev;
 177static struct kmem_cache *lv2table_kmem_cache;
 178static sysmmu_pte_t *zero_lv2_table;
 179#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
 180
 181static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
 182{
 183        return pgtable + lv1ent_offset(iova);
 184}
 185
 186static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
 187{
 188        return (sysmmu_pte_t *)phys_to_virt(
 189                                lv2table_base(sent)) + lv2ent_offset(iova);
 190}
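
/*
 * Illustrative sketch (disabled, not part of the driver): how the two
 * helpers above combine into a software page table walk, in the same way
 * exynos_iommu_iova_to_phys() does further down in this file.
 */
#if 0
static phys_addr_t example_walk(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
        sysmmu_pte_t *sent = section_entry(pgtable, iova);

        if (lv1ent_section(sent))
                return section_phys(sent) + section_offs(iova);

        if (lv1ent_page(sent)) {
                sysmmu_pte_t *pent = page_entry(sent, iova);

                if (lv2ent_large(pent))
                        return lpage_phys(pent) + lpage_offs(iova);
                if (lv2ent_small(pent))
                        return spage_phys(pent) + spage_offs(iova);
        }

        return 0;       /* not mapped */
}
#endif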
 191
 192/*
 193 * IOMMU fault information
 194 */
 195struct sysmmu_fault_info {
 196        unsigned int bit;       /* bit number in STATUS register */
 197        unsigned short addr_reg; /* register to read VA fault address */
 198        const char *name;       /* human readable fault name */
 199        unsigned int type;      /* fault type for report_iommu_fault */
 200};
 201
 202static const struct sysmmu_fault_info sysmmu_faults[] = {
 203        { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
 204        { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
 205        { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
 206        { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
 207        { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
 208        { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
 209        { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
 210        { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
 211};
 212
 213static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
 214        { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
 215        { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
 216        { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
 217        { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
 218        { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
 219        { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
 220        { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
 221        { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
 222        { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
 223        { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
 224};
 225
 226/*
 227 * This structure is attached to dev->iommu->priv of the master device
 228 * on device add. It contains a list of the SYSMMU controllers defined by the
 229 * device tree which are bound to the given master device. It is usually
 230 * referenced by the 'owner' pointer.
 231 */
 232struct exynos_iommu_owner {
 233        struct list_head controllers;   /* list of sysmmu_drvdata.owner_node */
 234        struct iommu_domain *domain;    /* domain this device is attached */
 235        struct mutex rpm_lock;          /* for runtime pm of all sysmmus */
 236};
 237
 238/*
 239 * This structure is the Exynos-specific generalization of struct iommu_domain.
 240 * It contains a list of the SYSMMU controllers from all master devices that
 241 * have been attached to this domain, and the page tables of the I/O address
 242 * space defined by it. It is usually referenced by the 'domain' pointer.
 243 */
 244struct exynos_iommu_domain {
 245        struct list_head clients; /* list of sysmmu_drvdata.domain_node */
 246        sysmmu_pte_t *pgtable;  /* lv1 page table, 16KB */
 247        short *lv2entcnt;       /* free lv2 entry counter for each section */
 248        spinlock_t lock;        /* lock for modifying the list of clients */
 249        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
 250        struct iommu_domain domain; /* generic domain data structure */
 251};
 252
 253/*
 254 * This structure holds all data of a single SYSMMU controller; this includes
 255 * HW resources like registers and clocks, pointers and list nodes connecting
 256 * it to all the other structures, internal state, and parameters read from
 257 * the device tree. It is usually referenced by the 'data' pointer.
 258 */
 259struct sysmmu_drvdata {
 260        struct device *sysmmu;          /* SYSMMU controller device */
 261        struct device *master;          /* master device (owner) */
 262        struct device_link *link;       /* runtime PM link to master */
 263        void __iomem *sfrbase;          /* our registers */
 264        struct clk *clk;                /* SYSMMU's clock */
 265        struct clk *aclk;               /* SYSMMU's aclk clock */
 266        struct clk *pclk;               /* SYSMMU's pclk clock */
 267        struct clk *clk_master;         /* master's device clock */
 268        spinlock_t lock;                /* lock for modifying state */
 269        bool active;                    /* current status */
 270        struct exynos_iommu_domain *domain; /* domain we belong to */
 271        struct list_head domain_node;   /* node for domain clients list */
 272        struct list_head owner_node;    /* node for owner controllers list */
 273        phys_addr_t pgtable;            /* assigned page table structure */
 274        unsigned int version;           /* our version */
 275
 276        struct iommu_device iommu;      /* IOMMU core handle */
 277};
 278
 279static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
 280{
 281        return container_of(dom, struct exynos_iommu_domain, domain);
 282}
 283
 284static void sysmmu_unblock(struct sysmmu_drvdata *data)
 285{
 286        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
 287}
 288
 289static bool sysmmu_block(struct sysmmu_drvdata *data)
 290{
 291        int i = 120;
 292
 293        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
 294        while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
 295                --i;
 296
 297        if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
 298                sysmmu_unblock(data);
 299                return false;
 300        }
 301
 302        return true;
 303}
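
/*
 * Note on the helpers above: sysmmu_block() writes CTRL_BLOCK and then polls
 * bit 0 of REG_MMU_STATUS for up to 120 reads, waiting for translation to
 * stop; on timeout it re-enables the MMU and returns false. Callers such as
 * sysmmu_tlb_invalidate_entry() bracket their TLB flushes with
 * sysmmu_block()/sysmmu_unblock().
 */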
 304
 305static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
 306{
 307        if (MMU_MAJ_VER(data->version) < 5)
 308                writel(0x1, data->sfrbase + REG_MMU_FLUSH);
 309        else
 310                writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
 311}
 312
 313static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 314                                sysmmu_iova_t iova, unsigned int num_inv)
 315{
 316        unsigned int i;
 317
 318        if (MMU_MAJ_VER(data->version) < 5) {
 319                for (i = 0; i < num_inv; i++) {
 320                        writel((iova & SPAGE_MASK) | 1,
 321                                     data->sfrbase + REG_MMU_FLUSH_ENTRY);
 322                        iova += SPAGE_SIZE;
 323                }
 324        } else {
 325                if (num_inv == 1) {
 326                        writel((iova & SPAGE_MASK) | 1,
 327                                     data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
 328                } else {
 329                        writel((iova & SPAGE_MASK),
 330                                     data->sfrbase + REG_V5_MMU_FLUSH_START);
 331                        writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
 332                                     data->sfrbase + REG_V5_MMU_FLUSH_END);
 333                        writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
 334                }
 335        }
 336}
 337
 338static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
 339{
 340        if (MMU_MAJ_VER(data->version) < 5)
 341                writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
 342        else
 343                writel(pgd >> PAGE_SHIFT,
 344                             data->sfrbase + REG_V5_PT_BASE_PFN);
 345
 346        __sysmmu_tlb_invalidate(data);
 347}
 348
 349static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
 350{
 351        BUG_ON(clk_prepare_enable(data->clk_master));
 352        BUG_ON(clk_prepare_enable(data->clk));
 353        BUG_ON(clk_prepare_enable(data->pclk));
 354        BUG_ON(clk_prepare_enable(data->aclk));
 355}
 356
 357static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
 358{
 359        clk_disable_unprepare(data->aclk);
 360        clk_disable_unprepare(data->pclk);
 361        clk_disable_unprepare(data->clk);
 362        clk_disable_unprepare(data->clk_master);
 363}
 364
 365static void __sysmmu_get_version(struct sysmmu_drvdata *data)
 366{
 367        u32 ver;
 368
 369        __sysmmu_enable_clocks(data);
 370
 371        ver = readl(data->sfrbase + REG_MMU_VERSION);
 372
 373        /* controllers on some SoCs don't report proper version */
 374        if (ver == 0x80000001u)
 375                data->version = MAKE_MMU_VER(1, 0);
 376        else
 377                data->version = MMU_RAW_VER(ver);
 378
 379        dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
 380                MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
 381
 382        __sysmmu_disable_clocks(data);
 383}
 384
 385static void show_fault_information(struct sysmmu_drvdata *data,
 386                                   const struct sysmmu_fault_info *finfo,
 387                                   sysmmu_iova_t fault_addr)
 388{
 389        sysmmu_pte_t *ent;
 390
 391        dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
 392                dev_name(data->master), finfo->name, fault_addr);
 393        dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
 394        ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
 395        dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
 396        if (lv1ent_page(ent)) {
 397                ent = page_entry(ent, fault_addr);
 398                dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
 399        }
 400}
 401
 402static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 403{
 404        /* The SYSMMU is in the blocked state when the interrupt occurs. */
 405        struct sysmmu_drvdata *data = dev_id;
 406        const struct sysmmu_fault_info *finfo;
 407        unsigned int i, n, itype;
 408        sysmmu_iova_t fault_addr;
 409        unsigned short reg_status, reg_clear;
 410        int ret = -ENOSYS;
 411
 412        WARN_ON(!data->active);
 413
 414        if (MMU_MAJ_VER(data->version) < 5) {
 415                reg_status = REG_INT_STATUS;
 416                reg_clear = REG_INT_CLEAR;
 417                finfo = sysmmu_faults;
 418                n = ARRAY_SIZE(sysmmu_faults);
 419        } else {
 420                reg_status = REG_V5_INT_STATUS;
 421                reg_clear = REG_V5_INT_CLEAR;
 422                finfo = sysmmu_v5_faults;
 423                n = ARRAY_SIZE(sysmmu_v5_faults);
 424        }
 425
 426        spin_lock(&data->lock);
 427
 428        clk_enable(data->clk_master);
 429
 430        itype = __ffs(readl(data->sfrbase + reg_status));
 431        for (i = 0; i < n; i++, finfo++)
 432                if (finfo->bit == itype)
 433                        break;
 434        /* unknown/unsupported fault */
 435        BUG_ON(i == n);
 436
 437        /* print debug message */
 438        fault_addr = readl(data->sfrbase + finfo->addr_reg);
 439        show_fault_information(data, finfo, fault_addr);
 440
 441        if (data->domain)
 442                ret = report_iommu_fault(&data->domain->domain,
 443                                        data->master, fault_addr, finfo->type);
 444        /* fault is not recovered by fault handler */
 445        BUG_ON(ret != 0);
 446
 447        writel(1 << itype, data->sfrbase + reg_clear);
 448
 449        sysmmu_unblock(data);
 450
 451        clk_disable(data->clk_master);
 452
 453        spin_unlock(&data->lock);
 454
 455        return IRQ_HANDLED;
 456}
 457
 458static void __sysmmu_disable(struct sysmmu_drvdata *data)
 459{
 460        unsigned long flags;
 461
 462        clk_enable(data->clk_master);
 463
 464        spin_lock_irqsave(&data->lock, flags);
 465        writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
 466        writel(0, data->sfrbase + REG_MMU_CFG);
 467        data->active = false;
 468        spin_unlock_irqrestore(&data->lock, flags);
 469
 470        __sysmmu_disable_clocks(data);
 471}
 472
 473static void __sysmmu_init_config(struct sysmmu_drvdata *data)
 474{
 475        unsigned int cfg;
 476
 477        if (data->version <= MAKE_MMU_VER(3, 1))
 478                cfg = CFG_LRU | CFG_QOS(15);
 479        else if (data->version <= MAKE_MMU_VER(3, 2))
 480                cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
 481        else
 482                cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
 483
 484        cfg |= CFG_EAP; /* enable access protection bits check */
 485
 486        writel(cfg, data->sfrbase + REG_MMU_CFG);
 487}
 488
 489static void __sysmmu_enable(struct sysmmu_drvdata *data)
 490{
 491        unsigned long flags;
 492
 493        __sysmmu_enable_clocks(data);
 494
 495        spin_lock_irqsave(&data->lock, flags);
 496        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
 497        __sysmmu_init_config(data);
 498        __sysmmu_set_ptbase(data, data->pgtable);
 499        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
 500        data->active = true;
 501        spin_unlock_irqrestore(&data->lock, flags);
 502
 503        /*
 504         * The SYSMMU driver keeps the master's clock enabled only for the
 505         * short time needed to access the registers. For address translation
 506         * during DMA transactions it relies on the client driver to keep
 507         * that clock enabled.
 508         */
 509        clk_disable(data->clk_master);
 510}
 511
 512static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
 513                                            sysmmu_iova_t iova)
 514{
 515        unsigned long flags;
 516
 517        spin_lock_irqsave(&data->lock, flags);
 518        if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
 519                clk_enable(data->clk_master);
 520                if (sysmmu_block(data)) {
 521                        if (data->version >= MAKE_MMU_VER(5, 0))
 522                                __sysmmu_tlb_invalidate(data);
 523                        else
 524                                __sysmmu_tlb_invalidate_entry(data, iova, 1);
 525                        sysmmu_unblock(data);
 526                }
 527                clk_disable(data->clk_master);
 528        }
 529        spin_unlock_irqrestore(&data->lock, flags);
 530}
 531
 532static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 533                                        sysmmu_iova_t iova, size_t size)
 534{
 535        unsigned long flags;
 536
 537        spin_lock_irqsave(&data->lock, flags);
 538        if (data->active) {
 539                unsigned int num_inv = 1;
 540
 541                clk_enable(data->clk_master);
 542
 543                /*
 544                 * L2TLB invalidations required:
 545                 *   4KiB page:   1 invalidation
 546                 *   64KiB page: 16 invalidations
 547                 *   1MiB page:  64 invalidations
 548                 * because the TLB is 8-way set-associative with 64 sets.
 549                 * A 1MiB page may be cached in any of the 64 sets,
 550                 * while a 64KiB page occupies one of 16 consecutive
 551                 * sets.
 552                 */
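                /*
                 * Worked example (assuming a 4KiB PAGE_SIZE): unmapping a
                 * 64KiB large page issues 64KiB / 4KiB = 16 entry flushes,
                 * while a 1MiB section would need 256 and is therefore
                 * capped at 64 by the min_t() below, matching the 64 sets.
                 */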
 553                if (MMU_MAJ_VER(data->version) == 2)
 554                        num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
 555
 556                if (sysmmu_block(data)) {
 557                        __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
 558                        sysmmu_unblock(data);
 559                }
 560                clk_disable(data->clk_master);
 561        }
 562        spin_unlock_irqrestore(&data->lock, flags);
 563}
 564
 565static const struct iommu_ops exynos_iommu_ops;
 566
 567static int exynos_sysmmu_probe(struct platform_device *pdev)
 568{
 569        int irq, ret;
 570        struct device *dev = &pdev->dev;
 571        struct sysmmu_drvdata *data;
 572        struct resource *res;
 573
 574        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 575        if (!data)
 576                return -ENOMEM;
 577
 578        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 579        data->sfrbase = devm_ioremap_resource(dev, res);
 580        if (IS_ERR(data->sfrbase))
 581                return PTR_ERR(data->sfrbase);
 582
 583        irq = platform_get_irq(pdev, 0);
 584        if (irq <= 0)
 585                return irq;
 586
 587        ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
 588                                dev_name(dev), data);
 589        if (ret) {
 590                dev_err(dev, "Unable to register handler of irq %d\n", irq);
 591                return ret;
 592        }
 593
 594        data->clk = devm_clk_get(dev, "sysmmu");
 595        if (PTR_ERR(data->clk) == -ENOENT)
 596                data->clk = NULL;
 597        else if (IS_ERR(data->clk))
 598                return PTR_ERR(data->clk);
 599
 600        data->aclk = devm_clk_get(dev, "aclk");
 601        if (PTR_ERR(data->aclk) == -ENOENT)
 602                data->aclk = NULL;
 603        else if (IS_ERR(data->aclk))
 604                return PTR_ERR(data->aclk);
 605
 606        data->pclk = devm_clk_get(dev, "pclk");
 607        if (PTR_ERR(data->pclk) == -ENOENT)
 608                data->pclk = NULL;
 609        else if (IS_ERR(data->pclk))
 610                return PTR_ERR(data->pclk);
 611
 612        if (!data->clk && (!data->aclk || !data->pclk)) {
 613                dev_err(dev, "Failed to get device clock(s)!\n");
 614                return -ENOSYS;
 615        }
 616
 617        data->clk_master = devm_clk_get(dev, "master");
 618        if (PTR_ERR(data->clk_master) == -ENOENT)
 619                data->clk_master = NULL;
 620        else if (IS_ERR(data->clk_master))
 621                return PTR_ERR(data->clk_master);
 622
 623        data->sysmmu = dev;
 624        spin_lock_init(&data->lock);
 625
 626        ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
 627                                     dev_name(data->sysmmu));
 628        if (ret)
 629                return ret;
 630
 631        ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
 632        if (ret)
 633                return ret;
 634
 635        platform_set_drvdata(pdev, data);
 636
 637        __sysmmu_get_version(data);
 638        if (PG_ENT_SHIFT < 0) {
 639                if (MMU_MAJ_VER(data->version) < 5) {
 640                        PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
 641                        LV1_PROT = SYSMMU_LV1_PROT;
 642                        LV2_PROT = SYSMMU_LV2_PROT;
 643                } else {
 644                        PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
 645                        LV1_PROT = SYSMMU_V5_LV1_PROT;
 646                        LV2_PROT = SYSMMU_V5_LV2_PROT;
 647                }
 648        }
 649
 650        /*
 651         * use the first registered sysmmu device for performing
 652         * dma mapping operations on iommu page tables (cpu cache flush)
 653         */
 654        if (!dma_dev)
 655                dma_dev = &pdev->dev;
 656
 657        pm_runtime_enable(dev);
 658
 659        return 0;
 660}
 661
 662static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
 663{
 664        struct sysmmu_drvdata *data = dev_get_drvdata(dev);
 665        struct device *master = data->master;
 666
 667        if (master) {
 668                struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
 669
 670                mutex_lock(&owner->rpm_lock);
 671                if (data->domain) {
 672                        dev_dbg(data->sysmmu, "saving state\n");
 673                        __sysmmu_disable(data);
 674                }
 675                mutex_unlock(&owner->rpm_lock);
 676        }
 677        return 0;
 678}
 679
 680static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
 681{
 682        struct sysmmu_drvdata *data = dev_get_drvdata(dev);
 683        struct device *master = data->master;
 684
 685        if (master) {
 686                struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
 687
 688                mutex_lock(&owner->rpm_lock);
 689                if (data->domain) {
 690                        dev_dbg(data->sysmmu, "restoring state\n");
 691                        __sysmmu_enable(data);
 692                }
 693                mutex_unlock(&owner->rpm_lock);
 694        }
 695        return 0;
 696}
 697
 698static const struct dev_pm_ops sysmmu_pm_ops = {
 699        SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
 700        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 701                                pm_runtime_force_resume)
 702};
 703
 704static const struct of_device_id sysmmu_of_match[] = {
 705        { .compatible   = "samsung,exynos-sysmmu", },
 706        { },
 707};
 708
 709static struct platform_driver exynos_sysmmu_driver __refdata = {
 710        .probe  = exynos_sysmmu_probe,
 711        .driver = {
 712                .name           = "exynos-sysmmu",
 713                .of_match_table = sysmmu_of_match,
 714                .pm             = &sysmmu_pm_ops,
 715                .suppress_bind_attrs = true,
 716        }
 717};
 718
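/*
 * Update a single page table entry and keep memory coherent for the SYSMMU's
 * table walker: take CPU ownership of the entry, store the little-endian
 * value, then hand it back to the device so the write is visible to hardware.
 */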
 719static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
 720{
 721        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
 722                                DMA_TO_DEVICE);
 723        *ent = cpu_to_le32(val);
 724        dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
 725                                   DMA_TO_DEVICE);
 726}
 727
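/*
 * Note on the allocation orders used below (assuming 4KiB pages): the lv1
 * table needs NUM_LV1ENTRIES * sizeof(sysmmu_pte_t) = 16KiB, i.e. four pages
 * (order 2), and the per-section lv2 entry counters need 4096 * sizeof(short)
 * = 8KiB, i.e. two pages (order 1).
 */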
 728static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 729{
 730        struct exynos_iommu_domain *domain;
 731        dma_addr_t handle;
 732        int i;
 733
 734        /* Check if correct PTE offsets are initialized */
 735        BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
 736
 737        if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
 738                return NULL;
 739
 740        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 741        if (!domain)
 742                return NULL;
 743
 744        domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
 745        if (!domain->pgtable)
 746                goto err_pgtable;
 747
 748        domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
 749        if (!domain->lv2entcnt)
 750                goto err_counter;
 751
 752        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
 753        for (i = 0; i < NUM_LV1ENTRIES; i++)
 754                domain->pgtable[i] = ZERO_LV2LINK;
 755
 756        handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
 757                                DMA_TO_DEVICE);
 758        /* For mapping page table entries we rely on dma == phys */
 759        BUG_ON(handle != virt_to_phys(domain->pgtable));
 760        if (dma_mapping_error(dma_dev, handle))
 761                goto err_lv2ent;
 762
 763        spin_lock_init(&domain->lock);
 764        spin_lock_init(&domain->pgtablelock);
 765        INIT_LIST_HEAD(&domain->clients);
 766
 767        domain->domain.geometry.aperture_start = 0;
 768        domain->domain.geometry.aperture_end   = ~0UL;
 769        domain->domain.geometry.force_aperture = true;
 770
 771        return &domain->domain;
 772
 773err_lv2ent:
 774        free_pages((unsigned long)domain->lv2entcnt, 1);
 775err_counter:
 776        free_pages((unsigned long)domain->pgtable, 2);
 777err_pgtable:
 778        kfree(domain);
 779        return NULL;
 780}
 781
 782static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 783{
 784        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
 785        struct sysmmu_drvdata *data, *next;
 786        unsigned long flags;
 787        int i;
 788
 789        WARN_ON(!list_empty(&domain->clients));
 790
 791        spin_lock_irqsave(&domain->lock, flags);
 792
 793        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
 794                spin_lock(&data->lock);
 795                __sysmmu_disable(data);
 796                data->pgtable = 0;
 797                data->domain = NULL;
 798                list_del_init(&data->domain_node);
 799                spin_unlock(&data->lock);
 800        }
 801
 802        spin_unlock_irqrestore(&domain->lock, flags);
 803
 804        dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
 805                         DMA_TO_DEVICE);
 806
 807        for (i = 0; i < NUM_LV1ENTRIES; i++)
 808                if (lv1ent_page(domain->pgtable + i)) {
 809                        phys_addr_t base = lv2table_base(domain->pgtable + i);
 810
 811                        dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
 812                                         DMA_TO_DEVICE);
 813                        kmem_cache_free(lv2table_kmem_cache,
 814                                        phys_to_virt(base));
 815                }
 816
 817        free_pages((unsigned long)domain->pgtable, 2);
 818        free_pages((unsigned long)domain->lv2entcnt, 1);
 819        kfree(domain);
 820}
 821
 822static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
 823                                    struct device *dev)
 824{
 825        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
 826        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
 827        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
 828        struct sysmmu_drvdata *data, *next;
 829        unsigned long flags;
 830
 831        if (!has_sysmmu(dev) || owner->domain != iommu_domain)
 832                return;
 833
 834        mutex_lock(&owner->rpm_lock);
 835
 836        list_for_each_entry(data, &owner->controllers, owner_node) {
 837                pm_runtime_get_noresume(data->sysmmu);
 838                if (pm_runtime_active(data->sysmmu))
 839                        __sysmmu_disable(data);
 840                pm_runtime_put(data->sysmmu);
 841        }
 842
 843        spin_lock_irqsave(&domain->lock, flags);
 844        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
 845                spin_lock(&data->lock);
 846                data->pgtable = 0;
 847                data->domain = NULL;
 848                list_del_init(&data->domain_node);
 849                spin_unlock(&data->lock);
 850        }
 851        owner->domain = NULL;
 852        spin_unlock_irqrestore(&domain->lock, flags);
 853
 854        mutex_unlock(&owner->rpm_lock);
 855
 856        dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
 857                &pagetable);
 858}
 859
 860static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
 861                                   struct device *dev)
 862{
 863        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
 864        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
 865        struct sysmmu_drvdata *data;
 866        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
 867        unsigned long flags;
 868
 869        if (!has_sysmmu(dev))
 870                return -ENODEV;
 871
 872        if (owner->domain)
 873                exynos_iommu_detach_device(owner->domain, dev);
 874
 875        mutex_lock(&owner->rpm_lock);
 876
 877        spin_lock_irqsave(&domain->lock, flags);
 878        list_for_each_entry(data, &owner->controllers, owner_node) {
 879                spin_lock(&data->lock);
 880                data->pgtable = pagetable;
 881                data->domain = domain;
 882                list_add_tail(&data->domain_node, &domain->clients);
 883                spin_unlock(&data->lock);
 884        }
 885        owner->domain = iommu_domain;
 886        spin_unlock_irqrestore(&domain->lock, flags);
 887
 888        list_for_each_entry(data, &owner->controllers, owner_node) {
 889                pm_runtime_get_noresume(data->sysmmu);
 890                if (pm_runtime_active(data->sysmmu))
 891                        __sysmmu_enable(data);
 892                pm_runtime_put(data->sysmmu);
 893        }
 894
 895        mutex_unlock(&owner->rpm_lock);
 896
 897        dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
 898                &pagetable);
 899
 900        return 0;
 901}
 902
 903static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
 904                sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
 905{
 906        if (lv1ent_section(sent)) {
 907                WARN(1, "Trying to map %#08x, already mapped with a 1MiB page", iova);
 908                return ERR_PTR(-EADDRINUSE);
 909        }
 910
 911        if (lv1ent_fault(sent)) {
 912                dma_addr_t handle;
 913                sysmmu_pte_t *pent;
 914                bool need_flush_flpd_cache = lv1ent_zero(sent);
 915
 916                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
 917                BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
 918                if (!pent)
 919                        return ERR_PTR(-ENOMEM);
 920
 921                exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
 922                kmemleak_ignore(pent);
 923                *pgcounter = NUM_LV2ENTRIES;
 924                handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
 925                                        DMA_TO_DEVICE);
 926                if (dma_mapping_error(dma_dev, handle)) {
 927                        kmem_cache_free(lv2table_kmem_cache, pent);
 928                        return ERR_PTR(-EADDRINUSE);
 929                }
 930
 931                /*
 932                 * If the pre-fetched SLPD is the faulty SLPD in zero_l2_table,
 933                 * the FLPD cache may hold the address of zero_l2_table. This
 934                 * function replaces zero_l2_table with a new L2 page table
 935                 * in order to write valid mappings.
 936                 * Accessing the valid area may then cause a page fault, since
 937                 * the FLPD cache may still hold zero_l2_table for that area
 938                 * instead of the new L2 page table that contains its mapping
 939                 * information.
 940                 * Thus any replacement of zero_l2_table with another valid L2
 941                 * page table must involve an FLPD cache invalidation for
 942                 * System MMU v3.3.
 943                 * FLPD cache invalidation is performed with a TLB invalidation
 944                 * by VPN without blocking. It is safe to invalidate the TLB
 945                 * without blocking because the target address of the TLB
 946                 * invalidation is not currently mapped.
 947                 */
 948                if (need_flush_flpd_cache) {
 949                        struct sysmmu_drvdata *data;
 950
 951                        spin_lock(&domain->lock);
 952                        list_for_each_entry(data, &domain->clients, domain_node)
 953                                sysmmu_tlb_invalidate_flpdcache(data, iova);
 954                        spin_unlock(&domain->lock);
 955                }
 956        }
 957
 958        return page_entry(sent, iova);
 959}
 960
 961static int lv1set_section(struct exynos_iommu_domain *domain,
 962                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
 963                          phys_addr_t paddr, int prot, short *pgcnt)
 964{
 965        if (lv1ent_section(sent)) {
 966                WARN(1, "Trying to map 1MiB@%#08x that is already mapped",
 967                        iova);
 968                return -EADDRINUSE;
 969        }
 970
 971        if (lv1ent_page(sent)) {
 972                if (*pgcnt != NUM_LV2ENTRIES) {
 973                        WARN(1, "Trying to map 1MiB@%#08x that is already mapped",
 974                                iova);
 975                        return -EADDRINUSE;
 976                }
 977
 978                kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
 979                *pgcnt = 0;
 980        }
 981
 982        exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));
 983
 984        spin_lock(&domain->lock);
 985        if (lv1ent_page_zero(sent)) {
 986                struct sysmmu_drvdata *data;
 987                /*
 988                 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
 989                 * entry by speculative prefetch of SLPD which has no mapping.
 990                 */
 991                list_for_each_entry(data, &domain->clients, domain_node)
 992                        sysmmu_tlb_invalidate_flpdcache(data, iova);
 993        }
 994        spin_unlock(&domain->lock);
 995
 996        return 0;
 997}
 998
 999static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
1000                       int prot, short *pgcnt)
1001{
1002        if (size == SPAGE_SIZE) {
1003                if (WARN_ON(!lv2ent_fault(pent)))
1004                        return -EADDRINUSE;
1005
1006                exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
1007                *pgcnt -= 1;
1008        } else { /* size == LPAGE_SIZE */
1009                int i;
1010                dma_addr_t pent_base = virt_to_phys(pent);
1011
1012                dma_sync_single_for_cpu(dma_dev, pent_base,
1013                                        sizeof(*pent) * SPAGES_PER_LPAGE,
1014                                        DMA_TO_DEVICE);
1015                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
1016                        if (WARN_ON(!lv2ent_fault(pent))) {
1017                                if (i > 0)
1018                                        memset(pent - i, 0, sizeof(*pent) * i);
1019                                return -EADDRINUSE;
1020                        }
1021
1022                        *pent = mk_lv2ent_lpage(paddr, prot);
1023                }
1024                dma_sync_single_for_device(dma_dev, pent_base,
1025                                           sizeof(*pent) * SPAGES_PER_LPAGE,
1026                                           DMA_TO_DEVICE);
1027                *pgcnt -= SPAGES_PER_LPAGE;
1028        }
1029
1030        return 0;
1031}
1032
1033/*
1034 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1035 *
1036 * System MMU v3.x has advanced logic to improve address translation
1037 * performance by caching more page table entries during a page table walk.
1038 * However, the logic has a bug: fault page table entries are cached as well,
1039 * and the System MMU reports a page fault when a cached fault entry is hit,
1040 * even if that entry has been updated to a valid entry after it was cached.
1041 * To prevent caching fault page table entries which may later be updated to
1042 * valid entries, the virtual memory manager must apply the workaround
1043 * described below.
1044 *
1045 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
1046 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
1047 *
1048 * Precisely, the start address of any I/O virtual region must be aligned to
1049 * the following sizes for System MMU v3.1 and v3.2:
1050 * System MMU v3.1: 128KiB
1051 * System MMU v3.2: 256KiB
1052 *
1053 * Because System MMU v3.3 caches page table entries more aggressively, it
1054 * needs further workarounds:
1055 * - Any two consecutive I/O virtual regions must have a hole of size larger
1056 *   than or equal to 128KiB.
1057 * - The start address of an I/O virtual region must be aligned to 128KiB.
1058 */
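
/*
 * Illustrative sketch only (disabled): one way an I/O virtual address
 * allocator could honour the v3.3 rules above. The helper name is
 * hypothetical and real allocators (e.g. the DMA-IOMMU layer) handle
 * this differently.
 */
#if 0
static unsigned long example_place_region(unsigned long prev_end)
{
        /*
         * Start the new region 128KiB-aligned and leave a hole of at least
         * 128KiB after the previous region.
         */
        return ALIGN(prev_end + SZ_128K, SZ_128K);
}
#endif
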
1059static int exynos_iommu_map(struct iommu_domain *iommu_domain,
1060                            unsigned long l_iova, phys_addr_t paddr, size_t size,
1061                            int prot, gfp_t gfp)
1062{
1063        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1064        sysmmu_pte_t *entry;
1065        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1066        unsigned long flags;
1067        int ret = -ENOMEM;
1068
1069        BUG_ON(domain->pgtable == NULL);
1070        prot &= SYSMMU_SUPPORTED_PROT_BITS;
1071
1072        spin_lock_irqsave(&domain->pgtablelock, flags);
1073
1074        entry = section_entry(domain->pgtable, iova);
1075
1076        if (size == SECT_SIZE) {
1077                ret = lv1set_section(domain, entry, iova, paddr, prot,
1078                                     &domain->lv2entcnt[lv1ent_offset(iova)]);
1079        } else {
1080                sysmmu_pte_t *pent;
1081
1082                pent = alloc_lv2entry(domain, entry, iova,
1083                                      &domain->lv2entcnt[lv1ent_offset(iova)]);
1084
1085                if (IS_ERR(pent))
1086                        ret = PTR_ERR(pent);
1087                else
1088                        ret = lv2set_page(pent, paddr, size, prot,
1089                                       &domain->lv2entcnt[lv1ent_offset(iova)]);
1090        }
1091
1092        if (ret)
1093                pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
1094                        __func__, ret, size, iova);
1095
1096        spin_unlock_irqrestore(&domain->pgtablelock, flags);
1097
1098        return ret;
1099}
1100
1101static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
1102                                              sysmmu_iova_t iova, size_t size)
1103{
1104        struct sysmmu_drvdata *data;
1105        unsigned long flags;
1106
1107        spin_lock_irqsave(&domain->lock, flags);
1108
1109        list_for_each_entry(data, &domain->clients, domain_node)
1110                sysmmu_tlb_invalidate_entry(data, iova, size);
1111
1112        spin_unlock_irqrestore(&domain->lock, flags);
1113}
1114
1115static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
1116                                 unsigned long l_iova, size_t size,
1117                                 struct iommu_iotlb_gather *gather)
1118{
1119        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1120        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1121        sysmmu_pte_t *ent;
1122        size_t err_pgsize;
1123        unsigned long flags;
1124
1125        BUG_ON(domain->pgtable == NULL);
1126
1127        spin_lock_irqsave(&domain->pgtablelock, flags);
1128
1129        ent = section_entry(domain->pgtable, iova);
1130
1131        if (lv1ent_section(ent)) {
1132                if (WARN_ON(size < SECT_SIZE)) {
1133                        err_pgsize = SECT_SIZE;
1134                        goto err;
1135                }
1136
1137                /* workaround for h/w bug in System MMU v3.3 */
1138                exynos_iommu_set_pte(ent, ZERO_LV2LINK);
1139                size = SECT_SIZE;
1140                goto done;
1141        }
1142
1143        if (unlikely(lv1ent_fault(ent))) {
1144                if (size > SECT_SIZE)
1145                        size = SECT_SIZE;
1146                goto done;
1147        }
1148
1149        /* lv1ent_page(ent) == true here */
1150
1151        ent = page_entry(ent, iova);
1152
1153        if (unlikely(lv2ent_fault(ent))) {
1154                size = SPAGE_SIZE;
1155                goto done;
1156        }
1157
1158        if (lv2ent_small(ent)) {
1159                exynos_iommu_set_pte(ent, 0);
1160                size = SPAGE_SIZE;
1161                domain->lv2entcnt[lv1ent_offset(iova)] += 1;
1162                goto done;
1163        }
1164
1165        /* lv2ent_large(ent) == true here */
1166        if (WARN_ON(size < LPAGE_SIZE)) {
1167                err_pgsize = LPAGE_SIZE;
1168                goto err;
1169        }
1170
1171        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
1172                                sizeof(*ent) * SPAGES_PER_LPAGE,
1173                                DMA_TO_DEVICE);
1174        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
1175        dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
1176                                   sizeof(*ent) * SPAGES_PER_LPAGE,
1177                                   DMA_TO_DEVICE);
1178        size = LPAGE_SIZE;
1179        domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
1180done:
1181        spin_unlock_irqrestore(&domain->pgtablelock, flags);
1182
1183        exynos_iommu_tlb_invalidate_entry(domain, iova, size);
1184
1185        return size;
1186err:
1187        spin_unlock_irqrestore(&domain->pgtablelock, flags);
1188
1189        pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
1190                __func__, size, iova, err_pgsize);
1191
1192        return 0;
1193}
1194
1195static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
1196                                          dma_addr_t iova)
1197{
1198        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1199        sysmmu_pte_t *entry;
1200        unsigned long flags;
1201        phys_addr_t phys = 0;
1202
1203        spin_lock_irqsave(&domain->pgtablelock, flags);
1204
1205        entry = section_entry(domain->pgtable, iova);
1206
1207        if (lv1ent_section(entry)) {
1208                phys = section_phys(entry) + section_offs(iova);
1209        } else if (lv1ent_page(entry)) {
1210                entry = page_entry(entry, iova);
1211
1212                if (lv2ent_large(entry))
1213                        phys = lpage_phys(entry) + lpage_offs(iova);
1214                else if (lv2ent_small(entry))
1215                        phys = spage_phys(entry) + spage_offs(iova);
1216        }
1217
1218        spin_unlock_irqrestore(&domain->pgtablelock, flags);
1219
1220        return phys;
1221}
1222
1223static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
1224{
1225        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
1226        struct sysmmu_drvdata *data;
1227
1228        if (!has_sysmmu(dev))
1229                return ERR_PTR(-ENODEV);
1230
1231        list_for_each_entry(data, &owner->controllers, owner_node) {
1232                /*
1233                 * SYSMMU will be runtime activated via device link
1234                 * (dependency) to its master device, so there are no
1235                 * direct calls to pm_runtime_get/put in this driver.
1236                 */
1237                data->link = device_link_add(dev, data->sysmmu,
1238                                             DL_FLAG_STATELESS |
1239                                             DL_FLAG_PM_RUNTIME);
1240        }
1241
1242        /* There is always at least one entry, see exynos_iommu_of_xlate() */
1243        data = list_first_entry(&owner->controllers,
1244                                struct sysmmu_drvdata, owner_node);
1245
1246        return &data->iommu;
1247}
1248
1249static void exynos_iommu_release_device(struct device *dev)
1250{
1251        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
1252        struct sysmmu_drvdata *data;
1253
1254        if (!has_sysmmu(dev))
1255                return;
1256
1257        if (owner->domain) {
1258                struct iommu_group *group = iommu_group_get(dev);
1259
1260                if (group) {
1261                        WARN_ON(owner->domain !=
1262                                iommu_group_default_domain(group));
1263                        exynos_iommu_detach_device(owner->domain, dev);
1264                        iommu_group_put(group);
1265                }
1266        }
1267
1268        list_for_each_entry(data, &owner->controllers, owner_node)
1269                device_link_del(data->link);
1270}
1271
1272static int exynos_iommu_of_xlate(struct device *dev,
1273                                 struct of_phandle_args *spec)
1274{
1275        struct platform_device *sysmmu = of_find_device_by_node(spec->np);
1276        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
1277        struct sysmmu_drvdata *data, *entry;
1278
1279        if (!sysmmu)
1280                return -ENODEV;
1281
1282        data = platform_get_drvdata(sysmmu);
1283        if (!data) {
1284                put_device(&sysmmu->dev);
1285                return -ENODEV;
1286        }
1287
1288        if (!owner) {
1289                owner = kzalloc(sizeof(*owner), GFP_KERNEL);
1290                if (!owner) {
1291                        put_device(&sysmmu->dev);
1292                        return -ENOMEM;
1293                }
1294
1295                INIT_LIST_HEAD(&owner->controllers);
1296                mutex_init(&owner->rpm_lock);
1297                dev_iommu_priv_set(dev, owner);
1298        }
1299
1300        list_for_each_entry(entry, &owner->controllers, owner_node)
1301                if (entry == data)
1302                        return 0;
1303
1304        list_add_tail(&data->owner_node, &owner->controllers);
1305        data->master = dev;
1306
1307        return 0;
1308}
1309
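/*
 * The pgsize_bitmap below advertises the three mapping sizes this page table
 * format supports (4KiB small pages, 64KiB large pages and 1MiB sections),
 * so the IOMMU core only calls map/unmap with one of those sizes at a time.
 */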
1310static const struct iommu_ops exynos_iommu_ops = {
1311        .domain_alloc = exynos_iommu_domain_alloc,
1312        .domain_free = exynos_iommu_domain_free,
1313        .attach_dev = exynos_iommu_attach_device,
1314        .detach_dev = exynos_iommu_detach_device,
1315        .map = exynos_iommu_map,
1316        .unmap = exynos_iommu_unmap,
1317        .iova_to_phys = exynos_iommu_iova_to_phys,
1318        .device_group = generic_device_group,
1319        .probe_device = exynos_iommu_probe_device,
1320        .release_device = exynos_iommu_release_device,
1321        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
1322        .of_xlate = exynos_iommu_of_xlate,
1323};
1324
1325static int __init exynos_iommu_init(void)
1326{
1327        struct device_node *np;
1328        int ret;
1329
1330        np = of_find_matching_node(NULL, sysmmu_of_match);
1331        if (!np)
1332                return 0;
1333
1334        of_node_put(np);
1335
1336        lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1337                                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1338        if (!lv2table_kmem_cache) {
1339                pr_err("%s: Failed to create kmem cache\n", __func__);
1340                return -ENOMEM;
1341        }
1342
1343        ret = platform_driver_register(&exynos_sysmmu_driver);
1344        if (ret) {
1345                pr_err("%s: Failed to register driver\n", __func__);
1346                goto err_reg_driver;
1347        }
1348
1349        zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
1350        if (zero_lv2_table == NULL) {
1351                pr_err("%s: Failed to allocate zero level2 page table\n",
1352                        __func__);
1353                ret = -ENOMEM;
1354                goto err_zero_lv2;
1355        }
1356
1357        ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1358        if (ret) {
1359                pr_err("%s: Failed to register exynos-iommu driver.\n",
1360                                                                __func__);
1361                goto err_set_iommu;
1362        }
1363
1364        return 0;
1365err_set_iommu:
1366        kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
1367err_zero_lv2:
1368        platform_driver_unregister(&exynos_sysmmu_driver);
1369err_reg_driver:
1370        kmem_cache_destroy(lv2table_kmem_cache);
1371        return ret;
1372}
1373core_initcall(exynos_iommu_init);
1374