linux/drivers/iommu/mtk_iommu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR                    0x000
#define MMU_PT_ADDR_MASK                        GENMASK(31, 7)

#define REG_MMU_INVALIDATE                      0x020
#define F_ALL_INVLD                             0x2
#define F_MMU_INV_RANGE                         0x1

#define REG_MMU_INVLD_START_A                   0x024
#define REG_MMU_INVLD_END_A                     0x028

#define REG_MMU_INV_SEL                         0x038
#define F_INVLD_EN0                             BIT(0)
#define F_INVLD_EN1                             BIT(1)

#define REG_MMU_STANDARD_AXI_MODE               0x048
#define REG_MMU_DCM_DIS                         0x050

#define REG_MMU_CTRL_REG                        0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR           (2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD           BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173    (2 << 5)

#define REG_MMU_IVRP_PADDR                      0x114

#define REG_MMU_VLD_PA_RNG                      0x118
#define F_MMU_VLD_PA_RNG(EA, SA)                (((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0                    0x120
#define F_L2_MULIT_HIT_EN                       BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN               BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN          BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN             BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN              BIT(5)
#define F_MISS_FIFO_ERR_INT_EN                  BIT(6)
#define F_INT_CLR_BIT                           BIT(12)

#define REG_MMU_INT_MAIN_CONTROL                0x124
                                                /* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT                 (BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT              (BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT                  (BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT           (BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT                    (BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT       (BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT    (BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE                        0x12C

#define REG_MMU_FAULT_ST1                       0x134
#define F_REG_MMU0_FAULT_MASK                   GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK                   GENMASK(13, 7)

#define REG_MMU0_FAULT_VA                       0x13c
#define F_MMU_FAULT_VA_WRITE_BIT                BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT                BIT(0)

#define REG_MMU0_INVLD_PA                       0x140
#define REG_MMU1_FAULT_VA                       0x144
#define REG_MMU1_INVLD_PA                       0x148
#define REG_MMU0_INT_ID                         0x150
#define REG_MMU1_INT_ID                         0x154
#define F_MMU_INT_ID_LARB_ID(a)                 (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)                 (((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN                    128

/*
 * Get the local arbiter (larb) ID and the port ID within that larb from
 * the mtk_m4u_id that was encoded with MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)             (((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)             ((id) & 0x1f)
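/*
 * Illustrative example (assuming MTK_M4U_ID(larb, port) packs the id as
 * ((larb << 5) | port), which matches the decoding macros above): an id
 * encoded as MTK_M4U_ID(2, 7) is 0x47, so MTK_M4U_TO_LARB(0x47) = 2 and
 * MTK_M4U_TO_PORT(0x47) = 7.
 */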

struct mtk_iommu_domain {
        struct io_pgtable_cfg           cfg;
        struct io_pgtable_ops           *iop;

        struct iommu_domain             domain;
};

static const struct iommu_ops mtk_iommu_ops;

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G     3G       4G     5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * ==============================
 *
 *                                 4G      5G     6G      7G      8G
 *                                 |---E---|---B---|---C---|---D---|
 *                                 +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by the M4U. For regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address must always be set, while for region
 * 'E' the CPU physical address is kept as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE    0x140000000UL
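/*
 * Worked example of the remap above (illustrative values): a CPU PA of
 * 0x8000_0000 lies in region 'C', so mtk_iommu_map() programs the page
 * table with 0x1_8000_0000 (bit 32 set). Conversely, when
 * mtk_iommu_iova_to_phys() reads back an output PA at or above
 * MTK_IOMMU_4GB_MODE_REMAP_BASE (0x1_4000_0000), it clears bit 32 so the
 * caller again sees the CPU physical address, here 0x8000_0000.
 */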

static LIST_HEAD(m4ulist);      /* List all the M4U HWs */

#define for_each_m4u(data)      list_for_each_entry(data, &m4ulist, list)

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance reasons.
 *
 * Always return the mtk_iommu_data of the first probed M4U, which records
 * the iommu domain information.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
        struct mtk_iommu_data *data;

        for_each_m4u(data)
                return data;

        return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + REG_MMU_INV_SEL);
                writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
                wmb(); /* Make sure the TLB flush-all has completed */
        }
}

static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
                                           size_t granule, void *cookie)
{
        struct mtk_iommu_data *data = cookie;
        unsigned long flags;
        int ret;
        u32 tmp;

        for_each_m4u(data) {
                spin_lock_irqsave(&data->tlb_lock, flags);
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + REG_MMU_INV_SEL);

                writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
                writel_relaxed(iova + size - 1,
                               data->base + REG_MMU_INVLD_END_A);
                writel_relaxed(F_MMU_INV_RANGE,
                               data->base + REG_MMU_INVALIDATE);

                /* tlb sync */
                ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
                                                tmp, tmp != 0, 10, 1000);
                if (ret) {
                        dev_warn(data->dev,
                                 "Partial TLB flush timed out, falling back to full flush\n");
                        mtk_iommu_tlb_flush_all(cookie);
                }
                /* Clear the CPE status */
                writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
                spin_unlock_irqrestore(&data->tlb_lock, flags);
        }
}

static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
                                            unsigned long iova, size_t granule,
                                            void *cookie)
{
        struct mtk_iommu_data *data = cookie;
        struct iommu_domain *domain = &data->m4u_dom->domain;

        iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}

static const struct iommu_flush_ops mtk_iommu_flush_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
        .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
        .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
        .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};
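/*
 * Rough sketch of how these callbacks are exercised (based on the quirks
 * set up in mtk_iommu_domain_finalise() below): because the pagetable is
 * created with IO_PGTABLE_QUIRK_TLBI_ON_MAP, io-pgtable invalidates the
 * mapped range right after a map. On unmap, tlb_add_page only gathers the
 * pages into the iommu_iotlb_gather; the actual range invalidation is
 * deferred until mtk_iommu_iotlb_sync() calls
 * mtk_iommu_tlb_flush_range_sync().
 */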

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
        unsigned int fault_larb, fault_port;
        bool layer, write;

        /* Read error info from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
        if (int_state & F_REG_MMU0_FAULT_MASK) {
                regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
                fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
                fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
        } else {
                regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
                fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
                fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
        }
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
        fault_larb = F_MMU_INT_ID_LARB_ID(regval);
        fault_port = F_MMU_INT_ID_PORT_ID(regval);

        fault_larb = data->plat_data->larbid_remap[fault_larb];

        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
                dev_err_ratelimited(
                        data->dev,
                        "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
                        int_state, fault_iova, fault_pa, fault_larb, fault_port,
                        layer, write ? "write" : "read");
        }

        /* Interrupt clear */
        regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        mtk_iommu_tlb_flush_all(data);

        return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
                             struct device *dev, bool enable)
{
        struct mtk_smi_larb_iommu    *larb_mmu;
        unsigned int                 larbid, portid;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        int i;

        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
                larb_mmu = &data->larb_imu[larbid];

                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);

                if (enable)
                        larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
                else
                        larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
        }
}
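/*
 * Note (summary of the wider driver, not of this function alone): the
 * per-larb ->mmu bitmask set above is not written to hardware by the M4U
 * driver itself; it is expected to be picked up by the MediaTek SMI larb
 * driver, bound through the component framework below, which applies it to
 * the larb's MMU enable bits when the larb is configured.
 */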

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        dom->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_ARM_NS |
                        IO_PGTABLE_QUIRK_NO_PERMS |
                        IO_PGTABLE_QUIRK_TLBI_ON_MAP |
                        IO_PGTABLE_QUIRK_ARM_MTK_EXT,
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 34,
                .tlb = &mtk_iommu_flush_ops,
                .iommu_dev = data->dev,
        };
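        /*
         * Roughly, the quirks above mean: ARM_NS sets the non-secure PTE
         * bits, NO_PERMS ignores the IOMMU_READ/IOMMU_WRITE prot flags and
         * always grants full access, TLBI_ON_MAP makes io-pgtable invalidate
         * the TLB after every map (the M4U may cache not-present entries),
         * and ARM_MTK_EXT enables the MediaTek 34-bit output-address
         * extension of the v7s format, matching .oas = 34.
         */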

        dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
        if (!dom->iop) {
                dev_err(data->dev, "Failed to alloc io pgtable\n");
                return -EINVAL;
        }

        /* Update our supported page sizes bitmap */
        dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
        return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
        struct mtk_iommu_domain *dom;

        if (type != IOMMU_DOMAIN_DMA)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        if (iommu_get_dma_cookie(&dom->domain))
                goto  free_dom;

        if (mtk_iommu_domain_finalise(dom))
                goto  put_dma_cookie;

        dom->domain.geometry.aperture_start = 0;
        dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        dom->domain.geometry.force_aperture = true;

        return &dom->domain;

put_dma_cookie:
        iommu_put_dma_cookie(&dom->domain);
free_dom:
        kfree(dom);
        return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        free_io_pgtable_ops(dom->iop);
        iommu_put_dma_cookie(domain);
        kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

        if (!data)
                return -ENODEV;

        /* Update the pgtable base address register of the M4U HW */
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
                writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
                       data->base + REG_MMU_PT_BASE_ADDR);
        }

        mtk_iommu_config(data, dev, true);
        return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

        if (!data)
                return;

        mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        /* In "4GB mode", the M4U physically cannot use the lower remap of DRAM. */
        if (data->enable_4GB)
                paddr |= BIT_ULL(32);

        /* Synchronize with the tlb_lock */
        return dom->iop->map(dom->iop, iova, paddr, size, prot);
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size,
                              struct iommu_iotlb_gather *gather)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
                                 struct iommu_iotlb_gather *gather)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        size_t length = gather->end - gather->start;

        if (gather->start == ULONG_MAX)
                return;

        mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
                                       data);
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        phys_addr_t pa;

        pa = dom->iop->iova_to_phys(dom->iop, iova);
        if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
                pa &= ~BIT_ULL(32);

        return pa;
}

static int mtk_iommu_add_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct mtk_iommu_data *data;
        struct iommu_group *group;

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return -ENODEV; /* Not an iommu client device */

        data = fwspec->iommu_priv;
        iommu_device_link(&data->iommu, dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct mtk_iommu_data *data;

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return;

        data = fwspec->iommu_priv;
        iommu_device_unlink(&data->iommu, dev);

        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        if (!data)
                return ERR_PTR(-ENODEV);

        /* All the client devices are in the same m4u iommu-group */
        if (!data->m4u_group) {
                data->m4u_group = iommu_group_alloc();
                if (IS_ERR(data->m4u_group))
                        dev_err(dev, "Failed to allocate M4U IOMMU group\n");
        } else {
                iommu_group_ref_get(data->m4u_group);
        }
        return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct platform_device *m4updev;

        if (args->args_count != 1) {
                dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
                        args->args_count);
                return -EINVAL;
        }

        if (!fwspec->iommu_priv) {
                /* Get the m4u device */
                m4updev = of_find_device_by_node(args->np);
                if (WARN_ON(!m4updev))
                        return -EINVAL;

                fwspec->iommu_priv = platform_get_drvdata(m4updev);
        }

        return iommu_fwspec_add_ids(dev, args->args, 1);
}
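/*
 * Illustrative devicetree usage (sketch only; M4U_PORT_DISP_OVL0 stands in
 * for whatever per-SoC port macro the dt-bindings headers provide, and the
 * node names are made up):
 *
 *      iommu: iommu@10205000 {
 *              compatible = "mediatek,mt8173-m4u";
 *              ...
 *              #iommu-cells = <1>;
 *      };
 *
 *      ovl0: ovl@1400c000 {
 *              ...
 *              iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *      };
 *
 * The single cell is the MTK_M4U_ID()-encoded master ID that ends up in
 * args->args[0] above and is added to the device's fwspec.
 */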

static const struct iommu_ops mtk_iommu_ops = {
        .domain_alloc   = mtk_iommu_domain_alloc,
        .domain_free    = mtk_iommu_domain_free,
        .attach_dev     = mtk_iommu_attach_device,
        .detach_dev     = mtk_iommu_detach_device,
        .map            = mtk_iommu_map,
        .unmap          = mtk_iommu_unmap,
        .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
        .iotlb_sync     = mtk_iommu_iotlb_sync,
        .iova_to_phys   = mtk_iommu_iova_to_phys,
        .add_device     = mtk_iommu_add_device,
        .remove_device  = mtk_iommu_remove_device,
        .device_group   = mtk_iommu_device_group,
        .of_xlate       = mtk_iommu_of_xlate,
        .pgsize_bitmap  = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
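/*
 * Note on pgsize_bitmap above: SZ_4K, SZ_64K, SZ_1M and SZ_16M correspond
 * to the small-page, large-page, section and supersection sizes of the
 * ARM v7s short-descriptor format used by this M4U generation.
 */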

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
        u32 regval;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
                return ret;
        }

        if (data->plat_data->m4u_plat == M4U_MT8173)
                regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
                         F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
        else
                regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

        regval = F_L2_MULIT_HIT_EN |
                F_TABLE_WALK_FAULT_INT_EN |
                F_PREETCH_FIFO_OVERFLOW_INT_EN |
                F_MISS_FIFO_OVERFLOW_INT_EN |
                F_PREFETCH_FIFO_ERR_INT_EN |
                F_MISS_FIFO_ERR_INT_EN;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        regval = F_INT_TRANSLATION_FAULT |
                F_INT_MAIN_MULTI_HIT_FAULT |
                F_INT_INVALID_PA_FAULT |
                F_INT_ENTRY_REPLACEMENT_FAULT |
                F_INT_TLB_MISS_FAULT |
                F_INT_MISS_TRANSACTION_FIFO_FAULT |
                F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
        writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

        if (data->plat_data->m4u_plat == M4U_MT8173)
                regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
        else
                regval = lower_32_bits(data->protect_base) |
                         upper_32_bits(data->protect_base);
        writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

        if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
                /*
                 * If 4GB mode is enabled, the valid PA range is from
                 * 0x1_0000_0000 to 0x1_ffff_ffff. Record bits [32:30] of the
                 * start and end addresses here.
                 */
                regval = F_MMU_VLD_PA_RNG(7, 4);
                writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
        }
        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

        if (data->plat_data->reset_axi)
                writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                clk_disable_unprepare(data->bclk);
                dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
                return -ENODEV;
        }

        return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
        .bind           = mtk_iommu_bind,
        .unbind         = mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
        struct mtk_iommu_data   *data;
        struct device           *dev = &pdev->dev;
        struct resource         *res;
        resource_size_t         ioaddr;
        struct component_match  *match = NULL;
        void                    *protect;
        int                     i, larb_nr, ret;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->dev = dev;
        data->plat_data = of_device_get_match_data(dev);

        /* Protect memory. The HW accesses this region on a translation fault. */
        protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
        if (!protect)
                return -ENOMEM;
        data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

        /* Whether the current DRAM exceeds 4GB */
        data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
        if (!data->plat_data->has_4gb_mode)
                data->enable_4GB = false;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
        ioaddr = res->start;

        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
                return data->irq;

        if (data->plat_data->has_bclk) {
                data->bclk = devm_clk_get(dev, "bclk");
                if (IS_ERR(data->bclk))
                        return PTR_ERR(data->bclk);
        }

        larb_nr = of_count_phandle_with_args(dev->of_node,
                                             "mediatek,larbs", NULL);
        if (larb_nr < 0)
                return larb_nr;

        for (i = 0; i < larb_nr; i++) {
                struct device_node *larbnode;
                struct platform_device *plarbdev;
                u32 id;

                larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
                if (!larbnode)
                        return -EINVAL;

                if (!of_device_is_available(larbnode)) {
                        of_node_put(larbnode);
                        continue;
                }

                ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
                if (ret) /* The ids are consecutive if this property is absent */
                        id = i;

                plarbdev = of_find_device_by_node(larbnode);
                if (!plarbdev) {
                        of_node_put(larbnode);
                        return -EPROBE_DEFER;
                }
                data->larb_imu[id].dev = &plarbdev->dev;

                component_match_add_release(dev, &match, release_of,
                                            compare_of, larbnode);
        }

        platform_set_drvdata(pdev, data);

        ret = mtk_iommu_hw_init(data);
        if (ret)
                return ret;

        ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
                                     "mtk-iommu.%pa", &ioaddr);
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
        iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&data->iommu);
        if (ret)
                return ret;

        spin_lock_init(&data->tlb_lock);
        list_add_tail(&data->list, &m4ulist);

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

        return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);

        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);

        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);

        clk_disable_unprepare(data->bclk);
        devm_free_irq(&pdev->dev, data->irq, data);
        component_master_del(&pdev->dev, &mtk_iommu_com_ops);
        return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        reg->standard_axi_mode = readl_relaxed(base +
                                               REG_MMU_STANDARD_AXI_MODE);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
        reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
        reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
        reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
        clk_disable_unprepare(data->bclk);
        return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
        void __iomem *base = data->base;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
                return ret;
        }
        writel_relaxed(reg->standard_axi_mode,
                       base + REG_MMU_STANDARD_AXI_MODE);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
        writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
        writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
        writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
        if (m4u_dom)
                writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
                       base + REG_MMU_PT_BASE_ADDR);
        return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct mtk_iommu_plat_data mt2712_data = {
        .m4u_plat     = M4U_MT2712,
        .has_4gb_mode = true,
        .has_bclk     = true,
        .has_vld_pa_rng   = true,
        .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
};

static const struct mtk_iommu_plat_data mt8173_data = {
        .m4u_plat     = M4U_MT8173,
        .has_4gb_mode = true,
        .has_bclk     = true,
        .reset_axi    = true,
        .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
};

static const struct mtk_iommu_plat_data mt8183_data = {
        .m4u_plat     = M4U_MT8183,
        .reset_axi    = true,
        .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
};

static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
        { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
        { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
        {}
};

static struct platform_driver mtk_iommu_driver = {
        .probe  = mtk_iommu_probe,
        .remove = mtk_iommu_remove,
        .driver = {
                .name = "mtk-iommu",
                .of_match_table = of_match_ptr(mtk_iommu_of_ids),
                .pm = &mtk_iommu_pm_ops,
        }
};

static int __init mtk_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&mtk_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register MTK IOMMU driver\n");

        return ret;
}

subsys_initcall(mtk_iommu_init)