linux/drivers/iommu/tegra-smmu.c
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu {
        void __iomem *regs;
        struct device *dev;

        struct tegra_mc *mc;
        const struct tegra_smmu_soc *soc;

        unsigned long pfn_mask;
        unsigned long tlb_mask;

        unsigned long *asids;
        struct mutex lock;

        struct list_head list;

        struct dentry *debugfs;
};

struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
        u32 *count;
        struct page **pts;
        struct page *pd;
        dma_addr_t pd_dma;
        unsigned int id;
        u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
        return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                          SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
                                          SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

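/*
 * Note on flush granularities: a "section" match invalidates TLB entries
 * covering one 4 MiB region (the span of a single PDE, hence the
 * 0xffc00000 mask above), while a "group" match covers a 16 KiB region
 * (four 4 KiB pages, hence the 0xffffc000 mask).
 */
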
#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE        (1 << 31)
#define SMMU_PD_WRITABLE        (1 << 30)
#define SMMU_PD_NONSECURE       (1 << 29)

#define SMMU_PDE_READABLE       (1 << 31)
#define SMMU_PDE_WRITABLE       (1 << 30)
#define SMMU_PDE_NONSECURE      (1 << 29)
#define SMMU_PDE_NEXT           (1 << 28)

#define SMMU_PTE_READABLE       (1 << 31)
#define SMMU_PTE_WRITABLE       (1 << 30)
#define SMMU_PTE_NONSECURE      (1 << 29)

#define SMMU_PDE_ATTR           (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                                 SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR           (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                                 SMMU_PTE_NONSECURE)

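/*
 * The SMMU uses a two-level page table: one 4 KiB page directory of 1024
 * 32-bit PDEs, each pointing to a 4 KiB page table of 1024 32-bit PTEs.
 * Each PTE maps one 4 KiB page, so a fully populated address space spans
 * 1024 * 1024 * 4 KiB = 4 GiB, matching the aperture set up in
 * tegra_smmu_domain_alloc().
 */
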
static unsigned int iova_pd_index(unsigned long iova)
{
        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

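/*
 * Worked example: iova 0x12345678 decomposes into PDE index 0x048
 * (bits 31:22), PTE index 0x345 (bits 21:12) and page offset 0x678
 * (bits 11:0).
 */
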
static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
        addr >>= 12;
        return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
        /*
         * Mask off the attribute bits and widen before shifting so that
         * page tables located above 4 GiB don't overflow the 32-bit PDE
         * arithmetic.
         */
        return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
{
        u32 value;

        /* align the flush address to the memory controller's atom size */
        offset &= ~(smmu->mc->soc->atom_size - 1);

        if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
                value = 0;
#endif
                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }

        value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

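/*
 * Reading back SMMU_CONFIG forces all previously posted register writes to
 * complete before we proceed, so smmu_flush() acts as a write barrier for
 * the flush operations queued above it.
 */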
static inline void smmu_flush(struct tegra_smmu *smmu)
{
        smmu_readl(smmu, SMMU_CONFIG);
}

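/*
 * ASIDs are tracked in a bitmap protected by smmu->lock; each live address
 * space owns exactly one ASID until the last attached device releases it
 * in tegra_smmu_as_unprepare().
 */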
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned long id;

        mutex_lock(&smmu->lock);

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids) {
                mutex_unlock(&smmu->lock);
                return -ENOSPC;
        }

        set_bit(id, smmu->asids);
        *idp = id;

        mutex_unlock(&smmu->lock);
        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
        mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
        return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
        if (!as->pts) {
                kfree(as->count);
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

        return &as->domain;
}

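/*
 * The inverse of tegra_smmu_domain_alloc(). By the time a domain is freed
 * every mapping should have been torn down, so the per-PDE page tables are
 * already gone and only the bookkeeping arrays and the page directory
 * itself remain.
 */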
static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);

        WARN_ON_ONCE(as->use_count);
        kfree(as->count);
        kfree(as->pts);
        __free_page(as->pd);
        kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
        const struct tegra_smmu_swgroup *group = NULL;
        unsigned int i;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
                        break;
                }
        }

        return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        /* enable SMMU translation for every client in this swgroup */
        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value |= BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value &= ~BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }
}

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err;

        if (as->use_count > 0) {
                as->use_count++;
                return 0;
        }

        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma))
                return -ENOMEM;

        /* We can't handle 64-bit DMA addresses */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
                err = -ENOMEM;
                goto err_unmap;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                goto err_unmap;

        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, SMMU_PTB_ASID_VALUE(as->id), SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        return 0;

err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
        return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        if (--as->use_count > 0)
                return;

        tegra_smmu_free_asid(smmu, as->id);

        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

        as->smmu = NULL;
}

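/*
 * Devices pick up their SMMU translation through "iommus" phandles in the
 * device tree. A minimal sketch (node names and unit addresses are
 * illustrative only):
 *
 *   mc: memory-controller@70019000 {
 *           #iommu-cells = <1>;
 *   };
 *
 *   host1x@50000000 {
 *           iommus = <&mc TEGRA_SWGROUP_HC>;
 *   };
 *
 * The single cell is the client's SWGROUP ID, which is what
 * tegra_smmu_attach_dev() reads out of args.args[0] below.
 */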
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int attached = 0;
        unsigned int index = 0;
        int err;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                /* always advance, or phandles for other IOMMUs loop forever */
                index++;

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                err = tegra_smmu_as_prepare(smmu, as);
                if (err < 0)
                        return err;

                tegra_smmu_enable(smmu, swgroup, as->id);
                attached++;
        }

        if (attached == 0)
                return -ENODEV;

        return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = as->smmu;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                /* always advance, or phandles for other IOMMUs loop forever */
                index++;

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                tegra_smmu_disable(smmu, swgroup, as->id);
                tegra_smmu_as_unprepare(smmu, as);
        }
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                               u32 value)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        u32 *pd = page_address(as->pd);
        unsigned long offset = pd_index * sizeof(*pd);

        /* Set the page directory entry first */
        pd[pd_index] = value;

        /* Then flush the page directory entry from caches */
        dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
                                         sizeof(*pd), DMA_TO_DEVICE);

        /* And flush the iommu */
        smmu_flush_ptc(smmu, as->pd_dma, offset);
        smmu_flush_tlb_section(smmu, as->id, iova);
        smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
        u32 *pt = page_address(pt_page);

        return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
                                  dma_addr_t *dmap)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct page *pt_page;
        u32 *pd;

        pt_page = as->pts[pd_index];
        if (!pt_page)
                return NULL;

        pd = page_address(as->pd);
        *dmap = smmu_pde_to_dma(as->smmu, pd[pd_index]);

        return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       dma_addr_t *dmap)
{
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;

        if (!as->pts[pde]) {
                struct page *page;
                dma_addr_t dma;

                page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
                if (!page)
                        return NULL;

                dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
                                   DMA_TO_DEVICE);
                if (dma_mapping_error(smmu->dev, dma)) {
                        __free_page(page);
                        return NULL;
                }

                if (!smmu_dma_addr_valid(smmu, dma)) {
                        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
                                       DMA_TO_DEVICE);
                        __free_page(page);
                        return NULL;
                }

                as->pts[pde] = page;

                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
                                                              SMMU_PDE_NEXT));

                *dmap = dma;
        } else {
                u32 *pd = page_address(as->pd);

                *dmap = smmu_pde_to_dma(smmu, pd[pde]);
        }

        return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pd_index = iova_pd_index(iova);

        as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (--as->count[pde] == 0) {
                struct tegra_smmu *smmu = as->smmu;
                u32 *pd = page_address(as->pd);
                dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

                tegra_smmu_set_pde(as, iova, 0);

                dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
                __free_page(page);
                as->pts[pde] = NULL;
        }
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, dma_addr_t pte_dma, u32 val)
{
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = offset_in_page(pte);

        *pte = val;

        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
                                         sizeof(*pte), DMA_TO_DEVICE);
        smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
}

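/*
 * tegra_smmu_map() and tegra_smmu_unmap() below back the generic IOMMU
 * API. A minimal usage sketch from a hypothetical client driver (error
 * handling omitted):
 *
 *   struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *   iommu_attach_device(domain, dev);
 *   iommu_map(domain, iova, page_to_phys(page), SZ_4K,
 *             IOMMU_READ | IOMMU_WRITE);
 *   ...
 *   iommu_unmap(domain, iova, SZ_4K);
 *
 * Note that tegra_smmu_map() currently ignores the prot flags and always
 * installs readable, writable, non-secure PTEs (SMMU_PTE_ATTR).
 */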
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = as_get_pte(as, iova, &pte_dma);
        if (!pte)
                return -ENOMEM;

        /* If we aren't overwriting a pre-existing entry, increment use */
        if (*pte == 0)
                tegra_smmu_pte_get_use(as, iova);

        tegra_smmu_set_pte(as, iova, pte, pte_dma,
                           __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

        return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
        tegra_smmu_pte_put_use(as, iova);

        return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long pfn;
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        pfn = *pte & as->smmu->pfn_mask;

        return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
        struct platform_device *pdev;
        struct tegra_mc *mc;

        pdev = of_find_device_by_node(np);
        if (!pdev)
                return NULL;

        mc = platform_get_drvdata(pdev);
        if (!mc) {
                /* drop the reference taken by of_find_device_by_node() */
                put_device(&pdev->dev);
                return NULL;
        }

        return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                          &args) == 0) {
                struct tegra_smmu *smmu;

                smmu = tegra_smmu_find(args.np);
                of_node_put(args.np);

                if (smmu) {
                        /*
                         * Only a single IOMMU master interface is currently
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
                        dev->archdata.iommu = smmu;
                        break;
                }

                index++;
        }

        return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
        dev->archdata.iommu = NULL;
}

static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .add_device = tegra_smmu_add_device,
        .remove_device = tegra_smmu_remove_device,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = tegra_smmu_iova_to_phys,

        /* the hardware only supports 4 KiB pages */
        .pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },
                { }
        };
        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
        if (ahb) {
                tegra_ahb_enable_smmu(ahb);
                of_node_put(ahb);
        }
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_puts(s, "swgroup    enabled  ASID\n");
        seq_puts(s, "------------------------\n");

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
                const char *status;
                unsigned int asid;

                value = smmu_readl(smmu, group->reg);

                if (value & SMMU_ASID_ENABLE)
                        status = "yes";
                else
                        status = "no";

                asid = value & SMMU_ASID_MASK;

                seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
                           asid);
        }

        return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
        return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
        .open = tegra_smmu_swgroups_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_puts(s, "client       enabled\n");
        seq_puts(s, "--------------------\n");

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];
                const char *status;

                value = smmu_readl(smmu, client->smmu.reg);

                if (value & BIT(client->smmu.bit))
                        status = "yes";
                else
                        status = "no";

                seq_printf(s, "%-12s %s\n", client->name, status);
        }

        return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
        return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
        .open = tegra_smmu_clients_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
        smmu->debugfs = debugfs_create_dir("smmu", NULL);
        if (!smmu->debugfs)
                return;

        debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_swgroups_fops);
        debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
        debugfs_remove_recursive(smmu->debugfs);
}

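/*
 * There is no standalone platform driver here: the memory controller
 * driver (drivers/memory/tegra) instantiates the SMMU by calling
 * tegra_smmu_probe() from its own probe routine, roughly (sketch):
 *
 *   mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
 */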
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
{
        struct tegra_smmu *smmu;
        size_t size;
        u32 value;
        int err;

        /* This can happen on Tegra20 which doesn't have an SMMU */
        if (!soc)
                return NULL;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .add_device()
         * callback via the IOMMU device's .drvdata field.
         */
        mc->smmu = smmu;

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!smmu->asids)
                return ERR_PTR(-ENOMEM);

        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;
        smmu->soc = soc;
        smmu->dev = dev;
        smmu->mc = mc;

        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
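        /*
         * The mask must be wide enough to hold num_tlb_lines itself (not
         * just num_tlb_lines - 1), because SMMU_TLB_CONFIG_ACTIVE_LINES()
         * programs the full line count; this assumes num_tlb_lines is a
         * power of two.
         */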
        smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
                smmu->tlb_mask);

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc_all(smmu);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);

        tegra_smmu_ahb_enable();

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
        if (err < 0)
                return ERR_PTR(err);

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_init(smmu);

        return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_exit(smmu);
}