linux/drivers/iommu/tegra-smmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
        struct list_head list;
        struct tegra_smmu *smmu;
        const struct tegra_smmu_group_soc *soc;
        struct iommu_group *group;
        unsigned int swgroup;
};

struct tegra_smmu {
        void __iomem *regs;
        struct device *dev;

        struct tegra_mc *mc;
        const struct tegra_smmu_soc *soc;

        struct list_head groups;

        unsigned long pfn_mask;
        unsigned long tlb_mask;

        unsigned long *asids;
        struct mutex lock;

        struct list_head list;

        struct dentry *debugfs;

        struct iommu_device iommu;      /* IOMMU Core code handle */
};

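/*
 * An address space, one per IOMMU domain: "pd" is the 1024-entry page
 * directory, "pts" holds the page-table pages indexed by PDE, and "count"
 * tracks the number of valid PTEs per page table. "use_count" counts how
 * many attached devices currently share the ASID "id".
 */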
struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
        spinlock_t lock;
        u32 *count;
        struct page **pts;
        struct page *pd;
        dma_addr_t pd_dma;
        unsigned id;
        u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
        return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                          SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
                                          SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK          (~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)  ((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)        ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)        ((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE        (1 << 31)
#define SMMU_PD_WRITABLE        (1 << 30)
#define SMMU_PD_NONSECURE       (1 << 29)

#define SMMU_PDE_READABLE       (1 << 31)
#define SMMU_PDE_WRITABLE       (1 << 30)
#define SMMU_PDE_NONSECURE      (1 << 29)
#define SMMU_PDE_NEXT           (1 << 28)

#define SMMU_PTE_READABLE       (1 << 31)
#define SMMU_PTE_WRITABLE       (1 << 30)
#define SMMU_PTE_NONSECURE      (1 << 29)

#define SMMU_PDE_ATTR           (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                                 SMMU_PDE_NONSECURE)

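/*
 * A 32-bit IOVA is split into a 10-bit page directory index (bits 31:22),
 * a 10-bit page table index (bits 21:12) and a 12-bit page offset, i.e.
 * two levels of 1024 entries describing 4 MiB sections of 4 KiB pages.
 */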
static unsigned int iova_pd_index(unsigned long iova)
{
        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
        addr >>= 12;
        return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
        return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
{
        u32 value;

        offset &= ~(smmu->mc->soc->atom_size - 1);

        if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
                value = 0;
#endif
                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }

        value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

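/*
 * Read back an (arbitrary) SMMU register so that all preceding register
 * writes are guaranteed to have reached the hardware before continuing.
 */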
static inline void smmu_flush(struct tegra_smmu *smmu)
{
        smmu_readl(smmu, SMMU_PTB_ASID);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned long id;

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids)
                return -ENOSPC;

        set_bit(id, smmu->asids);
        *idp = id;

        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        clear_bit(id, smmu->asids);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
        return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
        if (!as->pts) {
                kfree(as->count);
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        spin_lock_init(&as->lock);

        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

        return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);

        /* TODO: free page directory and page tables */

        WARN_ON_ONCE(as->use_count);
        kfree(as->count);
        kfree(as->pts);
        kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
        const struct tegra_smmu_swgroup *group = NULL;
        unsigned int i;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
                        break;
                }
        }

        return group;
}

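/*
 * tegra_smmu_enable() programs the SWGROUP's ASID register and then sets
 * the SMMU enable bit for every memory client belonging to that SWGROUP;
 * tegra_smmu_disable() undoes both steps.
 */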
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        } else {
                pr_warn("%s group from swgroup %u not found\n", __func__,
                                swgroup);
                /* No point moving ahead if group was not found */
                return;
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->regs.smmu.reg);
                value |= BIT(client->regs.smmu.bit);
                smmu_writel(smmu, value, client->regs.smmu.reg);
        }
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->regs.smmu.reg);
                value &= ~BIT(client->regs.smmu.bit);
                smmu_writel(smmu, value, client->regs.smmu.reg);
        }
}

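/*
 * On first use of an address space, map the page directory for DMA,
 * allocate an ASID and program SMMU_PTB_ASID/SMMU_PTB_DATA to point that
 * ASID at the page directory. Subsequent uses only bump the reference
 * count; tegra_smmu_as_unprepare() reverses all of this once the count
 * drops back to zero.
 */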
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err = 0;

        mutex_lock(&smmu->lock);

        if (as->use_count > 0) {
                as->use_count++;
                goto unlock;
        }

        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma)) {
                err = -ENOMEM;
                goto unlock;
        }

        /* We can't handle 64-bit DMA addresses */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
                err = -ENOMEM;
                goto err_unmap;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                goto err_unmap;

        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        mutex_unlock(&smmu->lock);

        return 0;

err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
        mutex_unlock(&smmu->lock);

        return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        mutex_lock(&smmu->lock);

        if (--as->use_count > 0) {
                mutex_unlock(&smmu->lock);
                return;
        }

        tegra_smmu_free_asid(smmu, as->id);

        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

        as->smmu = NULL;

        mutex_unlock(&smmu->lock);
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned int index;
        int err;

        if (!fwspec)
                return -ENOENT;

        for (index = 0; index < fwspec->num_ids; index++) {
                err = tegra_smmu_as_prepare(smmu, as);
                if (err)
                        goto disable;

                tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
        }

        if (index == 0)
                return -ENODEV;

        return 0;

disable:
        while (index--) {
                tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
                tegra_smmu_as_unprepare(smmu, as);
        }

        return err;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct tegra_smmu *smmu = as->smmu;
        unsigned int index;

        if (!fwspec)
                return;

        for (index = 0; index < fwspec->num_ids; index++) {
                tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
                tegra_smmu_as_unprepare(smmu, as);
        }
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                               u32 value)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        u32 *pd = page_address(as->pd);
        unsigned long offset = pd_index * sizeof(*pd);

        /* Set the page directory entry first */
        pd[pd_index] = value;

        /* Then flush the page directory entry from caches */
        dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
                                         sizeof(*pd), DMA_TO_DEVICE);

        /* And flush the IOMMU */
        smmu_flush_ptc(smmu, as->pd_dma, offset);
        smmu_flush_tlb_section(smmu, as->id, iova);
        smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
        u32 *pt = page_address(pt_page);

        return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
                                  dma_addr_t *dmap)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        struct page *pt_page;
        u32 *pd;

        pt_page = as->pts[pd_index];
        if (!pt_page)
                return NULL;

        pd = page_address(as->pd);
        *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

        return tegra_smmu_pte_offset(pt_page, iova);
}

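/*
 * Return a pointer to the PTE for @iova, installing @page as a new page
 * table (and hooking it up via a PDE) if none exists yet. The DMA address
 * of the page table is returned through @dmap so that callers can sync it.
 */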
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       dma_addr_t *dmap, struct page *page)
{
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;

        if (!as->pts[pde]) {
                dma_addr_t dma;

                dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
                                   DMA_TO_DEVICE);
                if (dma_mapping_error(smmu->dev, dma)) {
                        __free_page(page);
                        return NULL;
                }

                if (!smmu_dma_addr_valid(smmu, dma)) {
                        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
                                       DMA_TO_DEVICE);
                        __free_page(page);
                        return NULL;
                }

                as->pts[pde] = page;

                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
                                                              SMMU_PDE_NEXT));

                *dmap = dma;
        } else {
                u32 *pd = page_address(as->pd);

                *dmap = smmu_pde_to_dma(smmu, pd[pde]);
        }

        return tegra_smmu_pte_offset(as->pts[pde], iova);
}

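/*
 * Each page directory entry carries a use count of the valid PTEs in the
 * corresponding page table; the page table is torn down again once that
 * count drops back to zero.
 */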
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pd_index = iova_pd_index(iova);

        as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (--as->count[pde] == 0) {
                struct tegra_smmu *smmu = as->smmu;
                u32 *pd = page_address(as->pd);
                dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

                tegra_smmu_set_pde(as, iova, 0);

                dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
                __free_page(page);
                as->pts[pde] = NULL;
        }
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, dma_addr_t pte_dma, u32 val)
{
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

        *pte = val;

        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
                                         4, DMA_TO_DEVICE);
        smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
                                    unsigned long iova, gfp_t gfp,
                                    unsigned long *flags)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /* First check whether an allocation needs to be done at all */
        if (page)
                return page;

        /*
         * In order to prevent exhaustion of the atomic memory pool, we
         * allocate the page in a sleeping context if the GFP flags permit.
         * Hence the spinlock needs to be unlocked and re-locked after the
         * allocation.
         */
        if (!(gfp & __GFP_ATOMIC))
                spin_unlock_irqrestore(&as->lock, *flags);

        page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

        if (!(gfp & __GFP_ATOMIC))
                spin_lock_irqsave(&as->lock, *flags);

        /*
         * In the case of a blocking allocation, a concurrent mapping may
         * have won the race for this PDE. If so, the page allocated here
         * isn't needed (when our allocation succeeded) and a failure of
         * our allocation isn't fatal.
         */
        if (as->pts[pde]) {
                if (page)
                        __free_page(page);

                page = as->pts[pde];
        }

        return page;
}

static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
                 unsigned long *flags)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        struct page *page;
        u32 pte_attrs;
        u32 *pte;

        page = as_get_pde_page(as, iova, gfp, flags);
        if (!page)
                return -ENOMEM;

        pte = as_get_pte(as, iova, &pte_dma, page);
        if (!pte)
                return -ENOMEM;

        /* If we aren't overwriting a pre-existing entry, increment use */
        if (*pte == 0)
                tegra_smmu_pte_get_use(as, iova);

        pte_attrs = SMMU_PTE_NONSECURE;

        if (prot & IOMMU_READ)
                pte_attrs |= SMMU_PTE_READABLE;

        if (prot & IOMMU_WRITE)
                pte_attrs |= SMMU_PTE_WRITABLE;

        tegra_smmu_set_pte(as, iova, pte, pte_dma,
                           SMMU_PHYS_PFN(paddr) | pte_attrs);

        return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                   size_t size, struct iommu_iotlb_gather *gather)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
        tegra_smmu_pte_put_use(as, iova);

        return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&as->lock, flags);
        ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
        spin_unlock_irqrestore(&as->lock, flags);

        return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size, struct iommu_iotlb_gather *gather)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long flags;

        spin_lock_irqsave(&as->lock, flags);
        size = __tegra_smmu_unmap(domain, iova, size, gather);
        spin_unlock_irqrestore(&as->lock, flags);

        return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long pfn;
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        pfn = *pte & as->smmu->pfn_mask;

        return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

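/*
 * Resolve the memory controller node referenced by an "iommus" specifier
 * to its tegra_smmu instance, which the MC driver stores in its drvdata.
 */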
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
        struct platform_device *pdev;
        struct tegra_mc *mc;

        pdev = of_find_device_by_node(np);
        if (!pdev)
                return NULL;

        mc = platform_get_drvdata(pdev);
        if (!mc)
                return NULL;

        return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
                                struct of_phandle_args *args)
{
        const struct iommu_ops *ops = smmu->iommu.ops;
        int err;

        err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
        if (err < 0) {
                dev_err(dev, "failed to initialize fwspec: %d\n", err);
                return err;
        }

        err = ops->of_xlate(dev, args);
        if (err < 0) {
                dev_err(dev, "failed to parse SW group ID: %d\n", err);
                iommu_fwspec_free(dev);
                return err;
        }

        return 0;
}

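/*
 * Walk the device's "iommus" phandles; for every entry that points at this
 * SMMU, initialize the fwspec and translate the specifier into a SWGROUP
 * ID via ->of_xlate(), which also records the SMMU as the device's IOMMU
 * private data.
 */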
static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = NULL;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                          &args) == 0) {
                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        err = tegra_smmu_configure(smmu, dev, &args);

                        if (err < 0) {
                                of_node_put(args.np);
                                return ERR_PTR(err);
                        }
                }

                of_node_put(args.np);
                index++;
        }

        smmu = dev_iommu_priv_get(dev);
        if (!smmu)
                return ERR_PTR(-ENODEV);

        return &smmu->iommu;
}

static void tegra_smmu_release_device(struct device *dev) {}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
        unsigned int i, j;

        for (i = 0; i < smmu->soc->num_groups; i++)
                for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
                        if (smmu->soc->groups[i].swgroups[j] == swgroup)
                                return &smmu->soc->groups[i];

        return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
        struct tegra_smmu_group *group = iommu_data;
        struct tegra_smmu *smmu = group->smmu;

        mutex_lock(&smmu->lock);
        list_del(&group->list);
        mutex_unlock(&smmu->lock);
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        const struct tegra_smmu_group_soc *soc;
        unsigned int swgroup = fwspec->ids[0];
        struct tegra_smmu_group *group;
        struct iommu_group *grp;

        /* Find the group_soc associated with the swgroup */
        soc = tegra_smmu_find_group(smmu, swgroup);

        mutex_lock(&smmu->lock);

        /* Find an existing iommu_group associated with swgroup or group_soc */
        list_for_each_entry(group, &smmu->groups, list)
                if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
                        grp = iommu_group_ref_get(group->group);
                        mutex_unlock(&smmu->lock);
                        return grp;
                }

        group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
        if (!group) {
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        INIT_LIST_HEAD(&group->list);
        group->swgroup = swgroup;
        group->smmu = smmu;
        group->soc = soc;

        if (dev_is_pci(dev))
                group->group = pci_device_group(dev);
        else
                group->group = generic_device_group(dev);

        if (IS_ERR(group->group)) {
                devm_kfree(smmu->dev, group);
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
        if (soc)
                iommu_group_set_name(group->group, soc->name);
        list_add_tail(&group->list, &smmu->groups);
        mutex_unlock(&smmu->lock);

        return group->group;
}

static int tegra_smmu_of_xlate(struct device *dev,
                               struct of_phandle_args *args)
{
        struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
        struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
        u32 id = args->args[0];

        /*
         * Note: we are releasing the reference to &iommu_pdev->dev here,
         * which is mc->dev. Although some functions in tegra_smmu_ops may
         * keep using its private data beyond this point, it is still safe
         * to do so because the SMMU parent device is the same as the MC,
         * so the reference count isn't strictly necessary.
         */
        put_device(&iommu_pdev->dev);

        dev_iommu_priv_set(dev, mc->smmu);

        return iommu_fwspec_add_ids(dev, &id, 1);
}
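/*
 * For reference, a client typically points at this IOMMU from device tree
 * using an "iommus" phandle plus a single SWGROUP cell, along these lines
 * (illustrative only; the exact label and specifier value come from the
 * SoC's dtsi and dt-bindings headers):
 *
 *      iommus = <&mc TEGRA_SWGROUP_DC>;
 */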

static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .probe_device = tegra_smmu_probe_device,
        .release_device = tegra_smmu_release_device,
        .device_group = tegra_smmu_device_group,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .iova_to_phys = tegra_smmu_iova_to_phys,
        .of_xlate = tegra_smmu_of_xlate,
        .pgsize_bitmap = SZ_4K,
};

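/*
 * If a Tegra30-compatible AHB bridge is present in the device tree, enable
 * SMMU translation for it via tegra_ahb_enable_smmu().
 */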
static void tegra_smmu_ahb_enable(void)
{
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },
                { }
        };
        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
        if (ahb) {
                tegra_ahb_enable_smmu(ahb);
                of_node_put(ahb);
        }
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "swgroup    enabled  ASID\n");
        seq_printf(s, "------------------------\n");

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
                const char *status;
                unsigned int asid;

                value = smmu_readl(smmu, group->reg);

                if (value & SMMU_ASID_ENABLE)
                        status = "yes";
                else
                        status = "no";

                asid = value & SMMU_ASID_MASK;

                seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
                           asid);
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "client       enabled\n");
        seq_printf(s, "--------------------\n");

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];
                const char *status;

                value = smmu_readl(smmu, client->regs.smmu.reg);

                if (value & BIT(client->regs.smmu.bit))
                        status = "yes";
                else
                        status = "no";

                seq_printf(s, "%-12s %s\n", client->name, status);
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
        smmu->debugfs = debugfs_create_dir("smmu", NULL);
        if (!smmu->debugfs)
                return;

        debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_swgroups_fops);
        debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
        debugfs_remove_recursive(smmu->debugfs);
}

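/*
 * Called by the memory controller driver: program the PTC and TLB
 * configuration registers, enable the SMMU, and register the instance
 * with the IOMMU core (sysfs, iommu_device_register() and bus_set_iommu()
 * for the platform and, if enabled, PCI buses).
 */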
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
{
        struct tegra_smmu *smmu;
        size_t size;
        u32 value;
        int err;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .probe_device()
         * callback via the IOMMU device's .drvdata field.
         */
        mc->smmu = smmu;

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!smmu->asids)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&smmu->groups);
        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;
        smmu->soc = soc;
        smmu->dev = dev;
        smmu->mc = mc;

        smmu->pfn_mask =
                BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
        smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
                smmu->tlb_mask);

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc_all(smmu);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);

        tegra_smmu_ahb_enable();

        err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
        if (err)
                return ERR_PTR(err);

        err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
        if (err)
                goto remove_sysfs;

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
        if (err < 0)
                goto unregister;

#ifdef CONFIG_PCI
        err = bus_set_iommu(&pci_bus_type, &tegra_smmu_ops);
        if (err < 0)
                goto unset_platform_bus;
#endif

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_init(smmu);

        return smmu;

unset_platform_bus: __maybe_unused;
        bus_set_iommu(&platform_bus_type, NULL);
unregister:
        iommu_device_unregister(&smmu->iommu);
remove_sysfs:
        iommu_device_sysfs_remove(&smmu->iommu);

        return ERR_PTR(err);
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
        iommu_device_unregister(&smmu->iommu);
        iommu_device_sysfs_remove(&smmu->iommu);

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_exit(smmu);
}