linux/drivers/iommu/amd_iommu.c
   1/*
   2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <jroedel@suse.de>
   4 *         Leo Duran <leo.duran@amd.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 */
  19
  20#include <linux/ratelimit.h>
  21#include <linux/pci.h>
  22#include <linux/pci-ats.h>
  23#include <linux/bitmap.h>
  24#include <linux/slab.h>
  25#include <linux/debugfs.h>
  26#include <linux/scatterlist.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/iommu-helper.h>
  29#include <linux/iommu.h>
  30#include <linux/delay.h>
  31#include <linux/amd-iommu.h>
  32#include <linux/notifier.h>
  33#include <linux/export.h>
  34#include <linux/irq.h>
  35#include <linux/msi.h>
  36#include <linux/dma-contiguous.h>
  37#include <linux/irqdomain.h>
  38#include <asm/irq_remapping.h>
  39#include <asm/io_apic.h>
  40#include <asm/apic.h>
  41#include <asm/hw_irq.h>
  42#include <asm/msidef.h>
  43#include <asm/proto.h>
  44#include <asm/iommu.h>
  45#include <asm/gart.h>
  46#include <asm/dma.h>
  47
  48#include "amd_iommu_proto.h"
  49#include "amd_iommu_types.h"
  50#include "irq_remapping.h"
  51
  52#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  53
  54#define LOOP_TIMEOUT    100000
  55
  56/*
  57 * This bitmap is used to advertise the page sizes our hardware supports
  58 * to the IOMMU core, which will then use this information to split
  59 * physically contiguous memory regions it is mapping into page sizes
  60 * that we support.
  61 *
  62 * 512GB Pages are not supported due to a hardware bug
  63 */
  64#define AMD_IOMMU_PGSIZES       ((~0xFFFUL) & ~(2ULL << 38))
  65
  66static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  67
  68/* List of all available dev_data structures */
  69static LIST_HEAD(dev_data_list);
  70static DEFINE_SPINLOCK(dev_data_list_lock);
  71
  72LIST_HEAD(ioapic_map);
  73LIST_HEAD(hpet_map);
  74
  75/*
  76 * Forward declaration of the iommu_ops implemented by this driver;
  77 * the structure itself is defined further down in this file.
  78 */
  79static const struct iommu_ops amd_iommu_ops;
  80
  81static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
  82int amd_iommu_max_glx_val = -1;
  83
  84static struct dma_map_ops amd_iommu_dma_ops;
  85
  86/*
  87 * This struct contains device specific data for the IOMMU
  88 */
  89struct iommu_dev_data {
  90        struct list_head list;            /* For domain->dev_list */
  91        struct list_head dev_data_list;   /* For global dev_data_list */
  92        struct list_head alias_list;      /* Link alias-groups together */
  93        struct iommu_dev_data *alias_data;/* The alias dev_data */
  94        struct protection_domain *domain; /* Domain the device is bound to */
  95        u16 devid;                        /* PCI Device ID */
  96        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
  97        bool passthrough;                 /* Device is identity mapped */
  98        struct {
  99                bool enabled;
 100                int qdep;
 101        } ats;                            /* ATS state */
 102        bool pri_tlp;                     /* PASID TLB required for
 103                                             PPR completions */
 104        u32 errata;                       /* Bitmap for errata to apply */
 105};
 106
 107/*
 108 * General struct to manage commands sent to an IOMMU
 109 */
 110struct iommu_cmd {
 111        u32 data[4];
 112};
 113
 114struct kmem_cache *amd_iommu_irq_cache;
 115
 116static void update_domain(struct protection_domain *domain);
 117static int protection_domain_init(struct protection_domain *domain);
 118
 119/****************************************************************************
 120 *
 121 * Helper functions
 122 *
 123 ****************************************************************************/
 124
 125static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 126{
 127        return container_of(dom, struct protection_domain, domain);
 128}
 129
 130static struct iommu_dev_data *alloc_dev_data(u16 devid)
 131{
 132        struct iommu_dev_data *dev_data;
 133        unsigned long flags;
 134
 135        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 136        if (!dev_data)
 137                return NULL;
 138
 139        INIT_LIST_HEAD(&dev_data->alias_list);
 140
 141        dev_data->devid = devid;
 142
 143        spin_lock_irqsave(&dev_data_list_lock, flags);
 144        list_add_tail(&dev_data->dev_data_list, &dev_data_list);
 145        spin_unlock_irqrestore(&dev_data_list_lock, flags);
 146
 147        return dev_data;
 148}
 149
 150static void free_dev_data(struct iommu_dev_data *dev_data)
 151{
 152        unsigned long flags;
 153
 154        spin_lock_irqsave(&dev_data_list_lock, flags);
 155        list_del(&dev_data->dev_data_list);
 156        spin_unlock_irqrestore(&dev_data_list_lock, flags);
 157
 158        kfree(dev_data);
 159}
 160
 161static struct iommu_dev_data *search_dev_data(u16 devid)
 162{
 163        struct iommu_dev_data *dev_data;
 164        unsigned long flags;
 165
 166        spin_lock_irqsave(&dev_data_list_lock, flags);
 167        list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
 168                if (dev_data->devid == devid)
 169                        goto out_unlock;
 170        }
 171
 172        dev_data = NULL;
 173
 174out_unlock:
 175        spin_unlock_irqrestore(&dev_data_list_lock, flags);
 176
 177        return dev_data;
 178}
 179
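/*
 * Find the iommu_dev_data structure for a given device id; allocate a
 * new one if none exists yet.
 */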
 180static struct iommu_dev_data *find_dev_data(u16 devid)
 181{
 182        struct iommu_dev_data *dev_data;
 183
 184        dev_data = search_dev_data(devid);
 185
 186        if (dev_data == NULL)
 187                dev_data = alloc_dev_data(devid);
 188
 189        return dev_data;
 190}
 191
 192static inline u16 get_device_id(struct device *dev)
 193{
 194        struct pci_dev *pdev = to_pci_dev(dev);
 195
 196        return PCI_DEVID(pdev->bus->number, pdev->devfn);
 197}
 198
 199static struct iommu_dev_data *get_dev_data(struct device *dev)
 200{
 201        return dev->archdata.iommu;
 202}
 203
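/*
 * Check whether the device implements the ATS, PRI and PASID extended
 * capabilities required for IOMMUv2 functionality.
 */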
 204static bool pci_iommuv2_capable(struct pci_dev *pdev)
 205{
 206        static const int caps[] = {
 207                PCI_EXT_CAP_ID_ATS,
 208                PCI_EXT_CAP_ID_PRI,
 209                PCI_EXT_CAP_ID_PASID,
 210        };
 211        int i, pos;
 212
 213        for (i = 0; i < ARRAY_SIZE(caps); ++i) {
 214                pos = pci_find_ext_capability(pdev, caps[i]);
 215                if (pos == 0)
 216                        return false;
 217        }
 218
 219        return true;
 220}
 221
 222static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 223{
 224        struct iommu_dev_data *dev_data;
 225
 226        dev_data = get_dev_data(&pdev->dev);
 227
 228        return dev_data->errata & (1 << erratum) ? true : false;
 229}
 230
 231/*
 232 * Reserve the address range of a unity mapping in the address allocator
 233 * of the dma_ops domain so it is never handed out for DMA.
 234 */
 235static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
 236                                struct unity_map_entry *e)
 237{
 238        u64 addr;
 239
 240        for (addr = e->address_start; addr < e->address_end;
 241             addr += PAGE_SIZE) {
 242                if (addr < dma_dom->aperture_size)
 243                        __set_bit(addr >> PAGE_SHIFT,
 244                                  dma_dom->aperture[0]->bitmap);
 245        }
 246}
 247
 248/*
 249 * Inits the unity mappings required for a specific device
 250 */
 251static void init_unity_mappings_for_device(struct device *dev,
 252                                           struct dma_ops_domain *dma_dom)
 253{
 254        struct unity_map_entry *e;
 255        u16 devid;
 256
 257        devid = get_device_id(dev);
 258
 259        list_for_each_entry(e, &amd_iommu_unity_map, list) {
 260                if (!(devid >= e->devid_start && devid <= e->devid_end))
 261                        continue;
 262                alloc_unity_mapping(dma_dom, e);
 263        }
 264}
 265
 266/*
 267 * This function checks if the driver got a valid device from the caller to
 268 * avoid dereferencing invalid pointers.
 269 */
 270static bool check_device(struct device *dev)
 271{
 272        u16 devid;
 273
 274        if (!dev || !dev->dma_mask)
 275                return false;
 276
 277        /* No PCI device */
 278        if (!dev_is_pci(dev))
 279                return false;
 280
 281        devid = get_device_id(dev);
 282
 283        /* Out of our scope? */
 284        if (devid > amd_iommu_last_bdf)
 285                return false;
 286
 287        if (amd_iommu_rlookup_table[devid] == NULL)
 288                return false;
 289
 290        return true;
 291}
 292
 293static void init_iommu_group(struct device *dev)
 294{
 295        struct dma_ops_domain *dma_domain;
 296        struct iommu_domain *domain;
 297        struct iommu_group *group;
 298
 299        group = iommu_group_get_for_dev(dev);
 300        if (IS_ERR(group))
 301                return;
 302
 303        domain = iommu_group_default_domain(group);
 304        if (!domain)
 305                goto out;
 306
 307        dma_domain = to_pdomain(domain)->priv;
 308
 309        init_unity_mappings_for_device(dev, dma_domain);
 310out:
 311        iommu_group_put(group);
 312}
 313
 314static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
 315{
 316        *(u16 *)data = alias;
 317        return 0;
 318}
 319
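/*
 * Determine the request id (alias) to use for a device by reconciling
 * the alias reported by the IVRS ACPI table with the DMA alias known
 * to the PCI core.
 */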
 320static u16 get_alias(struct device *dev)
 321{
 322        struct pci_dev *pdev = to_pci_dev(dev);
 323        u16 devid, ivrs_alias, pci_alias;
 324
 325        devid = get_device_id(dev);
 326        ivrs_alias = amd_iommu_alias_table[devid];
 327        pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
 328
 329        if (ivrs_alias == pci_alias)
 330                return ivrs_alias;
 331
 332        /*
 333         * DMA alias showdown
 334         *
 335         * The IVRS is fairly reliable in telling us about aliases, but it
 336         * can't know about every screwy device.  If we don't have an IVRS
 337         * reported alias, use the PCI reported alias.  In that case we may
 338         * still need to initialize the rlookup and dev_table entries if the
 339         * alias is to a non-existent device.
 340         */
 341        if (ivrs_alias == devid) {
 342                if (!amd_iommu_rlookup_table[pci_alias]) {
 343                        amd_iommu_rlookup_table[pci_alias] =
 344                                amd_iommu_rlookup_table[devid];
 345                        memcpy(amd_iommu_dev_table[pci_alias].data,
 346                               amd_iommu_dev_table[devid].data,
 347                               sizeof(amd_iommu_dev_table[pci_alias].data));
 348                }
 349
 350                return pci_alias;
 351        }
 352
 353        pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
 354                "for device %s[%04x:%04x], kernel reported alias "
 355                "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
 356                PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
 357                PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
 358                PCI_FUNC(pci_alias));
 359
 360        /*
 361         * If we don't have a PCI DMA alias and the IVRS alias is on the same
 362         * bus, then the IVRS table may know about a quirk that we don't.
 363         */
 364        if (pci_alias == devid &&
 365            PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
 366                pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
 367                pdev->dma_alias_devfn = ivrs_alias & 0xff;
 368                pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
 369                        PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
 370                        dev_name(dev));
 371        }
 372
 373        return ivrs_alias;
 374}
 375
 376static int iommu_init_device(struct device *dev)
 377{
 378        struct pci_dev *pdev = to_pci_dev(dev);
 379        struct iommu_dev_data *dev_data;
 380        u16 alias;
 381
 382        if (dev->archdata.iommu)
 383                return 0;
 384
 385        dev_data = find_dev_data(get_device_id(dev));
 386        if (!dev_data)
 387                return -ENOMEM;
 388
 389        alias = get_alias(dev);
 390
 391        if (alias != dev_data->devid) {
 392                struct iommu_dev_data *alias_data;
 393
 394                alias_data = find_dev_data(alias);
 395                if (alias_data == NULL) {
 396                        pr_err("AMD-Vi: Warning: Unhandled device %s\n",
 397                                        dev_name(dev));
 398                        free_dev_data(dev_data);
 399                        return -ENOTSUPP;
 400                }
 401                dev_data->alias_data = alias_data;
 402
 403                /* Add device to the alias_list */
 404                list_add(&dev_data->alias_list, &alias_data->alias_list);
 405        }
 406
 407        if (pci_iommuv2_capable(pdev)) {
 408                struct amd_iommu *iommu;
 409
 410                iommu              = amd_iommu_rlookup_table[dev_data->devid];
 411                dev_data->iommu_v2 = iommu->is_iommu_v2;
 412        }
 413
 414        dev->archdata.iommu = dev_data;
 415
 416        iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 417                          dev);
 418
 419        return 0;
 420}
 421
 422static void iommu_ignore_device(struct device *dev)
 423{
 424        u16 devid, alias;
 425
 426        devid = get_device_id(dev);
 427        alias = amd_iommu_alias_table[devid];
 428
 429        memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 430        memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
 431
 432        amd_iommu_rlookup_table[devid] = NULL;
 433        amd_iommu_rlookup_table[alias] = NULL;
 434}
 435
 436static void iommu_uninit_device(struct device *dev)
 437{
 438        struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));
 439
 440        if (!dev_data)
 441                return;
 442
 443        iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 444                            dev);
 445
 446        iommu_group_remove_device(dev);
 447
 448        /* Unlink from alias, it may change if another device is re-plugged */
 449        dev_data->alias_data = NULL;
 450
 451        /* Remove dma-ops */
 452        dev->archdata.dma_ops = NULL;
 453
 454        /*
 455         * We keep dev_data around for unplugged devices and reuse it when the
 456         * device is re-plugged - not doing so would introduce a ton of races.
 457         */
 458}
 459
 460#ifdef CONFIG_AMD_IOMMU_STATS
 461
 462/*
 463 * Initialization code for statistics collection
 464 */
 465
 466DECLARE_STATS_COUNTER(compl_wait);
 467DECLARE_STATS_COUNTER(cnt_map_single);
 468DECLARE_STATS_COUNTER(cnt_unmap_single);
 469DECLARE_STATS_COUNTER(cnt_map_sg);
 470DECLARE_STATS_COUNTER(cnt_unmap_sg);
 471DECLARE_STATS_COUNTER(cnt_alloc_coherent);
 472DECLARE_STATS_COUNTER(cnt_free_coherent);
 473DECLARE_STATS_COUNTER(cross_page);
 474DECLARE_STATS_COUNTER(domain_flush_single);
 475DECLARE_STATS_COUNTER(domain_flush_all);
 476DECLARE_STATS_COUNTER(alloced_io_mem);
 477DECLARE_STATS_COUNTER(total_map_requests);
 478DECLARE_STATS_COUNTER(complete_ppr);
 479DECLARE_STATS_COUNTER(invalidate_iotlb);
 480DECLARE_STATS_COUNTER(invalidate_iotlb_all);
 481DECLARE_STATS_COUNTER(pri_requests);
 482
 483static struct dentry *stats_dir;
 484static struct dentry *de_fflush;
 485
 486static void amd_iommu_stats_add(struct __iommu_counter *cnt)
 487{
 488        if (stats_dir == NULL)
 489                return;
 490
 491        cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
 492                                       &cnt->value);
 493}
 494
 495static void amd_iommu_stats_init(void)
 496{
 497        stats_dir = debugfs_create_dir("amd-iommu", NULL);
 498        if (stats_dir == NULL)
 499                return;
 500
 501        de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
 502                                         &amd_iommu_unmap_flush);
 503
 504        amd_iommu_stats_add(&compl_wait);
 505        amd_iommu_stats_add(&cnt_map_single);
 506        amd_iommu_stats_add(&cnt_unmap_single);
 507        amd_iommu_stats_add(&cnt_map_sg);
 508        amd_iommu_stats_add(&cnt_unmap_sg);
 509        amd_iommu_stats_add(&cnt_alloc_coherent);
 510        amd_iommu_stats_add(&cnt_free_coherent);
 511        amd_iommu_stats_add(&cross_page);
 512        amd_iommu_stats_add(&domain_flush_single);
 513        amd_iommu_stats_add(&domain_flush_all);
 514        amd_iommu_stats_add(&alloced_io_mem);
 515        amd_iommu_stats_add(&total_map_requests);
 516        amd_iommu_stats_add(&complete_ppr);
 517        amd_iommu_stats_add(&invalidate_iotlb);
 518        amd_iommu_stats_add(&invalidate_iotlb_all);
 519        amd_iommu_stats_add(&pri_requests);
 520}
 521
 522#endif
 523
 524/****************************************************************************
 525 *
 526 * Interrupt handling functions
 527 *
 528 ****************************************************************************/
 529
 530static void dump_dte_entry(u16 devid)
 531{
 532        int i;
 533
 534        for (i = 0; i < 4; ++i)
 535                pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
 536                        amd_iommu_dev_table[devid].data[i]);
 537}
 538
 539static void dump_command(unsigned long phys_addr)
 540{
 541        struct iommu_cmd *cmd = phys_to_virt(phys_addr);
 542        int i;
 543
 544        for (i = 0; i < 4; ++i)
 545                pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
 546}
 547
 548static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 549{
 550        int type, devid, domid, flags;
 551        volatile u32 *event = __evt;
 552        int count = 0;
 553        u64 address;
 554
 555retry:
 556        type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 557        devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 558        domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
 559        flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 560        address = (u64)(((u64)event[3]) << 32) | event[2];
 561
 562        if (type == 0) {
 563                /* Did we hit the erratum? */
 564                if (++count == LOOP_TIMEOUT) {
 565                        pr_err("AMD-Vi: No event written to event log\n");
 566                        return;
 567                }
 568                udelay(1);
 569                goto retry;
 570        }
 571
 572        printk(KERN_ERR "AMD-Vi: Event logged [");
 573
 574        switch (type) {
 575        case EVENT_TYPE_ILL_DEV:
 576                printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
 577                       "address=0x%016llx flags=0x%04x]\n",
 578                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 579                       address, flags);
 580                dump_dte_entry(devid);
 581                break;
 582        case EVENT_TYPE_IO_FAULT:
 583                printk("IO_PAGE_FAULT device=%02x:%02x.%x "
 584                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 585                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 586                       domid, address, flags);
 587                break;
 588        case EVENT_TYPE_DEV_TAB_ERR:
 589                printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 590                       "address=0x%016llx flags=0x%04x]\n",
 591                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 592                       address, flags);
 593                break;
 594        case EVENT_TYPE_PAGE_TAB_ERR:
 595                printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 596                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 597                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 598                       domid, address, flags);
 599                break;
 600        case EVENT_TYPE_ILL_CMD:
 601                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 602                dump_command(address);
 603                break;
 604        case EVENT_TYPE_CMD_HARD_ERR:
 605                printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
 606                       "flags=0x%04x]\n", address, flags);
 607                break;
 608        case EVENT_TYPE_IOTLB_INV_TO:
 609                printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
 610                       "address=0x%016llx]\n",
 611                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 612                       address);
 613                break;
 614        case EVENT_TYPE_INV_DEV_REQ:
 615                printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
 616                       "address=0x%016llx flags=0x%04x]\n",
 617                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 618                       address, flags);
 619                break;
 620        default:
 621                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
 622        }
 623
 624        memset(__evt, 0, 4 * sizeof(u32));
 625}
 626
 627static void iommu_poll_events(struct amd_iommu *iommu)
 628{
 629        u32 head, tail;
 630
 631        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 632        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 633
 634        while (head != tail) {
 635                iommu_print_event(iommu, iommu->evt_buf + head);
 636                head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
 637        }
 638
 639        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 640}
 641
 642static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 643{
 644        struct amd_iommu_fault fault;
 645
 646        INC_STATS_COUNTER(pri_requests);
 647
 648        if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 649                pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 650                return;
 651        }
 652
 653        fault.address   = raw[1];
 654        fault.pasid     = PPR_PASID(raw[0]);
 655        fault.device_id = PPR_DEVID(raw[0]);
 656        fault.tag       = PPR_TAG(raw[0]);
 657        fault.flags     = PPR_FLAGS(raw[0]);
 658
 659        atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 660}
 661
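/*
 * Read all pending entries from the PPR log ring buffer, hand them to
 * iommu_handle_ppr_entry() and advance the hardware head pointer.
 */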
 662static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 663{
 664        u32 head, tail;
 665
 666        if (iommu->ppr_log == NULL)
 667                return;
 668
 669        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 670        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 671
 672        while (head != tail) {
 673                volatile u64 *raw;
 674                u64 entry[2];
 675                int i;
 676
 677                raw = (u64 *)(iommu->ppr_log + head);
 678
 679                /*
 680                 * Hardware bug: Interrupt may arrive before the entry is
 681                 * written to memory. If this happens we need to wait for the
 682                 * entry to arrive.
 683                 */
 684                for (i = 0; i < LOOP_TIMEOUT; ++i) {
 685                        if (PPR_REQ_TYPE(raw[0]) != 0)
 686                                break;
 687                        udelay(1);
 688                }
 689
 690                /* Avoid memcpy function-call overhead */
 691                entry[0] = raw[0];
 692                entry[1] = raw[1];
 693
 694                /*
 695                 * To detect the hardware bug we need to clear the entry
 696                 * back to zero.
 697                 */
 698                raw[0] = raw[1] = 0UL;
 699
 700                /* Update head pointer of hardware ring-buffer */
 701                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 702                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 703
 704                /* Handle PPR entry */
 705                iommu_handle_ppr_entry(iommu, entry);
 706
 707                /* Refresh ring-buffer information */
 708                head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 709                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 710        }
 711}
 712
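/*
 * Threaded interrupt handler: process the event and PPR logs and
 * re-read the status register until all interrupt bits stay clear
 * (see the ERBT1312 erratum handling below).
 */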
 713irqreturn_t amd_iommu_int_thread(int irq, void *data)
 714{
 715        struct amd_iommu *iommu = (struct amd_iommu *) data;
 716        u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 717
 718        while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
 719                /* Enable EVT and PPR interrupts again */
 720                writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
 721                        iommu->mmio_base + MMIO_STATUS_OFFSET);
 722
 723                if (status & MMIO_STATUS_EVT_INT_MASK) {
 724                        pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
 725                        iommu_poll_events(iommu);
 726                }
 727
 728                if (status & MMIO_STATUS_PPR_INT_MASK) {
 729                        pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
 730                        iommu_poll_ppr_log(iommu);
 731                }
 732
 733                /*
 734                 * Hardware bug: ERBT1312
 735                 * When re-enabling an interrupt (by writing 1
 736                 * to clear the bit), the hardware might also set
 737                 * the interrupt bit in the event status register
 738                 * again. In this scenario the bit stays set and
 739                 * masks all subsequent interrupts.
 740                 *
 741                 * Workaround: The IOMMU driver should read back the
 742                 * status register and check if the interrupt bits are cleared.
 743                 * If not, the driver needs to go through the interrupt
 744                 * handler again and re-clear the bits.
 745                 */
 746                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 747        }
 748        return IRQ_HANDLED;
 749}
 750
 751irqreturn_t amd_iommu_int_handler(int irq, void *data)
 752{
 753        return IRQ_WAKE_THREAD;
 754}
 755
 756/****************************************************************************
 757 *
 758 * IOMMU command queuing functions
 759 *
 760 ****************************************************************************/
 761
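/*
 * Busy-wait (in 1us steps) until the IOMMU has written the
 * completion-wait semaphore; give up and return -EIO after
 * LOOP_TIMEOUT iterations.
 */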
 762static int wait_on_sem(volatile u64 *sem)
 763{
 764        int i = 0;
 765
 766        while (*sem == 0 && i < LOOP_TIMEOUT) {
 767                udelay(1);
 768                i += 1;
 769        }
 770
 771        if (i == LOOP_TIMEOUT) {
 772                pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
 773                return -EIO;
 774        }
 775
 776        return 0;
 777}
 778
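/*
 * Copy a command into the command buffer at position 'tail' and move
 * the hardware tail pointer past it.
 */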
 779static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 780                               struct iommu_cmd *cmd,
 781                               u32 tail)
 782{
 783        u8 *target;
 784
 785        target = iommu->cmd_buf + tail;
 786        tail   = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
 787
 788        /* Copy command to buffer */
 789        memcpy(target, cmd, sizeof(*cmd));
 790
 791        /* Tell the IOMMU about it */
 792        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 793}
 794
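/*
 * Build a COMPLETION_WAIT command which makes the IOMMU write a
 * non-zero value to the given (8-byte aligned) semaphore once all
 * previous commands have completed.
 */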
 795static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 796{
 797        WARN_ON(address & 0x7ULL);
 798
 799        memset(cmd, 0, sizeof(*cmd));
 800        cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
 801        cmd->data[1] = upper_32_bits(__pa(address));
 802        cmd->data[2] = 1;
 803        CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 804}
 805
 806static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 807{
 808        memset(cmd, 0, sizeof(*cmd));
 809        cmd->data[0] = devid;
 810        CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 811}
 812
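/*
 * Build an INVALIDATE_IOMMU_PAGES command for a domain. If the range
 * spans more than one page the whole domain TLB is flushed instead.
 */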
 813static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 814                                  size_t size, u16 domid, int pde)
 815{
 816        u64 pages;
 817        bool s;
 818
 819        pages = iommu_num_pages(address, size, PAGE_SIZE);
 820        s     = false;
 821
 822        if (pages > 1) {
 823                /*
 824                 * If we have to flush more than one page, flush all
 825                 * TLB entries for this domain
 826                 */
 827                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 828                s = true;
 829        }
 830
 831        address &= PAGE_MASK;
 832
 833        memset(cmd, 0, sizeof(*cmd));
 834        cmd->data[1] |= domid;
 835        cmd->data[2]  = lower_32_bits(address);
 836        cmd->data[3]  = upper_32_bits(address);
 837        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 838        if (s) /* size bit - we flush more than one 4kb page */
 839                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 840        if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 841                cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 842}
 843
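/*
 * Build an INVALIDATE_IOTLB_PAGES command to flush the on-device
 * (ATS) TLB of a device.
 */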
 844static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 845                                  u64 address, size_t size)
 846{
 847        u64 pages;
 848        bool s;
 849
 850        pages = iommu_num_pages(address, size, PAGE_SIZE);
 851        s     = false;
 852
 853        if (pages > 1) {
 854                /*
 855                 * If we have to flush more than one page, flush all
 856                 * TLB entries for this domain
 857                 */
 858                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 859                s = true;
 860        }
 861
 862        address &= PAGE_MASK;
 863
 864        memset(cmd, 0, sizeof(*cmd));
 865        cmd->data[0]  = devid;
 866        cmd->data[0] |= (qdep & 0xff) << 24;
 867        cmd->data[1]  = devid;
 868        cmd->data[2]  = lower_32_bits(address);
 869        cmd->data[3]  = upper_32_bits(address);
 870        CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 871        if (s)
 872                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 873}
 874
 875static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
 876                                  u64 address, bool size)
 877{
 878        memset(cmd, 0, sizeof(*cmd));
 879
 880        address &= ~(0xfffULL);
 881
 882        cmd->data[0]  = pasid;
 883        cmd->data[1]  = domid;
 884        cmd->data[2]  = lower_32_bits(address);
 885        cmd->data[3]  = upper_32_bits(address);
 886        cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 887        cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 888        if (size)
 889                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 890        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 891}
 892
 893static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
 894                                  int qdep, u64 address, bool size)
 895{
 896        memset(cmd, 0, sizeof(*cmd));
 897
 898        address &= ~(0xfffULL);
 899
 900        cmd->data[0]  = devid;
 901        cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
 902        cmd->data[0] |= (qdep  & 0xff) << 24;
 903        cmd->data[1]  = devid;
 904        cmd->data[1] |= (pasid & 0xff) << 16;
 905        cmd->data[2]  = lower_32_bits(address);
 906        cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 907        cmd->data[3]  = upper_32_bits(address);
 908        if (size)
 909                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 910        CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 911}
 912
 913static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
 914                               int status, int tag, bool gn)
 915{
 916        memset(cmd, 0, sizeof(*cmd));
 917
 918        cmd->data[0]  = devid;
 919        if (gn) {
 920                cmd->data[1]  = pasid;
 921                cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
 922        }
 923        cmd->data[3]  = tag & 0x1ff;
 924        cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
 925
 926        CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
 927}
 928
 929static void build_inv_all(struct iommu_cmd *cmd)
 930{
 931        memset(cmd, 0, sizeof(*cmd));
 932        CMD_SET_TYPE(cmd, CMD_INV_ALL);
 933}
 934
 935static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
 936{
 937        memset(cmd, 0, sizeof(*cmd));
 938        cmd->data[0] = devid;
 939        CMD_SET_TYPE(cmd, CMD_INV_IRT);
 940}
 941
 942/*
 943 * Writes the command to the IOMMU's command buffer and informs the
 944 * hardware about the new command.
 945 */
 946static int iommu_queue_command_sync(struct amd_iommu *iommu,
 947                                    struct iommu_cmd *cmd,
 948                                    bool sync)
 949{
 950        u32 left, tail, head, next_tail;
 951        unsigned long flags;
 952
 953        WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 954
 955again:
 956        spin_lock_irqsave(&iommu->lock, flags);
 957
 958        head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 959        tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 960        next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
 961        left      = (head - next_tail) % iommu->cmd_buf_size;
 962
 963        if (left <= 2) {
 964                struct iommu_cmd sync_cmd;
 965                volatile u64 sem = 0;
 966                int ret;
 967
 968                build_completion_wait(&sync_cmd, (u64)&sem);
 969                copy_cmd_to_buffer(iommu, &sync_cmd, tail);
 970
 971                spin_unlock_irqrestore(&iommu->lock, flags);
 972
 973                if ((ret = wait_on_sem(&sem)) != 0)
 974                        return ret;
 975
 976                goto again;
 977        }
 978
 979        copy_cmd_to_buffer(iommu, cmd, tail);
 980
 981        /* We need to sync now to make sure all commands are processed */
 982        iommu->need_sync = sync;
 983
 984        spin_unlock_irqrestore(&iommu->lock, flags);
 985
 986        return 0;
 987}
 988
 989static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 990{
 991        return iommu_queue_command_sync(iommu, cmd, true);
 992}
 993
 994/*
 995 * This function queues a completion wait command into the command
 996 * buffer of an IOMMU
 997 */
 998static int iommu_completion_wait(struct amd_iommu *iommu)
 999{
1000        struct iommu_cmd cmd;
1001        volatile u64 sem = 0;
1002        int ret;
1003
1004        if (!iommu->need_sync)
1005                return 0;
1006
1007        build_completion_wait(&cmd, (u64)&sem);
1008
1009        ret = iommu_queue_command_sync(iommu, &cmd, false);
1010        if (ret)
1011                return ret;
1012
1013        return wait_on_sem(&sem);
1014}
1015
1016static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1017{
1018        struct iommu_cmd cmd;
1019
1020        build_inv_dte(&cmd, devid);
1021
1022        return iommu_queue_command(iommu, &cmd);
1023}
1024
1025static void iommu_flush_dte_all(struct amd_iommu *iommu)
1026{
1027        u32 devid;
1028
1029        for (devid = 0; devid <= 0xffff; ++devid)
1030                iommu_flush_dte(iommu, devid);
1031
1032        iommu_completion_wait(iommu);
1033}
1034
1035/*
1036 * This function uses heavy locking and may disable irqs for some time. But
1037 * this is no issue because it is only called during resume.
1038 */
1039static void iommu_flush_tlb_all(struct amd_iommu *iommu)
1040{
1041        u32 dom_id;
1042
1043        for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
1044                struct iommu_cmd cmd;
1045                build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1046                                      dom_id, 1);
1047                iommu_queue_command(iommu, &cmd);
1048        }
1049
1050        iommu_completion_wait(iommu);
1051}
1052
1053static void iommu_flush_all(struct amd_iommu *iommu)
1054{
1055        struct iommu_cmd cmd;
1056
1057        build_inv_all(&cmd);
1058
1059        iommu_queue_command(iommu, &cmd);
1060        iommu_completion_wait(iommu);
1061}
1062
1063static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1064{
1065        struct iommu_cmd cmd;
1066
1067        build_inv_irt(&cmd, devid);
1068
1069        iommu_queue_command(iommu, &cmd);
1070}
1071
1072static void iommu_flush_irt_all(struct amd_iommu *iommu)
1073{
1074        u32 devid;
1075
1076        for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
1077                iommu_flush_irt(iommu, devid);
1078
1079        iommu_completion_wait(iommu);
1080}
1081
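/*
 * Flush everything the IOMMU caches: use a single INVALIDATE_ALL
 * command when the hardware supports it, otherwise flush device table
 * entries, interrupt remapping tables and TLBs individually.
 */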
1082void iommu_flush_all_caches(struct amd_iommu *iommu)
1083{
1084        if (iommu_feature(iommu, FEATURE_IA)) {
1085                iommu_flush_all(iommu);
1086        } else {
1087                iommu_flush_dte_all(iommu);
1088                iommu_flush_irt_all(iommu);
1089                iommu_flush_tlb_all(iommu);
1090        }
1091}
1092
1093/*
1094 * Command send function for flushing the on-device TLB
1095 */
1096static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1097                              u64 address, size_t size)
1098{
1099        struct amd_iommu *iommu;
1100        struct iommu_cmd cmd;
1101        int qdep;
1102
1103        qdep     = dev_data->ats.qdep;
1104        iommu    = amd_iommu_rlookup_table[dev_data->devid];
1105
1106        build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1107
1108        return iommu_queue_command(iommu, &cmd);
1109}
1110
1111/*
1112 * Command send function for invalidating a device table entry
1113 */
1114static int device_flush_dte(struct iommu_dev_data *dev_data)
1115{
1116        struct amd_iommu *iommu;
1117        int ret;
1118
1119        iommu = amd_iommu_rlookup_table[dev_data->devid];
1120
1121        ret = iommu_flush_dte(iommu, dev_data->devid);
1122        if (ret)
1123                return ret;
1124
1125        if (dev_data->ats.enabled)
1126                ret = device_flush_iotlb(dev_data, 0, ~0UL);
1127
1128        return ret;
1129}
1130
1131/*
1132 * TLB invalidation function which is called from the mapping functions.
1133 * It invalidates a single PTE if the range to flush is within a single
1134 * page. Otherwise it flushes the whole TLB of the IOMMU.
1135 */
1136static void __domain_flush_pages(struct protection_domain *domain,
1137                                 u64 address, size_t size, int pde)
1138{
1139        struct iommu_dev_data *dev_data;
1140        struct iommu_cmd cmd;
1141        int ret = 0, i;
1142
1143        build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1144
1145        for (i = 0; i < amd_iommus_present; ++i) {
1146                if (!domain->dev_iommu[i])
1147                        continue;
1148
1149                /*
1150                 * Devices of this domain are behind this IOMMU
1151                 * We need a TLB flush
1152                 */
1153                ret |= iommu_queue_command(amd_iommus[i], &cmd);
1154        }
1155
1156        list_for_each_entry(dev_data, &domain->dev_list, list) {
1157
1158                if (!dev_data->ats.enabled)
1159                        continue;
1160
1161                ret |= device_flush_iotlb(dev_data, address, size);
1162        }
1163
1164        WARN_ON(ret);
1165}
1166
1167static void domain_flush_pages(struct protection_domain *domain,
1168                               u64 address, size_t size)
1169{
1170        __domain_flush_pages(domain, address, size, 0);
1171}
1172
1173/* Flush the whole IO/TLB for a given protection domain */
1174static void domain_flush_tlb(struct protection_domain *domain)
1175{
1176        __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
1177}
1178
1179/* Flush the whole IO/TLB for a given protection domain - including PDE */
1180static void domain_flush_tlb_pde(struct protection_domain *domain)
1181{
1182        __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1183}
1184
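/*
 * Wait until all commands queued to the IOMMUs serving this domain
 * have completed.
 */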
1185static void domain_flush_complete(struct protection_domain *domain)
1186{
1187        int i;
1188
1189        for (i = 0; i < amd_iommus_present; ++i) {
1190                if (!domain->dev_iommu[i])
1191                        continue;
1192
1193                /*
1194                 * Devices of this domain are behind this IOMMU
1195                 * We need to wait for completion of all commands.
1196                 */
1197                iommu_completion_wait(amd_iommus[i]);
1198        }
1199}
1200
1201
1202/*
1203 * This function flushes the DTEs for all devices in domain
1204 */
1205static void domain_flush_devices(struct protection_domain *domain)
1206{
1207        struct iommu_dev_data *dev_data;
1208
1209        list_for_each_entry(dev_data, &domain->dev_list, list)
1210                device_flush_dte(dev_data);
1211}
1212
1213/****************************************************************************
1214 *
1215 * The functions below are used to create the page table mappings for
1216 * unity mapped regions.
1217 *
1218 ****************************************************************************/
1219
1220/*
1221 * This function is used to add another level to an IO page table. Adding
1222 * another level increases the size of the address space by 9 bits, up to
1223 * a maximum of 64 bits.
1224 */
1225static bool increase_address_space(struct protection_domain *domain,
1226                                   gfp_t gfp)
1227{
1228        u64 *pte;
1229
1230        if (domain->mode == PAGE_MODE_6_LEVEL)
1231                /* address space already 64 bit large */
1232                return false;
1233
1234        pte = (void *)get_zeroed_page(gfp);
1235        if (!pte)
1236                return false;
1237
1238        *pte             = PM_LEVEL_PDE(domain->mode,
1239                                        virt_to_phys(domain->pt_root));
1240        domain->pt_root  = pte;
1241        domain->mode    += 1;
1242        domain->updated  = true;
1243
1244        return true;
1245}
1246
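/*
 * Walk the page table of a domain down to the level that maps
 * 'page_size', allocating missing page table pages on the way, and
 * return a pointer to the PTE for 'address'.
 */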
1247static u64 *alloc_pte(struct protection_domain *domain,
1248                      unsigned long address,
1249                      unsigned long page_size,
1250                      u64 **pte_page,
1251                      gfp_t gfp)
1252{
1253        int level, end_lvl;
1254        u64 *pte, *page;
1255
1256        BUG_ON(!is_power_of_2(page_size));
1257
1258        while (address > PM_LEVEL_SIZE(domain->mode))
1259                increase_address_space(domain, gfp);
1260
1261        level   = domain->mode - 1;
1262        pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1263        address = PAGE_SIZE_ALIGN(address, page_size);
1264        end_lvl = PAGE_SIZE_LEVEL(page_size);
1265
1266        while (level > end_lvl) {
1267                if (!IOMMU_PTE_PRESENT(*pte)) {
1268                        page = (u64 *)get_zeroed_page(gfp);
1269                        if (!page)
1270                                return NULL;
1271                        *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
1272                }
1273
1274                /* No level skipping support yet */
1275                if (PM_PTE_LEVEL(*pte) != level)
1276                        return NULL;
1277
1278                level -= 1;
1279
1280                pte = IOMMU_PTE_PAGE(*pte);
1281
1282                if (pte_page && level == end_lvl)
1283                        *pte_page = pte;
1284
1285                pte = &pte[PM_LEVEL_INDEX(level, address)];
1286        }
1287
1288        return pte;
1289}
1290
1291/*
1292 * This function checks if there is a PTE for a given dma address. If
1293 * there is one, it returns the pointer to it.
1294 */
1295static u64 *fetch_pte(struct protection_domain *domain,
1296                      unsigned long address,
1297                      unsigned long *page_size)
1298{
1299        int level;
1300        u64 *pte;
1301
1302        if (address > PM_LEVEL_SIZE(domain->mode))
1303                return NULL;
1304
1305        level      =  domain->mode - 1;
1306        pte        = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1307        *page_size =  PTE_LEVEL_PAGE_SIZE(level);
1308
1309        while (level > 0) {
1310
1311                /* Not Present */
1312                if (!IOMMU_PTE_PRESENT(*pte))
1313                        return NULL;
1314
1315                /* Large PTE */
1316                if (PM_PTE_LEVEL(*pte) == 7 ||
1317                    PM_PTE_LEVEL(*pte) == 0)
1318                        break;
1319
1320                /* No level skipping support yet */
1321                if (PM_PTE_LEVEL(*pte) != level)
1322                        return NULL;
1323
1324                level -= 1;
1325
1326                /* Walk to the next level */
1327                pte        = IOMMU_PTE_PAGE(*pte);
1328                pte        = &pte[PM_LEVEL_INDEX(level, address)];
1329                *page_size = PTE_LEVEL_PAGE_SIZE(level);
1330        }
1331
1332        if (PM_PTE_LEVEL(*pte) == 0x07) {
1333                unsigned long pte_mask;
1334
1335                /*
1336                 * If we have a series of large PTEs, make
1337                 * sure to return a pointer to the first one.
1338                 */
1339                *page_size = pte_mask = PTE_PAGE_SIZE(*pte);
1340                pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
1341                pte        = (u64 *)(((unsigned long)pte) & pte_mask);
1342        }
1343
1344        return pte;
1345}
1346
1347/*
1348 * Generic mapping function. It maps a physical address into a DMA
1349 * address space and allocates the page table pages if necessary.
1350 * In the future it can be extended to a generic mapping function
1351 * supporting all features of AMD IOMMU page tables like level skipping
1352 * and full 64 bit address spaces.
1353 */
1354static int iommu_map_page(struct protection_domain *dom,
1355                          unsigned long bus_addr,
1356                          unsigned long phys_addr,
1357                          int prot,
1358                          unsigned long page_size)
1359{
1360        u64 __pte, *pte;
1361        int i, count;
1362
1363        BUG_ON(!IS_ALIGNED(bus_addr, page_size));
1364        BUG_ON(!IS_ALIGNED(phys_addr, page_size));
1365
1366        if (!(prot & IOMMU_PROT_MASK))
1367                return -EINVAL;
1368
1369        count = PAGE_SIZE_PTE_COUNT(page_size);
1370        pte   = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
1371
1372        if (!pte)
1373                return -ENOMEM;
1374
1375        for (i = 0; i < count; ++i)
1376                if (IOMMU_PTE_PRESENT(pte[i]))
1377                        return -EBUSY;
1378
1379        if (count > 1) {
1380                __pte = PAGE_SIZE_PTE(phys_addr, page_size);
1381                __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
1382        } else
1383                __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
1384
1385        if (prot & IOMMU_PROT_IR)
1386                __pte |= IOMMU_PTE_IR;
1387        if (prot & IOMMU_PROT_IW)
1388                __pte |= IOMMU_PTE_IW;
1389
1390        for (i = 0; i < count; ++i)
1391                pte[i] = __pte;
1392
1393        update_domain(dom);
1394
1395        return 0;
1396}
1397
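/*
 * Clear the PTEs backing 'page_size' bytes starting at 'bus_addr' and
 * return the number of bytes actually unmapped (which can be larger
 * when a large PTE covers part of the range).
 */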
1398static unsigned long iommu_unmap_page(struct protection_domain *dom,
1399                                      unsigned long bus_addr,
1400                                      unsigned long page_size)
1401{
1402        unsigned long long unmapped;
1403        unsigned long unmap_size;
1404        u64 *pte;
1405
1406        BUG_ON(!is_power_of_2(page_size));
1407
1408        unmapped = 0;
1409
1410        while (unmapped < page_size) {
1411
1412                pte = fetch_pte(dom, bus_addr, &unmap_size);
1413
1414                if (pte) {
1415                        int i, count;
1416
1417                        count = PAGE_SIZE_PTE_COUNT(unmap_size);
1418                        for (i = 0; i < count; i++)
1419                                pte[i] = 0ULL;
1420                }
1421
1422                bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1423                unmapped += unmap_size;
1424        }
1425
1426        BUG_ON(unmapped && !is_power_of_2(unmapped));
1427
1428        return unmapped;
1429}
1430
1431/****************************************************************************
1432 *
1433 * The next functions belong to the address allocator for the dma_ops
1434 * interface functions. They work like the allocators in the other IOMMU
1435 * drivers. It is basically a bitmap which marks the allocated pages in
1436 * the aperture. Maybe it could be enhanced in the future to a more
1437 * efficient allocator.
1438 *
1439 ****************************************************************************/
1440
1441/*
1442 * The address allocator core functions.
1443 *
1444 * called with domain->lock held
1445 */
1446
1447/*
1448 * Used to reserve address ranges in the aperture (e.g. for exclusion
1449 * ranges).
1450 */
1451static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1452                                      unsigned long start_page,
1453                                      unsigned int pages)
1454{
1455        unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1456
1457        if (start_page + pages > last_page)
1458                pages = last_page - start_page;
1459
1460        for (i = start_page; i < start_page + pages; ++i) {
1461                int index = i / APERTURE_RANGE_PAGES;
1462                int page  = i % APERTURE_RANGE_PAGES;
1463                __set_bit(page, dom->aperture[index]->bitmap);
1464        }
1465}
1466
1467/*
1468 * This function is used to add a new aperture range to an existing
1469 * aperture in case of dma_ops domain allocation or address allocation
1470 * failure.
1471 */
1472static int alloc_new_range(struct dma_ops_domain *dma_dom,
1473                           bool populate, gfp_t gfp)
1474{
1475        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
1476        struct amd_iommu *iommu;
1477        unsigned long i, old_size, pte_pgsize;
1478
1479#ifdef CONFIG_IOMMU_STRESS
1480        populate = false;
1481#endif
1482
1483        if (index >= APERTURE_MAX_RANGES)
1484                return -ENOMEM;
1485
1486        dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
1487        if (!dma_dom->aperture[index])
1488                return -ENOMEM;
1489
1490        dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
1491        if (!dma_dom->aperture[index]->bitmap)
1492                goto out_free;
1493
1494        dma_dom->aperture[index]->offset = dma_dom->aperture_size;
1495
1496        if (populate) {
1497                unsigned long address = dma_dom->aperture_size;
1498                int i, num_ptes = APERTURE_RANGE_PAGES / 512;
1499                u64 *pte, *pte_page;
1500
1501                for (i = 0; i < num_ptes; ++i) {
1502                        pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
1503                                        &pte_page, gfp);
1504                        if (!pte)
1505                                goto out_free;
1506
1507                        dma_dom->aperture[index]->pte_pages[i] = pte_page;
1508
1509                        address += APERTURE_RANGE_SIZE / 64;
1510                }
1511        }
1512
1513        old_size                = dma_dom->aperture_size;
1514        dma_dom->aperture_size += APERTURE_RANGE_SIZE;
1515
1516        /* Reserve address range used for MSI messages */
1517        if (old_size < MSI_ADDR_BASE_LO &&
1518            dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
1519                unsigned long spage;
1520                int pages;
1521
1522                pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
1523                spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
1524
1525                dma_ops_reserve_addresses(dma_dom, spage, pages);
1526        }
1527
1528        /* Initialize the exclusion range if necessary */
1529        for_each_iommu(iommu) {
1530                if (iommu->exclusion_start &&
1531                    iommu->exclusion_start >= dma_dom->aperture[index]->offset
1532                    && iommu->exclusion_start < dma_dom->aperture_size) {
1533                        unsigned long startpage;
1534                        int pages = iommu_num_pages(iommu->exclusion_start,
1535                                                    iommu->exclusion_length,
1536                                                    PAGE_SIZE);
1537                        startpage = iommu->exclusion_start >> PAGE_SHIFT;
1538                        dma_ops_reserve_addresses(dma_dom, startpage, pages);
1539                }
1540        }
1541
1542        /*
1543         * Check for areas already mapped as present in the new aperture
1544         * range and mark those pages as reserved in the allocator. Such
1545         * mappings may already exist as a result of requested unity
1546         * mappings for devices.
1547         */
1548        for (i = dma_dom->aperture[index]->offset;
1549             i < dma_dom->aperture_size;
1550             i += pte_pgsize) {
1551                u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
1552                if (!pte || !IOMMU_PTE_PRESENT(*pte))
1553                        continue;
1554
1555                dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
1556                                          pte_pgsize >> PAGE_SHIFT);
1557        }
1558
1559        update_domain(&dma_dom->domain);
1560
1561        return 0;
1562
1563out_free:
1564        update_domain(&dma_dom->domain);
1565
1566        free_page((unsigned long)dma_dom->aperture[index]->bitmap);
1567
1568        kfree(dma_dom->aperture[index]);
1569        dma_dom->aperture[index] = NULL;
1570
1571        return -ENOMEM;
1572}
1573
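/*
 * Scan the aperture ranges, starting at 'start', for 'pages' free
 * pages that lie below 'dma_mask' and respect the device's segment
 * boundary; returns the DMA address or -1 on failure.
 */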
1574static unsigned long dma_ops_area_alloc(struct device *dev,
1575                                        struct dma_ops_domain *dom,
1576                                        unsigned int pages,
1577                                        unsigned long align_mask,
1578                                        u64 dma_mask,
1579                                        unsigned long start)
1580{
1581        unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
1582        int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
1583        int i = start >> APERTURE_RANGE_SHIFT;
1584        unsigned long boundary_size, mask;
1585        unsigned long address = -1;
1586        unsigned long limit;
1587
1588        next_bit >>= PAGE_SHIFT;
1589
1590        mask = dma_get_seg_boundary(dev);
1591
1592        boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
1593                                   1UL << (BITS_PER_LONG - PAGE_SHIFT);
1594
1595        for (;i < max_index; ++i) {
1596                unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
1597
1598                if (dom->aperture[i]->offset >= dma_mask)
1599                        break;
1600
1601                limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
1602                                               dma_mask >> PAGE_SHIFT);
1603
1604                address = iommu_area_alloc(dom->aperture[i]->bitmap,
1605                                           limit, next_bit, pages, 0,
1606                                            boundary_size, align_mask);
1607                if (address != -1) {
1608                        address = dom->aperture[i]->offset +
1609                                  (address << PAGE_SHIFT);
1610                        dom->next_address = address + (pages << PAGE_SHIFT);
1611                        break;
1612                }
1613
1614                next_bit = 0;
1615        }
1616
1617        return address;
1618}
1619
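/*
 * Allocate a range of DMA addresses. First try from the last allocation
 * position (dom->next_address); if that fails, wrap around to the start
 * of the aperture and schedule an IOTLB flush. Returns DMA_ERROR_CODE
 * if both attempts fail.
 */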
1620static unsigned long dma_ops_alloc_addresses(struct device *dev,
1621                                             struct dma_ops_domain *dom,
1622                                             unsigned int pages,
1623                                             unsigned long align_mask,
1624                                             u64 dma_mask)
1625{
1626        unsigned long address;
1627
1628#ifdef CONFIG_IOMMU_STRESS
1629        dom->next_address = 0;
1630        dom->need_flush = true;
1631#endif
1632
1633        address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1634                                     dma_mask, dom->next_address);
1635
1636        if (address == -1) {
1637                dom->next_address = 0;
1638                address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1639                                             dma_mask, 0);
1640                dom->need_flush = true;
1641        }
1642
1643        if (unlikely(address == -1))
1644                address = DMA_ERROR_CODE;
1645
1646        WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
1647
1648        return address;
1649}
1650
1651/*
1652 * The address free function.
1653 *
1654 * called with domain->lock held
1655 */
1656static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1657                                   unsigned long address,
1658                                   unsigned int pages)
1659{
1660        unsigned i = address >> APERTURE_RANGE_SHIFT;
1661        struct aperture_range *range = dom->aperture[i];
1662
1663        BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1664
1665#ifdef CONFIG_IOMMU_STRESS
1666        if (i < 4)
1667                return;
1668#endif
1669
1670        if (address >= dom->next_address)
1671                dom->need_flush = true;
1672
1673        address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
1674
1675        bitmap_clear(range->bitmap, address, pages);
1676
1677}
1678
1679/****************************************************************************
1680 *
1681 * The next functions belong to the domain allocation. A domain is
1682 * allocated for every IOMMU as the default domain. If device isolation
1683 * is enabled, every device gets its own domain. The most important thing
1684 * about domains is the page table mapping the DMA address space they
1685 * contain.
1686 *
1687 ****************************************************************************/
1688
1689/*
1690 * This function adds a protection domain to the global protection domain list
1691 */
1692static void add_domain_to_list(struct protection_domain *domain)
1693{
1694        unsigned long flags;
1695
1696        spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1697        list_add(&domain->list, &amd_iommu_pd_list);
1698        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1699}
1700
1701/*
1702 * This function removes a protection domain from the global
1703 * protection domain list
1704 */
1705static void del_domain_from_list(struct protection_domain *domain)
1706{
1707        unsigned long flags;
1708
1709        spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1710        list_del(&domain->list);
1711        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1712}
1713
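/*
 * Allocate a protection domain id from the global bitmap. Domain id 0
 * is reserved, so a return value of 0 means the allocation failed.
 */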
1714static u16 domain_id_alloc(void)
1715{
1716        unsigned long flags;
1717        int id;
1718
1719        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1720        id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1721        BUG_ON(id == 0);
1722        if (id > 0 && id < MAX_DOMAIN_ID)
1723                __set_bit(id, amd_iommu_pd_alloc_bitmap);
1724        else
1725                id = 0;
1726        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1727
1728        return id;
1729}
1730
1731static void domain_id_free(int id)
1732{
1733        unsigned long flags;
1734
1735        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1736        if (id > 0 && id < MAX_DOMAIN_ID)
1737                __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1738        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1739}
1740
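/*
 * DEFINE_FREE_PT_FN generates the free_pt_lX() helpers used by
 * free_pagetable(). Each helper walks one page-table level, calls FN on
 * every lower-level table it references (skipping non-present entries
 * and large/last-level PTEs, which have no table below them) and then
 * frees its own table page.
 */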
1741#define DEFINE_FREE_PT_FN(LVL, FN)                              \
1742static void free_pt_##LVL (unsigned long __pt)                  \
1743{                                                               \
1744        unsigned long p;                                        \
1745        u64 *pt;                                                \
1746        int i;                                                  \
1747                                                                \
1748        pt = (u64 *)__pt;                                       \
1749                                                                \
1750        for (i = 0; i < 512; ++i) {                             \
1751                /* PTE present? */                              \
1752                if (!IOMMU_PTE_PRESENT(pt[i]))                  \
1753                        continue;                               \
1754                                                                \
1755                /* Last level or large PTE? Nothing to free */ \
1756                if (PM_PTE_LEVEL(pt[i]) == 0 ||                 \
1757                    PM_PTE_LEVEL(pt[i]) == 7)                   \
1758                        continue;                               \
1759                                                                \
1760                p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);       \
1761                FN(p);                                          \
1762        }                                                       \
1763        free_page((unsigned long)pt);                           \
1764}
1765
1766DEFINE_FREE_PT_FN(l2, free_page)
1767DEFINE_FREE_PT_FN(l3, free_pt_l2)
1768DEFINE_FREE_PT_FN(l4, free_pt_l3)
1769DEFINE_FREE_PT_FN(l5, free_pt_l4)
1770DEFINE_FREE_PT_FN(l6, free_pt_l5)
1771
1772static void free_pagetable(struct protection_domain *domain)
1773{
1774        unsigned long root = (unsigned long)domain->pt_root;
1775
1776        switch (domain->mode) {
1777        case PAGE_MODE_NONE:
1778                break;
1779        case PAGE_MODE_1_LEVEL:
1780                free_page(root);
1781                break;
1782        case PAGE_MODE_2_LEVEL:
1783                free_pt_l2(root);
1784                break;
1785        case PAGE_MODE_3_LEVEL:
1786                free_pt_l3(root);
1787                break;
1788        case PAGE_MODE_4_LEVEL:
1789                free_pt_l4(root);
1790                break;
1791        case PAGE_MODE_5_LEVEL:
1792                free_pt_l5(root);
1793                break;
1794        case PAGE_MODE_6_LEVEL:
1795                free_pt_l6(root);
1796                break;
1797        default:
1798                BUG();
1799        }
1800}
1801
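/*
 * Helpers used by free_gcr3_table() to free the pages referenced by the
 * one- or two-level GCR3 table of an IOMMUv2 domain.
 */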
1802static void free_gcr3_tbl_level1(u64 *tbl)
1803{
1804        u64 *ptr;
1805        int i;
1806
1807        for (i = 0; i < 512; ++i) {
1808                if (!(tbl[i] & GCR3_VALID))
1809                        continue;
1810
1811                ptr = __va(tbl[i] & PAGE_MASK);
1812
1813                free_page((unsigned long)ptr);
1814        }
1815}
1816
1817static void free_gcr3_tbl_level2(u64 *tbl)
1818{
1819        u64 *ptr;
1820        int i;
1821
1822        for (i = 0; i < 512; ++i) {
1823                if (!(tbl[i] & GCR3_VALID))
1824                        continue;
1825
1826                ptr = __va(tbl[i] & PAGE_MASK);
1827
1828                free_gcr3_tbl_level1(ptr);
1829        }
1830}
1831
1832static void free_gcr3_table(struct protection_domain *domain)
1833{
1834        if (domain->glx == 2)
1835                free_gcr3_tbl_level2(domain->gcr3_tbl);
1836        else if (domain->glx == 1)
1837                free_gcr3_tbl_level1(domain->gcr3_tbl);
1838        else if (domain->glx != 0)
1839                BUG();
1840
1841        free_page((unsigned long)domain->gcr3_tbl);
1842}
1843
1844/*
1845 * Free a domain, only used if something went wrong in the
1846 * allocation path and we need to free an already allocated page table
1847 */
1848static void dma_ops_domain_free(struct dma_ops_domain *dom)
1849{
1850        int i;
1851
1852        if (!dom)
1853                return;
1854
1855        del_domain_from_list(&dom->domain);
1856
1857        free_pagetable(&dom->domain);
1858
1859        for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1860                if (!dom->aperture[i])
1861                        continue;
1862                free_page((unsigned long)dom->aperture[i]->bitmap);
1863                kfree(dom->aperture[i]);
1864        }
1865
1866        kfree(dom);
1867}
1868
1869/*
1870 * Allocates a new protection domain usable for the dma_ops functions.
1871 * It also initializes the page table and the address allocator data
1872 * structures required for the dma_ops interface
1873 */
1874static struct dma_ops_domain *dma_ops_domain_alloc(void)
1875{
1876        struct dma_ops_domain *dma_dom;
1877
1878        dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1879        if (!dma_dom)
1880                return NULL;
1881
1882        if (protection_domain_init(&dma_dom->domain))
1883                goto free_dma_dom;
1884
1885        dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
1886        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1887        dma_dom->domain.flags = PD_DMA_OPS_MASK;
1888        dma_dom->domain.priv = dma_dom;
1889        if (!dma_dom->domain.pt_root)
1890                goto free_dma_dom;
1891
1892        dma_dom->need_flush = false;
1893
1894        add_domain_to_list(&dma_dom->domain);
1895
1896        if (alloc_new_range(dma_dom, true, GFP_KERNEL))
1897                goto free_dma_dom;
1898
1899        /*
1900         * Mark the first page as allocated so we never return 0 as
1901         * a valid dma-address and can use 0 as the error value.
1902         */
1903        dma_dom->aperture[0]->bitmap[0] = 1;
1904        dma_dom->next_address = 0;
1905
1906
1907        return dma_dom;
1908
1909free_dma_dom:
1910        dma_ops_domain_free(dma_dom);
1911
1912        return NULL;
1913}
1914
1915/*
1916 * little helper function to check whether a given protection domain is a
1917 * dma_ops domain
1918 */
1919static bool dma_ops_domain(struct protection_domain *domain)
1920{
1921        return domain->flags & PD_DMA_OPS_MASK;
1922}
1923
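/*
 * Write the device table entry (DTE) for @devid: encode the page-table
 * root and translation mode, the IOTLB enable flag for ATS devices and,
 * for IOMMUv2 domains, the GCR3 table pointer and GLX value. The domain
 * id goes into the lower 16 bits of the flags word.
 */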
1924static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
1925{
1926        u64 pte_root = 0;
1927        u64 flags = 0;
1928
1929        if (domain->mode != PAGE_MODE_NONE)
1930                pte_root = virt_to_phys(domain->pt_root);
1931
1932        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1933                    << DEV_ENTRY_MODE_SHIFT;
1934        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
1935
1936        flags = amd_iommu_dev_table[devid].data[1];
1937
1938        if (ats)
1939                flags |= DTE_FLAG_IOTLB;
1940
1941        if (domain->flags & PD_IOMMUV2_MASK) {
1942                u64 gcr3 = __pa(domain->gcr3_tbl);
1943                u64 glx  = domain->glx;
1944                u64 tmp;
1945
1946                pte_root |= DTE_FLAG_GV;
1947                pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1948
1949                /* First mask out possible old values for GCR3 table */
1950                tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1951                flags    &= ~tmp;
1952
1953                tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1954                flags    &= ~tmp;
1955
1956                /* Encode GCR3 table into DTE */
1957                tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1958                pte_root |= tmp;
1959
1960                tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1961                flags    |= tmp;
1962
1963                tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1964                flags    |= tmp;
1965        }
1966
1967        flags &= ~(0xffffUL);
1968        flags |= domain->id;
1969
1970        amd_iommu_dev_table[devid].data[1]  = flags;
1971        amd_iommu_dev_table[devid].data[0]  = pte_root;
1972}
1973
1974static void clear_dte_entry(u16 devid)
1975{
1976        /* remove entry from the device table seen by the hardware */
1977        amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1978        amd_iommu_dev_table[devid].data[1] = 0;
1979
1980        amd_iommu_apply_erratum_63(devid);
1981}
1982
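/*
 * Attach a single device to @domain: update the bookkeeping lists and
 * reference counts, write the DTE and flush it. The caller must hold
 * the domain lock.
 */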
1983static void do_attach(struct iommu_dev_data *dev_data,
1984                      struct protection_domain *domain)
1985{
1986        struct amd_iommu *iommu;
1987        bool ats;
1988
1989        iommu = amd_iommu_rlookup_table[dev_data->devid];
1990        ats   = dev_data->ats.enabled;
1991
1992        /* Update data structures */
1993        dev_data->domain = domain;
1994        list_add(&dev_data->list, &domain->dev_list);
1995        set_dte_entry(dev_data->devid, domain, ats);
1996
1997        /* Do reference counting */
1998        domain->dev_iommu[iommu->index] += 1;
1999        domain->dev_cnt                 += 1;
2000
2001        /* Flush the DTE entry */
2002        device_flush_dte(dev_data);
2003}
2004
2005static void do_detach(struct iommu_dev_data *dev_data)
2006{
2007        struct amd_iommu *iommu;
2008
2009        iommu = amd_iommu_rlookup_table[dev_data->devid];
2010
2011        /* decrease reference counters */
2012        dev_data->domain->dev_iommu[iommu->index] -= 1;
2013        dev_data->domain->dev_cnt                 -= 1;
2014
2015        /* Update data structures */
2016        dev_data->domain = NULL;
2017        list_del(&dev_data->list);
2018        clear_dte_entry(dev_data->devid);
2019
2020        /* Flush the DTE entry */
2021        device_flush_dte(dev_data);
2022}
2023
2024/*
2025 * If a device is not yet associated with a domain, this function
2026 * attaches it to the domain and makes it visible to the hardware
2027 */
2028static int __attach_device(struct iommu_dev_data *dev_data,
2029                           struct protection_domain *domain)
2030{
2031        struct iommu_dev_data *head, *entry;
2032        int ret;
2033
2034        /* lock domain */
2035        spin_lock(&domain->lock);
2036
2037        head = dev_data;
2038
2039        if (head->alias_data != NULL)
2040                head = head->alias_data;
2041
2042        /* Now we have the root of the alias group, if any */
2043
2044        ret = -EBUSY;
2045        if (head->domain != NULL)
2046                goto out_unlock;
2047
2048        /* Attach alias group root */
2049        do_attach(head, domain);
2050
2051        /* Attach other devices in the alias group */
2052        list_for_each_entry(entry, &head->alias_list, alias_list)
2053                do_attach(entry, domain);
2054
2055        ret = 0;
2056
2057out_unlock:
2058
2059        /* ready */
2060        spin_unlock(&domain->lock);
2061
2062        return ret;
2063}
2064
2065
2066static void pdev_iommuv2_disable(struct pci_dev *pdev)
2067{
2068        pci_disable_ats(pdev);
2069        pci_disable_pri(pdev);
2070        pci_disable_pasid(pdev);
2071}
2072
2073/* FIXME: Change generic reset-function to do the same */
2074static int pri_reset_while_enabled(struct pci_dev *pdev)
2075{
2076        u16 control;
2077        int pos;
2078
2079        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2080        if (!pos)
2081                return -EINVAL;
2082
2083        pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
2084        control |= PCI_PRI_CTRL_RESET;
2085        pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
2086
2087        return 0;
2088}
2089
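/*
 * Enable the PCIe features needed for IOMMUv2 usage of @pdev: PASID,
 * PRI (including the errata workarounds) and finally ATS. On failure
 * the already enabled features are disabled again.
 */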
2090static int pdev_iommuv2_enable(struct pci_dev *pdev)
2091{
2092        bool reset_enable;
2093        int reqs, ret;
2094
2095        /* FIXME: Hardcode number of outstanding requests for now */
2096        reqs = 32;
2097        if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
2098                reqs = 1;
2099        reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
2100
2101        /* Only allow access to user-accessible pages */
2102        ret = pci_enable_pasid(pdev, 0);
2103        if (ret)
2104                goto out_err;
2105
2106        /* First reset the PRI state of the device */
2107        ret = pci_reset_pri(pdev);
2108        if (ret)
2109                goto out_err;
2110
2111        /* Enable PRI */
2112        ret = pci_enable_pri(pdev, reqs);
2113        if (ret)
2114                goto out_err;
2115
2116        if (reset_enable) {
2117                ret = pri_reset_while_enabled(pdev);
2118                if (ret)
2119                        goto out_err;
2120        }
2121
2122        ret = pci_enable_ats(pdev, PAGE_SHIFT);
2123        if (ret)
2124                goto out_err;
2125
2126        return 0;
2127
2128out_err:
2129        pci_disable_pri(pdev);
2130        pci_disable_pasid(pdev);
2131
2132        return ret;
2133}
2134
2135/* FIXME: Move this to PCI code */
2136#define PCI_PRI_TLP_OFF         (1 << 15)
2137
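/*
 * Check the PRI status register to find out whether the device requires
 * the PASID TLP prefix in PRI Group Response messages.
 */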
2138static bool pci_pri_tlp_required(struct pci_dev *pdev)
2139{
2140        u16 status;
2141        int pos;
2142
2143        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2144        if (!pos)
2145                return false;
2146
2147        pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
2148
2149        return (status & PCI_PRI_TLP_OFF) ? true : false;
2150}
2151
2152/*
2153 * If a device is not yet associated with a domain, this function
2154 * attaches it to the domain and makes it visible to the hardware
2155 */
2156static int attach_device(struct device *dev,
2157                         struct protection_domain *domain)
2158{
2159        struct pci_dev *pdev = to_pci_dev(dev);
2160        struct iommu_dev_data *dev_data;
2161        unsigned long flags;
2162        int ret;
2163
2164        dev_data = get_dev_data(dev);
2165
2166        if (domain->flags & PD_IOMMUV2_MASK) {
2167                if (!dev_data->passthrough)
2168                        return -EINVAL;
2169
2170                if (dev_data->iommu_v2) {
2171                        if (pdev_iommuv2_enable(pdev) != 0)
2172                                return -EINVAL;
2173
2174                        dev_data->ats.enabled = true;
2175                        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2176                        dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
2177                }
2178        } else if (amd_iommu_iotlb_sup &&
2179                   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2180                dev_data->ats.enabled = true;
2181                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2182        }
2183
2184        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2185        ret = __attach_device(dev_data, domain);
2186        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2187
2188        /*
2189         * We might boot into a crash-kernel here. The crashed kernel
2190         * may have left stale entries in the IOMMU caches, so flush
2191         * the TLB here to evict them.
2192         */
2193        domain_flush_tlb_pde(domain);
2194
2195        return ret;
2196}
2197
2198/*
2199 * Removes a device from a protection domain (unlocked)
2200 */
2201static void __detach_device(struct iommu_dev_data *dev_data)
2202{
2203        struct iommu_dev_data *head, *entry;
2204        struct protection_domain *domain;
2205        unsigned long flags;
2206
2207        BUG_ON(!dev_data->domain);
2208
2209        domain = dev_data->domain;
2210
2211        spin_lock_irqsave(&domain->lock, flags);
2212
2213        head = dev_data;
2214        if (head->alias_data != NULL)
2215                head = head->alias_data;
2216
2217        list_for_each_entry(entry, &head->alias_list, alias_list)
2218                do_detach(entry);
2219
2220        do_detach(head);
2221
2222        spin_unlock_irqrestore(&domain->lock, flags);
2223}
2224
2225/*
2226 * Removes a device from a protection domain (with devtable_lock held)
2227 */
2228static void detach_device(struct device *dev)
2229{
2230        struct protection_domain *domain;
2231        struct iommu_dev_data *dev_data;
2232        unsigned long flags;
2233
2234        dev_data = get_dev_data(dev);
2235        domain   = dev_data->domain;
2236
2237        /* lock device table */
2238        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2239        __detach_device(dev_data);
2240        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2241
2242        if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
2243                pdev_iommuv2_disable(to_pci_dev(dev));
2244        else if (dev_data->ats.enabled)
2245                pci_disable_ats(to_pci_dev(dev));
2246
2247        dev_data->ats.enabled = false;
2248}
2249
2250static int amd_iommu_add_device(struct device *dev)
2251{
2252        struct iommu_dev_data *dev_data;
2253        struct iommu_domain *domain;
2254        struct amd_iommu *iommu;
2255        u16 devid;
2256        int ret;
2257
2258        if (!check_device(dev) || get_dev_data(dev))
2259                return 0;
2260
2261        devid = get_device_id(dev);
2262        iommu = amd_iommu_rlookup_table[devid];
2263
2264        ret = iommu_init_device(dev);
2265        if (ret) {
2266                if (ret != -ENOTSUPP)
2267                        pr_err("Failed to initialize device %s - trying to proceed anyway\n",
2268                                dev_name(dev));
2269
2270                iommu_ignore_device(dev);
2271                dev->archdata.dma_ops = &nommu_dma_ops;
2272                goto out;
2273        }
2274        init_iommu_group(dev);
2275
2276        dev_data = get_dev_data(dev);
2277
2278        BUG_ON(!dev_data);
2279
2280        if (iommu_pass_through || dev_data->iommu_v2)
2281                iommu_request_dm_for_dev(dev);
2282
2283        /* Domains are initialized for this device - have a look at what we ended up with */
2284        domain = iommu_get_domain_for_dev(dev);
2285        if (domain->type == IOMMU_DOMAIN_IDENTITY)
2286                dev_data->passthrough = true;
2287        else
2288                dev->archdata.dma_ops = &amd_iommu_dma_ops;
2289
2290out:
2291        iommu_completion_wait(iommu);
2292
2293        return 0;
2294}
2295
2296static void amd_iommu_remove_device(struct device *dev)
2297{
2298        struct amd_iommu *iommu;
2299        u16 devid;
2300
2301        if (!check_device(dev))
2302                return;
2303
2304        devid = get_device_id(dev);
2305        iommu = amd_iommu_rlookup_table[devid];
2306
2307        iommu_uninit_device(dev);
2308        iommu_completion_wait(iommu);
2309}
2310
2311/*****************************************************************************
2312 *
2313 * The next functions belong to the dma_ops mapping/unmapping code.
2314 *
2315 *****************************************************************************/
2316
2317/*
2318 * In the dma_ops path we only have the struct device. This function
2319 * finds the dma_ops protection domain the device is attached to, or
2320 * an error pointer if the device cannot be handled by this driver or
2321 * is not attached to a dma_ops domain.
2323 */
2324static struct protection_domain *get_domain(struct device *dev)
2325{
2326        struct protection_domain *domain;
2327        struct iommu_domain *io_domain;
2328
2329        if (!check_device(dev))
2330                return ERR_PTR(-EINVAL);
2331
2332        io_domain = iommu_get_domain_for_dev(dev);
2333        if (!io_domain)
2334                return NULL;
2335
2336        domain = to_pdomain(io_domain);
2337        if (!dma_ops_domain(domain))
2338                return ERR_PTR(-EBUSY);
2339
2340        return domain;
2341}
2342
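/*
 * Re-write the DTEs of all devices attached to @domain, used via
 * update_domain() after the domain's page-table setup has changed.
 */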
2343static void update_device_table(struct protection_domain *domain)
2344{
2345        struct iommu_dev_data *dev_data;
2346
2347        list_for_each_entry(dev_data, &domain->dev_list, list)
2348                set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
2349}
2350
2351static void update_domain(struct protection_domain *domain)
2352{
2353        if (!domain->updated)
2354                return;
2355
2356        update_device_table(domain);
2357
2358        domain_flush_devices(domain);
2359        domain_flush_tlb_pde(domain);
2360
2361        domain->updated = false;
2362}
2363
2364/*
2365 * This function fetches the PTE for a given address in the aperture
2366 */
2367static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
2368                            unsigned long address)
2369{
2370        struct aperture_range *aperture;
2371        u64 *pte, *pte_page;
2372
2373        aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2374        if (!aperture)
2375                return NULL;
2376
2377        pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2378        if (!pte) {
2379                pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
2380                                GFP_ATOMIC);
2381                aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
2382        } else
2383                pte += PM_LEVEL_INDEX(0, address);
2384
2385        update_domain(&dom->domain);
2386
2387        return pte;
2388}
2389
2390/*
2391 * This is the generic map function. It maps one 4kb page at paddr to
2392 * the given address in the DMA address space for the domain.
2393 */
2394static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
2395                                     unsigned long address,
2396                                     phys_addr_t paddr,
2397                                     int direction)
2398{
2399        u64 *pte, __pte;
2400
2401        WARN_ON(address > dom->aperture_size);
2402
2403        paddr &= PAGE_MASK;
2404
2405        pte  = dma_ops_get_pte(dom, address);
2406        if (!pte)
2407                return DMA_ERROR_CODE;
2408
2409        __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
2410
2411        if (direction == DMA_TO_DEVICE)
2412                __pte |= IOMMU_PTE_IR;
2413        else if (direction == DMA_FROM_DEVICE)
2414                __pte |= IOMMU_PTE_IW;
2415        else if (direction == DMA_BIDIRECTIONAL)
2416                __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
2417
2418        WARN_ON(*pte);
2419
2420        *pte = __pte;
2421
2422        return (dma_addr_t)address;
2423}
2424
2425/*
2426 * The generic unmapping function for one page in the DMA address space.
2427 */
2428static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
2429                                 unsigned long address)
2430{
2431        struct aperture_range *aperture;
2432        u64 *pte;
2433
2434        if (address >= dom->aperture_size)
2435                return;
2436
2437        aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2438        if (!aperture)
2439                return;
2440
2441        pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2442        if (!pte)
2443                return;
2444
2445        pte += PM_LEVEL_INDEX(0, address);
2446
2447        WARN_ON(!*pte);
2448
2449        *pte = 0ULL;
2450}
2451
2452/*
2453 * This function contains common code for mapping a physically
2454 * contiguous memory region into the DMA address space. It is used by all
2455 * mapping functions provided with this IOMMU driver.
2456 * Must be called with the domain lock held.
2457 */
2458static dma_addr_t __map_single(struct device *dev,
2459                               struct dma_ops_domain *dma_dom,
2460                               phys_addr_t paddr,
2461                               size_t size,
2462                               int dir,
2463                               bool align,
2464                               u64 dma_mask)
2465{
2466        dma_addr_t offset = paddr & ~PAGE_MASK;
2467        dma_addr_t address, start, ret;
2468        unsigned int pages;
2469        unsigned long align_mask = 0;
2470        int i;
2471
2472        pages = iommu_num_pages(paddr, size, PAGE_SIZE);
2473        paddr &= PAGE_MASK;
2474
2475        INC_STATS_COUNTER(total_map_requests);
2476
2477        if (pages > 1)
2478                INC_STATS_COUNTER(cross_page);
2479
2480        if (align)
2481                align_mask = (1UL << get_order(size)) - 1;
2482
2483retry:
2484        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
2485                                          dma_mask);
2486        if (unlikely(address == DMA_ERROR_CODE)) {
2487                /*
2488                 * Setting next_address here will let the address
2489                 * allocator only scan the newly allocated range in the
2490                 * first run. This is a small optimization.
2491                 */
2492                dma_dom->next_address = dma_dom->aperture_size;
2493
2494                if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
2495                        goto out;
2496
2497                /*
2498                 * aperture was successfully enlarged by 128 MB, try
2499                 * allocation again
2500                 */
2501                goto retry;
2502        }
2503
2504        start = address;
2505        for (i = 0; i < pages; ++i) {
2506                ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
2507                if (ret == DMA_ERROR_CODE)
2508                        goto out_unmap;
2509
2510                paddr += PAGE_SIZE;
2511                start += PAGE_SIZE;
2512        }
2513        address += offset;
2514
2515        ADD_STATS_COUNTER(alloced_io_mem, size);
2516
2517        if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
2518                domain_flush_tlb(&dma_dom->domain);
2519                dma_dom->need_flush = false;
2520        } else if (unlikely(amd_iommu_np_cache))
2521                domain_flush_pages(&dma_dom->domain, address, size);
2522
2523out:
2524        return address;
2525
2526out_unmap:
2527
2528        for (--i; i >= 0; --i) {
2529                start -= PAGE_SIZE;
2530                dma_ops_domain_unmap(dma_dom, start);
2531        }
2532
2533        dma_ops_free_addresses(dma_dom, address, pages);
2534
2535        return DMA_ERROR_CODE;
2536}
2537
2538/*
2539 * Does the reverse of the __map_single function. Must also be called
2540 * with the domain lock held.
2541 */
2542static void __unmap_single(struct dma_ops_domain *dma_dom,
2543                           dma_addr_t dma_addr,
2544                           size_t size,
2545                           int dir)
2546{
2547        dma_addr_t flush_addr;
2548        dma_addr_t i, start;
2549        unsigned int pages;
2550
2551        if ((dma_addr == DMA_ERROR_CODE) ||
2552            (dma_addr + size > dma_dom->aperture_size))
2553                return;
2554
2555        flush_addr = dma_addr;
2556        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
2557        dma_addr &= PAGE_MASK;
2558        start = dma_addr;
2559
2560        for (i = 0; i < pages; ++i) {
2561                dma_ops_domain_unmap(dma_dom, start);
2562                start += PAGE_SIZE;
2563        }
2564
2565        SUB_STATS_COUNTER(alloced_io_mem, size);
2566
2567        dma_ops_free_addresses(dma_dom, dma_addr, pages);
2568
2569        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
2570                domain_flush_pages(&dma_dom->domain, flush_addr, size);
2571                dma_dom->need_flush = false;
2572        }
2573}
2574
2575/*
2576 * The exported map_single function for dma_ops.
2577 */
2578static dma_addr_t map_page(struct device *dev, struct page *page,
2579                           unsigned long offset, size_t size,
2580                           enum dma_data_direction dir,
2581                           struct dma_attrs *attrs)
2582{
2583        unsigned long flags;
2584        struct protection_domain *domain;
2585        dma_addr_t addr;
2586        u64 dma_mask;
2587        phys_addr_t paddr = page_to_phys(page) + offset;
2588
2589        INC_STATS_COUNTER(cnt_map_single);
2590
2591        domain = get_domain(dev);
2592        if (PTR_ERR(domain) == -EINVAL)
2593                return (dma_addr_t)paddr;
2594        else if (IS_ERR(domain))
2595                return DMA_ERROR_CODE;
2596
2597        dma_mask = *dev->dma_mask;
2598
2599        spin_lock_irqsave(&domain->lock, flags);
2600
2601        addr = __map_single(dev, domain->priv, paddr, size, dir, false,
2602                            dma_mask);
2603        if (addr == DMA_ERROR_CODE)
2604                goto out;
2605
2606        domain_flush_complete(domain);
2607
2608out:
2609        spin_unlock_irqrestore(&domain->lock, flags);
2610
2611        return addr;
2612}
2613
2614/*
2615 * The exported unmap_single function for dma_ops.
2616 */
2617static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2618                       enum dma_data_direction dir, struct dma_attrs *attrs)
2619{
2620        unsigned long flags;
2621        struct protection_domain *domain;
2622
2623        INC_STATS_COUNTER(cnt_unmap_single);
2624
2625        domain = get_domain(dev);
2626        if (IS_ERR(domain))
2627                return;
2628
2629        spin_lock_irqsave(&domain->lock, flags);
2630
2631        __unmap_single(domain->priv, dma_addr, size, dir);
2632
2633        domain_flush_complete(domain);
2634
2635        spin_unlock_irqrestore(&domain->lock, flags);
2636}
2637
2638/*
2639 * The exported map_sg function for dma_ops (handles scatter-gather
2640 * lists).
2641 */
2642static int map_sg(struct device *dev, struct scatterlist *sglist,
2643                  int nelems, enum dma_data_direction dir,
2644                  struct dma_attrs *attrs)
2645{
2646        unsigned long flags;
2647        struct protection_domain *domain;
2648        int i;
2649        struct scatterlist *s;
2650        phys_addr_t paddr;
2651        int mapped_elems = 0;
2652        u64 dma_mask;
2653
2654        INC_STATS_COUNTER(cnt_map_sg);
2655
2656        domain = get_domain(dev);
2657        if (IS_ERR(domain))
2658                return 0;
2659
2660        dma_mask = *dev->dma_mask;
2661
2662        spin_lock_irqsave(&domain->lock, flags);
2663
2664        for_each_sg(sglist, s, nelems, i) {
2665                paddr = sg_phys(s);
2666
2667                s->dma_address = __map_single(dev, domain->priv,
2668                                              paddr, s->length, dir, false,
2669                                              dma_mask);
2670
2671                if (s->dma_address) {
2672                        s->dma_length = s->length;
2673                        mapped_elems++;
2674                } else
2675                        goto unmap;
2676        }
2677
2678        domain_flush_complete(domain);
2679
2680out:
2681        spin_unlock_irqrestore(&domain->lock, flags);
2682
2683        return mapped_elems;
2684unmap:
2685        for_each_sg(sglist, s, mapped_elems, i) {
2686                if (s->dma_address)
2687                        __unmap_single(domain->priv, s->dma_address,
2688                                       s->dma_length, dir);
2689                s->dma_address = s->dma_length = 0;
2690        }
2691
2692        mapped_elems = 0;
2693
2694        goto out;
2695}
2696
2697/*
2698 * The exported unmap_sg function for dma_ops (handles scatter-gather
2699 * lists).
2700 */
2701static void unmap_sg(struct device *dev, struct scatterlist *sglist,
2702                     int nelems, enum dma_data_direction dir,
2703                     struct dma_attrs *attrs)
2704{
2705        unsigned long flags;
2706        struct protection_domain *domain;
2707        struct scatterlist *s;
2708        int i;
2709
2710        INC_STATS_COUNTER(cnt_unmap_sg);
2711
2712        domain = get_domain(dev);
2713        if (IS_ERR(domain))
2714                return;
2715
2716        spin_lock_irqsave(&domain->lock, flags);
2717
2718        for_each_sg(sglist, s, nelems, i) {
2719                __unmap_single(domain->priv, s->dma_address,
2720                               s->dma_length, dir);
2721                s->dma_address = s->dma_length = 0;
2722        }
2723
2724        domain_flush_complete(domain);
2725
2726        spin_unlock_irqrestore(&domain->lock, flags);
2727}
2728
2729/*
2730 * The exported alloc_coherent function for dma_ops.
2731 */
2732static void *alloc_coherent(struct device *dev, size_t size,
2733                            dma_addr_t *dma_addr, gfp_t flag,
2734                            struct dma_attrs *attrs)
2735{
2736        u64 dma_mask = dev->coherent_dma_mask;
2737        struct protection_domain *domain;
2738        unsigned long flags;
2739        struct page *page;
2740
2741        INC_STATS_COUNTER(cnt_alloc_coherent);
2742
2743        domain = get_domain(dev);
2744        if (PTR_ERR(domain) == -EINVAL) {
2745                page = alloc_pages(flag, get_order(size));
2746                *dma_addr = page_to_phys(page);
2747                return page_address(page);
2748        } else if (IS_ERR(domain))
2749                return NULL;
2750
2751        size      = PAGE_ALIGN(size);
2752        dma_mask  = dev->coherent_dma_mask;
2753        flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2754        flag     |= __GFP_ZERO;
2755
2756        page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
2757        if (!page) {
2758                if (!(flag & __GFP_WAIT))
2759                        return NULL;
2760
2761                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
2762                                                 get_order(size));
2763                if (!page)
2764                        return NULL;
2765        }
2766
2767        if (!dma_mask)
2768                dma_mask = *dev->dma_mask;
2769
2770        spin_lock_irqsave(&domain->lock, flags);
2771
2772        *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
2773                                 size, DMA_BIDIRECTIONAL, true, dma_mask);
2774
2775        if (*dma_addr == DMA_ERROR_CODE) {
2776                spin_unlock_irqrestore(&domain->lock, flags);
2777                goto out_free;
2778        }
2779
2780        domain_flush_complete(domain);
2781
2782        spin_unlock_irqrestore(&domain->lock, flags);
2783
2784        return page_address(page);
2785
2786out_free:
2787
2788        if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2789                __free_pages(page, get_order(size));
2790
2791        return NULL;
2792}
2793
2794/*
2795 * The exported free_coherent function for dma_ops.
2796 */
2797static void free_coherent(struct device *dev, size_t size,
2798                          void *virt_addr, dma_addr_t dma_addr,
2799                          struct dma_attrs *attrs)
2800{
2801        struct protection_domain *domain;
2802        unsigned long flags;
2803        struct page *page;
2804
2805        INC_STATS_COUNTER(cnt_free_coherent);
2806
2807        page = virt_to_page(virt_addr);
2808        size = PAGE_ALIGN(size);
2809
2810        domain = get_domain(dev);
2811        if (IS_ERR(domain))
2812                goto free_mem;
2813
2814        spin_lock_irqsave(&domain->lock, flags);
2815
2816        __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
2817
2818        domain_flush_complete(domain);
2819
2820        spin_unlock_irqrestore(&domain->lock, flags);
2821
2822free_mem:
2823        if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2824                __free_pages(page, get_order(size));
2825}
2826
2827/*
2828 * This function is called by the DMA layer to find out if we can handle a
2829 * particular device. It is part of the dma_ops.
2830 */
2831static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2832{
2833        return check_device(dev);
2834}
2835
2836static struct dma_map_ops amd_iommu_dma_ops = {
2837        .alloc = alloc_coherent,
2838        .free = free_coherent,
2839        .map_page = map_page,
2840        .unmap_page = unmap_page,
2841        .map_sg = map_sg,
2842        .unmap_sg = unmap_sg,
2843        .dma_supported = amd_iommu_dma_supported,
2844};
2845
2846int __init amd_iommu_init_api(void)
2847{
2848        return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
2849}
2850
2851int __init amd_iommu_init_dma_ops(void)
2852{
2853        swiotlb        = iommu_pass_through ? 1 : 0;
2854        iommu_detected = 1;
2855
2856        /*
2857         * In case we don't initialize SWIOTLB (actually the common case
2858         * when AMD IOMMU is enabled), make sure the global dma_ops are
2859         * set as a fall-back for devices not handled by this
2860         * driver (for example non-PCI devices).
2861         */
2862        if (!swiotlb)
2863                dma_ops = &nommu_dma_ops;
2864
2865        amd_iommu_stats_init();
2866
2867        if (amd_iommu_unmap_flush)
2868                pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
2869        else
2870                pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
2871
2872        return 0;
2873}
2874
2875/*****************************************************************************
2876 *
2877 * The following functions belong to the exported interface of AMD IOMMU
2878 *
2879 * This interface allows access to lower level functions of the IOMMU
2880 * like protection domain handling and assignment of devices to domains
2881 * which is not possible with the dma_ops interface.
2882 *
2883 *****************************************************************************/
2884
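/*
 * Detach all devices that are still attached to @domain. Called before
 * the domain is freed.
 */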
2885static void cleanup_domain(struct protection_domain *domain)
2886{
2887        struct iommu_dev_data *entry;
2888        unsigned long flags;
2889
2890        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2891
2892        while (!list_empty(&domain->dev_list)) {
2893                entry = list_first_entry(&domain->dev_list,
2894                                         struct iommu_dev_data, list);
2895                __detach_device(entry);
2896        }
2897
2898        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2899}
2900
2901static void protection_domain_free(struct protection_domain *domain)
2902{
2903        if (!domain)
2904                return;
2905
2906        del_domain_from_list(domain);
2907
2908        if (domain->id)
2909                domain_id_free(domain->id);
2910
2911        kfree(domain);
2912}
2913
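/*
 * Initialize the locks and lists of a protection domain and allocate
 * its domain id.
 */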
2914static int protection_domain_init(struct protection_domain *domain)
2915{
2916        spin_lock_init(&domain->lock);
2917        mutex_init(&domain->api_lock);
2918        domain->id = domain_id_alloc();
2919        if (!domain->id)
2920                return -ENOMEM;
2921        INIT_LIST_HEAD(&domain->dev_list);
2922
2923        return 0;
2924}
2925
2926static struct protection_domain *protection_domain_alloc(void)
2927{
2928        struct protection_domain *domain;
2929
2930        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2931        if (!domain)
2932                return NULL;
2933
2934        if (protection_domain_init(domain))
2935                goto out_err;
2936
2937        add_domain_to_list(domain);
2938
2939        return domain;
2940
2941out_err:
2942        kfree(domain);
2943
2944        return NULL;
2945}
2946
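/*
 * iommu_ops callback to allocate a domain: a 3-level page-table domain
 * for IOMMU_DOMAIN_UNMANAGED, a dma_ops domain for IOMMU_DOMAIN_DMA and
 * a page-table-less domain for IOMMU_DOMAIN_IDENTITY.
 */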
2947static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2948{
2949        struct protection_domain *pdomain;
2950        struct dma_ops_domain *dma_domain;
2951
2952        switch (type) {
2953        case IOMMU_DOMAIN_UNMANAGED:
2954                pdomain = protection_domain_alloc();
2955                if (!pdomain)
2956                        return NULL;
2957
2958                pdomain->mode    = PAGE_MODE_3_LEVEL;
2959                pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2960                if (!pdomain->pt_root) {
2961                        protection_domain_free(pdomain);
2962                        return NULL;
2963                }
2964
2965                pdomain->domain.geometry.aperture_start = 0;
2966                pdomain->domain.geometry.aperture_end   = ~0ULL;
2967                pdomain->domain.geometry.force_aperture = true;
2968
2969                break;
2970        case IOMMU_DOMAIN_DMA:
2971                dma_domain = dma_ops_domain_alloc();
2972                if (!dma_domain) {
2973                        pr_err("AMD-Vi: Failed to allocate dma_ops domain\n");
2974                        return NULL;
2975                }
2976                pdomain = &dma_domain->domain;
2977                break;
2978        case IOMMU_DOMAIN_IDENTITY:
2979                pdomain = protection_domain_alloc();
2980                if (!pdomain)
2981                        return NULL;
2982
2983                pdomain->mode = PAGE_MODE_NONE;
2984                break;
2985        default:
2986                return NULL;
2987        }
2988
2989        return &pdomain->domain;
2990}
2991
2992static void amd_iommu_domain_free(struct iommu_domain *dom)
2993{
2994        struct protection_domain *domain;
2995
2996        if (!dom)
2997                return;
2998
2999        domain = to_pdomain(dom);
3000
3001        if (domain->dev_cnt > 0)
3002                cleanup_domain(domain);
3003
3004        BUG_ON(domain->dev_cnt != 0);
3005
3006        if (domain->mode != PAGE_MODE_NONE)
3007                free_pagetable(domain);
3008
3009        if (domain->flags & PD_IOMMUV2_MASK)
3010                free_gcr3_table(domain);
3011
3012        protection_domain_free(domain);
3013}
3014
3015static void amd_iommu_detach_device(struct iommu_domain *dom,
3016                                    struct device *dev)
3017{
3018        struct iommu_dev_data *dev_data = dev->archdata.iommu;
3019        struct amd_iommu *iommu;
3020        u16 devid;
3021
3022        if (!check_device(dev))
3023                return;
3024
3025        devid = get_device_id(dev);
3026
3027        if (dev_data->domain != NULL)
3028                detach_device(dev);
3029
3030        iommu = amd_iommu_rlookup_table[devid];
3031        if (!iommu)
3032                return;
3033
3034        iommu_completion_wait(iommu);
3035}
3036
3037static int amd_iommu_attach_device(struct iommu_domain *dom,
3038                                   struct device *dev)
3039{
3040        struct protection_domain *domain = to_pdomain(dom);
3041        struct iommu_dev_data *dev_data;
3042        struct amd_iommu *iommu;
3043        int ret;
3044
3045        if (!check_device(dev))
3046                return -EINVAL;
3047
3048        dev_data = dev->archdata.iommu;
3049
3050        iommu = amd_iommu_rlookup_table[dev_data->devid];
3051        if (!iommu)
3052                return -EINVAL;
3053
3054        if (dev_data->domain)
3055                detach_device(dev);
3056
3057        ret = attach_device(dev, domain);
3058
3059        iommu_completion_wait(iommu);
3060
3061        return ret;
3062}
3063
3064static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
3065                         phys_addr_t paddr, size_t page_size, int iommu_prot)
3066{
3067        struct protection_domain *domain = to_pdomain(dom);
3068        int prot = 0;
3069        int ret;
3070
3071        if (domain->mode == PAGE_MODE_NONE)
3072                return -EINVAL;
3073
3074        if (iommu_prot & IOMMU_READ)
3075                prot |= IOMMU_PROT_IR;
3076        if (iommu_prot & IOMMU_WRITE)
3077                prot |= IOMMU_PROT_IW;
3078
3079        mutex_lock(&domain->api_lock);
3080        ret = iommu_map_page(domain, iova, paddr, prot, page_size);
3081        mutex_unlock(&domain->api_lock);
3082
3083        return ret;
3084}
3085
3086static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3087                           size_t page_size)
3088{
3089        struct protection_domain *domain = to_pdomain(dom);
3090        size_t unmap_size;
3091
3092        if (domain->mode == PAGE_MODE_NONE)
3093                return -EINVAL;
3094
3095        mutex_lock(&domain->api_lock);
3096        unmap_size = iommu_unmap_page(domain, iova, page_size);
3097        mutex_unlock(&domain->api_lock);
3098
3099        domain_flush_tlb_pde(domain);
3100
3101        return unmap_size;
3102}
3103
3104static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
3105                                          dma_addr_t iova)
3106{
3107        struct protection_domain *domain = to_pdomain(dom);
3108        unsigned long offset_mask, pte_pgsize;
3109        u64 *pte, __pte;
3110
3111        if (domain->mode == PAGE_MODE_NONE)
3112                return iova;
3113
3114        pte = fetch_pte(domain, iova, &pte_pgsize);
3115
3116        if (!pte || !IOMMU_PTE_PRESENT(*pte))
3117                return 0;
3118
3119        offset_mask = pte_pgsize - 1;
3120        __pte       = *pte & PM_ADDR_MASK;
3121
3122        return (__pte & ~offset_mask) | (iova & offset_mask);
3123}
3124
3125static bool amd_iommu_capable(enum iommu_cap cap)
3126{
3127        switch (cap) {
3128        case IOMMU_CAP_CACHE_COHERENCY:
3129                return true;
3130        case IOMMU_CAP_INTR_REMAP:
3131                return (irq_remapping_enabled == 1);
3132        case IOMMU_CAP_NOEXEC:
3133                return false;
3134        }
3135
3136        return false;
3137}
3138
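/*
 * Report the unity-mapped regions that apply to @dev so that the IOMMU
 * core can treat them as direct-mapped regions.
 */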
3139static void amd_iommu_get_dm_regions(struct device *dev,
3140                                     struct list_head *head)
3141{
3142        struct unity_map_entry *entry;
3143        u16 devid;
3144
3145        devid = get_device_id(dev);
3146
3147        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3148                struct iommu_dm_region *region;
3149
3150                if (devid < entry->devid_start || devid > entry->devid_end)
3151                        continue;
3152
3153                region = kzalloc(sizeof(*region), GFP_KERNEL);
3154                if (!region) {
3155                        pr_err("Out of memory allocating dm-regions for %s\n",
3156                                dev_name(dev));
3157                        return;
3158                }
3159
3160                region->start = entry->address_start;
3161                region->length = entry->address_end - entry->address_start;
3162                if (entry->prot & IOMMU_PROT_IR)
3163                        region->prot |= IOMMU_READ;
3164                if (entry->prot & IOMMU_PROT_IW)
3165                        region->prot |= IOMMU_WRITE;
3166
3167                list_add_tail(&region->list, head);
3168        }
3169}
3170
3171static void amd_iommu_put_dm_regions(struct device *dev,
3172                                     struct list_head *head)
3173{
3174        struct iommu_dm_region *entry, *next;
3175
3176        list_for_each_entry_safe(entry, next, head, list)
3177                kfree(entry);
3178}
3179
3180static const struct iommu_ops amd_iommu_ops = {
3181        .capable = amd_iommu_capable,
3182        .domain_alloc = amd_iommu_domain_alloc,
3183        .domain_free  = amd_iommu_domain_free,
3184        .attach_dev = amd_iommu_attach_device,
3185        .detach_dev = amd_iommu_detach_device,
3186        .map = amd_iommu_map,
3187        .unmap = amd_iommu_unmap,
3188        .map_sg = default_iommu_map_sg,
3189        .iova_to_phys = amd_iommu_iova_to_phys,
3190        .add_device = amd_iommu_add_device,
3191        .remove_device = amd_iommu_remove_device,
3192        .get_dm_regions = amd_iommu_get_dm_regions,
3193        .put_dm_regions = amd_iommu_put_dm_regions,
3194        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
3195};
3196
3197/*****************************************************************************
3198 *
3199 * The next functions do a basic initialization of the IOMMU for
3200 * passthrough mode.
3201 *
3202 * In passthrough mode the IOMMU is initialized and enabled but not used for
3203 * DMA-API translation.
3204 *
3205 *****************************************************************************/
3206
3207/* IOMMUv2 specific functions */
3208int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3209{
3210        return atomic_notifier_chain_register(&ppr_notifier, nb);
3211}
3212EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3213
3214int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3215{
3216        return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3217}
3218EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
3219
3220void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3221{
3222        struct protection_domain *domain = to_pdomain(dom);
3223        unsigned long flags;
3224
3225        spin_lock_irqsave(&domain->lock, flags);
3226
3227        /* Update data structure */
3228        domain->mode    = PAGE_MODE_NONE;
3229        domain->updated = true;
3230
3231        /* Make changes visible to IOMMUs */
3232        update_domain(domain);
3233
3234        /* Page-table is not visible to IOMMU anymore, so free it */
3235        free_pagetable(domain);
3236
3237        spin_unlock_irqrestore(&domain->lock, flags);
3238}
3239EXPORT_SYMBOL(amd_iommu_domain_direct_map);
3240
3241int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3242{
3243        struct protection_domain *domain = to_pdomain(dom);
3244        unsigned long flags;
3245        int levels, ret;
3246
3247        if (pasids <= 0 || pasids > (PASID_MASK + 1))
3248                return -EINVAL;
3249
3250        /* Number of GCR3 table levels required */
3251        for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3252                levels += 1;
3253
3254        if (levels > amd_iommu_max_glx_val)
3255                return -EINVAL;
3256
3257        spin_lock_irqsave(&domain->lock, flags);
3258
3259        /*
3260         * Spare us the sanity check of whether devices already in the
3261         * domain support IOMMUv2. Just require that the domain has no
3262         * devices attached when it is switched into IOMMUv2 mode.
3263         */
3264        ret = -EBUSY;
3265        if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3266                goto out;
3267
3268        ret = -ENOMEM;
3269        domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3270        if (domain->gcr3_tbl == NULL)
3271                goto out;
3272
3273        domain->glx      = levels;
3274        domain->flags   |= PD_IOMMUV2_MASK;
3275        domain->updated  = true;
3276
3277        update_domain(domain);
3278
3279        ret = 0;
3280
3281out:
3282        spin_unlock_irqrestore(&domain->lock, flags);
3283
3284        return ret;
3285}
3286EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
3287
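/*
 * Flush the IOMMU TLBs for @pasid on every IOMMU the domain is active
 * on, wait for completion, then flush the IOTLBs of all ATS-enabled
 * devices in the domain. Must be called with the domain lock held.
 */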
3288static int __flush_pasid(struct protection_domain *domain, int pasid,
3289                         u64 address, bool size)
3290{
3291        struct iommu_dev_data *dev_data;
3292        struct iommu_cmd cmd;
3293        int i, ret;
3294
3295        if (!(domain->flags & PD_IOMMUV2_MASK))
3296                return -EINVAL;
3297
3298        build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3299
3300        /*
3301         * IOMMU TLB needs to be flushed before Device TLB to
3302         * prevent device TLB refill from IOMMU TLB
3303         */
3304        for (i = 0; i < amd_iommus_present; ++i) {
3305                if (domain->dev_iommu[i] == 0)
3306                        continue;
3307
3308                ret = iommu_queue_command(amd_iommus[i], &cmd);
3309                if (ret != 0)
3310                        goto out;
3311        }
3312
3313        /* Wait until IOMMU TLB flushes are complete */
3314        domain_flush_complete(domain);
3315
3316        /* Now flush device TLBs */
3317        list_for_each_entry(dev_data, &domain->dev_list, list) {
3318                struct amd_iommu *iommu;
3319                int qdep;
3320
3321                /*
3322                 * There might be non-IOMMUv2 capable devices in an IOMMUv2
3323                 * domain.
3324                 */
3325                if (!dev_data->ats.enabled)
3326                        continue;
3327
3328                qdep  = dev_data->ats.qdep;
3329                iommu = amd_iommu_rlookup_table[dev_data->devid];
3330
3331                build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3332                                      qdep, address, size);
3333
3334                ret = iommu_queue_command(iommu, &cmd);
3335                if (ret != 0)
3336                        goto out;
3337        }
3338
3339        /* Wait until all device TLBs are flushed */
3340        domain_flush_complete(domain);
3341
3342        ret = 0;
3343
3344out:
3345
3346        return ret;
3347}
3348
3349static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3350                                  u64 address)
3351{
3352        INC_STATS_COUNTER(invalidate_iotlb);
3353
3354        return __flush_pasid(domain, pasid, address, false);
3355}
3356
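/*
 * Flush the translation of a single address for @pasid in @dom from
 * the IOMMU and device TLBs.
 */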
3357int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3358                         u64 address)
3359{
3360        struct protection_domain *domain = to_pdomain(dom);
3361        unsigned long flags;
3362        int ret;
3363
3364        spin_lock_irqsave(&domain->lock, flags);
3365        ret = __amd_iommu_flush_page(domain, pasid, address);
3366        spin_unlock_irqrestore(&domain->lock, flags);
3367
3368        return ret;
3369}
3370EXPORT_SYMBOL(amd_iommu_flush_page);
3371
3372static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3373{
3374        INC_STATS_COUNTER(invalidate_iotlb_all);
3375
3376        return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3377                             true);
3378}
3379
3380int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3381{
3382        struct protection_domain *domain = to_pdomain(dom);
3383        unsigned long flags;
3384        int ret;
3385
3386        spin_lock_irqsave(&domain->lock, flags);
3387        ret = __amd_iommu_flush_tlb(domain, pasid);
3388        spin_unlock_irqrestore(&domain->lock, flags);
3389
3390        return ret;
3391}
3392EXPORT_SYMBOL(amd_iommu_flush_tlb);
3393
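/*
 * Walk the GCR3 table and return a pointer to the level-0 entry for
 * @pasid. Each level decodes 9 bits of the PASID; missing intermediate
 * tables are allocated (GFP_ATOMIC) when @alloc is true, otherwise
 * NULL is returned for a non-present path.
 */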
3394static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
3395{
3396        int index;
3397        u64 *pte;
3398
3399        while (true) {
3400
3401                index = (pasid >> (9 * level)) & 0x1ff;
3402                pte   = &root[index];
3403
3404                if (level == 0)
3405                        break;
3406
3407                if (!(*pte & GCR3_VALID)) {
3408                        if (!alloc)
3409                                return NULL;
3410
3411                        root = (void *)get_zeroed_page(GFP_ATOMIC);
3412                        if (root == NULL)
3413                                return NULL;
3414
3415                        *pte = __pa(root) | GCR3_VALID;
3416                }
3417
3418                root = __va(*pte & PAGE_MASK);
3419
3420                level -= 1;
3421        }
3422
3423        return pte;
3424}
3425
3426static int __set_gcr3(struct protection_domain *domain, int pasid,
3427                      unsigned long cr3)
3428{
3429        u64 *pte;
3430
3431        if (domain->mode != PAGE_MODE_NONE)
3432                return -EINVAL;
3433
3434        pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
3435        if (pte == NULL)
3436                return -ENOMEM;
3437
3438        *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
3439
3440        return __amd_iommu_flush_tlb(domain, pasid);
3441}
3442
3443static int __clear_gcr3(struct protection_domain *domain, int pasid)
3444{
3445        u64 *pte;
3446
3447        if (domain->mode != PAGE_MODE_NONE)
3448                return -EINVAL;
3449
3450        pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
3451        if (pte == NULL)
3452                return 0;
3453
3454        *pte = 0;
3455
3456        return __amd_iommu_flush_tlb(domain, pasid);
3457}
3458
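/*
 * Install @cr3 as the root of the guest page table for @pasid in the
 * domain's GCR3 table and flush the TLB entries for that PASID. Only
 * valid for domains in PAGE_MODE_NONE, i.e. direct-mapped domains.
 */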
3459int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
3460                              unsigned long cr3)
3461{
3462        struct protection_domain *domain = to_pdomain(dom);
3463        unsigned long flags;
3464        int ret;
3465
3466        spin_lock_irqsave(&domain->lock, flags);
3467        ret = __set_gcr3(domain, pasid, cr3);
3468        spin_unlock_irqrestore(&domain->lock, flags);
3469
3470        return ret;
3471}
3472EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
3473
3474int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
3475{
3476        struct protection_domain *domain = to_pdomain(dom);
3477        unsigned long flags;
3478        int ret;
3479
3480        spin_lock_irqsave(&domain->lock, flags);
3481        ret = __clear_gcr3(domain, pasid);
3482        spin_unlock_irqrestore(&domain->lock, flags);
3483
3484        return ret;
3485}
3486EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
3487
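/*
 * Send a COMPLETE_PPR command on behalf of @pdev, reporting @status
 * for the peripheral page request identified by @pasid and @tag, so
 * the device learns whether its page request could be served.
 */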
3488int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
3489                           int status, int tag)
3490{
3491        struct iommu_dev_data *dev_data;
3492        struct amd_iommu *iommu;
3493        struct iommu_cmd cmd;
3494
3495        INC_STATS_COUNTER(complete_ppr);
3496
3497        dev_data = get_dev_data(&pdev->dev);
3498        iommu    = amd_iommu_rlookup_table[dev_data->devid];
3499
3500        build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3501                           tag, dev_data->pri_tlp);
3502
3503        return iommu_queue_command(iommu, &cmd);
3504}
3505EXPORT_SYMBOL(amd_iommu_complete_ppr);
3506
3507struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
3508{
3509        struct protection_domain *pdomain;
3510
3511        pdomain = get_domain(&pdev->dev);
3512        if (IS_ERR(pdomain))
3513                return NULL;
3514
3515        /* Only return IOMMUv2 domains */
3516        if (!(pdomain->flags & PD_IOMMUV2_MASK))
3517                return NULL;
3518
3519        return &pdomain->domain;
3520}
3521EXPORT_SYMBOL(amd_iommu_get_v2_domain);
3522
3523void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3524{
3525        struct iommu_dev_data *dev_data;
3526
3527        if (!amd_iommu_v2_supported())
3528                return;
3529
3530        dev_data = get_dev_data(&pdev->dev);
3531        dev_data->errata |= (1 << erratum);
3532}
3533EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
3534
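/*
 * Query the IOMMUv2-related capabilities of @pdev (ATS, PRI, PASID and
 * the usable number of PASIDs) from its PCIe extended capabilities and
 * report them in @info.
 */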
3535int amd_iommu_device_info(struct pci_dev *pdev,
3536                          struct amd_iommu_device_info *info)
3537{
3538        int max_pasids;
3539        int pos;
3540
3541        if (pdev == NULL || info == NULL)
3542                return -EINVAL;
3543
3544        if (!amd_iommu_v2_supported())
3545                return -EINVAL;
3546
3547        memset(info, 0, sizeof(*info));
3548
3549        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3550        if (pos)
3551                info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3552
3553        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3554        if (pos)
3555                info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3556
3557        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3558        if (pos) {
3559                int features;
3560
3561                max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3562                max_pasids = min(max_pasids, (1 << 20));
3563
3564                info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3565                info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3566
3567                features = pci_pasid_features(pdev);
3568                if (features & PCI_PASID_CAP_EXEC)
3569                        info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3570                if (features & PCI_PASID_CAP_PRIV)
3571                        info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3572        }
3573
3574        return 0;
3575}
3576EXPORT_SYMBOL(amd_iommu_device_info);
3577
3578#ifdef CONFIG_IRQ_REMAP
3579
3580/*****************************************************************************
3581 *
3582 * Interrupt Remapping Implementation
3583 *
3584 *****************************************************************************/
3585
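/*
 * A remapped interrupt is described by a 32-bit interrupt remapping
 * table entry (IRTE); the bitfields below describe its layout.
 */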
3586union irte {
3587        u32 val;
3588        struct {
3589                u32 valid       : 1,
3590                    no_fault    : 1,
3591                    int_type    : 3,
3592                    rq_eoi      : 1,
3593                    dm          : 1,
3594                    rsvd_1      : 1,
3595                    destination : 8,
3596                    vector      : 8,
3597                    rsvd_2      : 8;
3598        } fields;
3599};
3600
3601struct irq_2_irte {
3602        u16 devid; /* Device ID for IRTE table */
3603        u16 index; /* Index into IRTE table */
3604};
3605
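/*
 * Per-interrupt data kept in irq_data->chip_data: where the IRTE
 * lives, the IRTE contents to program on activation and, for MSI, the
 * translated message returned by irq_compose_msi_msg().
 */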
3606struct amd_ir_data {
3607        struct irq_2_irte                       irq_2_irte;
3608        union irte                              irte_entry;
3609        union {
3610                struct msi_msg                  msi_entry;
3611        };
3612};
3613
3614static struct irq_chip amd_ir_chip;
3615
3616#define DTE_IRQ_PHYS_ADDR_MASK  (((1ULL << 45)-1) << 6)
3617#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
3618#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
3619#define DTE_IRQ_REMAP_ENABLE    1ULL
3620
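/*
 * Point the interrupt-remapping part of the device table entry for
 * @devid at @table and enable interrupt remapping for that device.
 * The caller is responsible for flushing the DTE afterwards.
 */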
3621static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3622{
3623        u64 dte;
3624
3625        dte     = amd_iommu_dev_table[devid].data[2];
3626        dte     &= ~DTE_IRQ_PHYS_ADDR_MASK;
3627        dte     |= virt_to_phys(table->table);
3628        dte     |= DTE_IRQ_REMAP_INTCTL;
3629        dte     |= DTE_IRQ_TABLE_LEN;
3630        dte     |= DTE_IRQ_REMAP_ENABLE;
3631
3632        amd_iommu_dev_table[devid].data[2] = dte;
3633}
3634
3635#define IRTE_ALLOCATED (~1U)
3636
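/*
 * Return the interrupt remapping table for @devid, allocating and
 * installing one (shared with the device's alias) if none exists yet.
 * When @ioapic is set, the first 32 entries are reserved for IOAPIC
 * pins. Returns NULL if no IOMMU handles @devid or allocation fails.
 */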
3637static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3638{
3639        struct irq_remap_table *table = NULL;
3640        struct amd_iommu *iommu;
3641        unsigned long flags;
3642        u16 alias;
3643
3644        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3645
3646        iommu = amd_iommu_rlookup_table[devid];
3647        if (!iommu)
3648                goto out_unlock;
3649
3650        table = irq_lookup_table[devid];
3651        if (table)
3652                goto out;
3653
3654        alias = amd_iommu_alias_table[devid];
3655        table = irq_lookup_table[alias];
3656        if (table) {
3657                irq_lookup_table[devid] = table;
3658                set_dte_irq_entry(devid, table);
3659                iommu_flush_dte(iommu, devid);
3660                goto out;
3661        }
3662
3663        /* Nothing there yet, allocate new irq remapping table */
3664        table = kzalloc(sizeof(*table), GFP_ATOMIC);
3665        if (!table)
3666                goto out;
3667
3668        /* Initialize table spin-lock */
3669        spin_lock_init(&table->lock);
3670
3671        if (ioapic)
3672                /* Keep the first 32 indexes free for IOAPIC interrupts */
3673                table->min_index = 32;
3674
3675        table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
3676        if (!table->table) {
3677                kfree(table);
3678                table = NULL;
3679                goto out;
3680        }
3681
3682        memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
3683
3684        if (ioapic) {
3685                int i;
3686
3687                for (i = 0; i < 32; ++i)
3688                        table->table[i] = IRTE_ALLOCATED;
3689        }
3690
3691        irq_lookup_table[devid] = table;
3692        set_dte_irq_entry(devid, table);
3693        iommu_flush_dte(iommu, devid);
3694        if (devid != alias) {
3695                irq_lookup_table[alias] = table;
3696                set_dte_irq_entry(alias, table);
3697                iommu_flush_dte(iommu, alias);
3698        }
3699
3700out:
3701        iommu_completion_wait(iommu);
3702
3703out_unlock:
3704        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3705
3706        return table;
3707}
3708
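/*
 * Reserve @count consecutive free entries in the interrupt remapping
 * table of @devid and return the index of the first one. Returns
 * -ENODEV without a table and -ENOSPC if no large enough run of free
 * entries is left.
 */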
3709static int alloc_irq_index(u16 devid, int count)
3710{
3711        struct irq_remap_table *table;
3712        unsigned long flags;
3713        int index, c;
3714
3715        table = get_irq_table(devid, false);
3716        if (!table)
3717                return -ENODEV;
3718
3719        spin_lock_irqsave(&table->lock, flags);
3720
3721        /* Scan table for free entries */
3722        for (c = 0, index = table->min_index;
3723             index < MAX_IRQS_PER_TABLE;
3724             ++index) {
3725                if (table->table[index] == 0)
3726                        c += 1;
3727                else
3728                        c = 0;
3729
3730                if (c == count) {
3731                        for (; c != 0; --c)
3732                                table->table[index - c + 1] = IRTE_ALLOCATED;
3733
3734                        index -= count - 1;
3735                        goto out;
3736                }
3737        }
3738
3739        index = -ENOSPC;
3740
3741out:
3742        spin_unlock_irqrestore(&table->lock, flags);
3743
3744        return index;
3745}
3746
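/*
 * Write @irte into slot @index of the remapping table of @devid and
 * flush the IOMMU's interrupt table cache so the hardware picks up
 * the new entry.
 */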
3747static int modify_irte(u16 devid, int index, union irte irte)
3748{
3749        struct irq_remap_table *table;
3750        struct amd_iommu *iommu;
3751        unsigned long flags;
3752
3753        iommu = amd_iommu_rlookup_table[devid];
3754        if (iommu == NULL)
3755                return -EINVAL;
3756
3757        table = get_irq_table(devid, false);
3758        if (!table)
3759                return -ENOMEM;
3760
3761        spin_lock_irqsave(&table->lock, flags);
3762        table->table[index] = irte.val;
3763        spin_unlock_irqrestore(&table->lock, flags);
3764
3765        iommu_flush_irt(iommu, devid);
3766        iommu_completion_wait(iommu);
3767
3768        return 0;
3769}
3770
3771static void free_irte(u16 devid, int index)
3772{
3773        struct irq_remap_table *table;
3774        struct amd_iommu *iommu;
3775        unsigned long flags;
3776
3777        iommu = amd_iommu_rlookup_table[devid];
3778        if (iommu == NULL)
3779                return;
3780
3781        table = get_irq_table(devid, false);
3782        if (!table)
3783                return;
3784
3785        spin_lock_irqsave(&table->lock, flags);
3786        table->table[index] = 0;
3787        spin_unlock_irqrestore(&table->lock, flags);
3788
3789        iommu_flush_irt(iommu, devid);
3790        iommu_completion_wait(iommu);
3791}
3792
3793static int get_devid(struct irq_alloc_info *info)
3794{
3795        int devid = -1;
3796
3797        switch (info->type) {
3798        case X86_IRQ_ALLOC_TYPE_IOAPIC:
3799                devid     = get_ioapic_devid(info->ioapic_id);
3800                break;
3801        case X86_IRQ_ALLOC_TYPE_HPET:
3802                devid     = get_hpet_devid(info->hpet_id);
3803                break;
3804        case X86_IRQ_ALLOC_TYPE_MSI:
3805        case X86_IRQ_ALLOC_TYPE_MSIX:
3806                devid = get_device_id(&info->msi_dev->dev);
3807                break;
3808        default:
3809                BUG();
3810                break;
3811        }
3812
3813        return devid;
3814}
3815
3816static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
3817{
3818        struct amd_iommu *iommu;
3819        int devid;
3820
3821        if (!info)
3822                return NULL;
3823
3824        devid = get_devid(info);
3825        if (devid >= 0) {
3826                iommu = amd_iommu_rlookup_table[devid];
3827                if (iommu)
3828                        return iommu->ir_domain;
3829        }
3830
3831        return NULL;
3832}
3833
3834static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
3835{
3836        struct amd_iommu *iommu;
3837        int devid;
3838
3839        if (!info)
3840                return NULL;
3841
3842        switch (info->type) {
3843        case X86_IRQ_ALLOC_TYPE_MSI:
3844        case X86_IRQ_ALLOC_TYPE_MSIX:
3845                devid = get_device_id(&info->msi_dev->dev);
3846                if (devid >= 0) {
3847                        iommu = amd_iommu_rlookup_table[devid];
3848                        if (iommu)
3849                                return iommu->msi_domain;
3850                }
3851                break;
3852        default:
3853                break;
3854        }
3855
3856        return NULL;
3857}
3858
3859struct irq_remap_ops amd_iommu_irq_ops = {
3860        .prepare                = amd_iommu_prepare,
3861        .enable                 = amd_iommu_enable,
3862        .disable                = amd_iommu_disable,
3863        .reenable               = amd_iommu_reenable,
3864        .enable_faulting        = amd_iommu_enable_faulting,
3865        .get_ir_irq_domain      = get_ir_irq_domain,
3866        .get_irq_domain         = get_irq_domain,
3867};
3868
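/*
 * Fill in the IRTE for one interrupt and prepare the matching IOAPIC
 * routing entry or MSI message. In remapped format the IOAPIC entry
 * and the MSI data carry the IRTE index; the real vector and
 * destination live in the IRTE itself.
 */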
3869static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3870                                       struct irq_cfg *irq_cfg,
3871                                       struct irq_alloc_info *info,
3872                                       int devid, int index, int sub_handle)
3873{
3874        struct irq_2_irte *irte_info = &data->irq_2_irte;
3875        struct msi_msg *msg = &data->msi_entry;
3876        union irte *irte = &data->irte_entry;
3877        struct IO_APIC_route_entry *entry;
3878
3879        data->irq_2_irte.devid = devid;
3880        data->irq_2_irte.index = index + sub_handle;
3881
3882        /* Setup IRTE for IOMMU */
3883        irte->val = 0;
3884        irte->fields.vector      = irq_cfg->vector;
3885        irte->fields.int_type    = apic->irq_delivery_mode;
3886        irte->fields.destination = irq_cfg->dest_apicid;
3887        irte->fields.dm          = apic->irq_dest_mode;
3888        irte->fields.valid       = 1;
3889
3890        switch (info->type) {
3891        case X86_IRQ_ALLOC_TYPE_IOAPIC:
3892                /* Setup IOAPIC entry */
3893                entry = info->ioapic_entry;
3894                info->ioapic_entry = NULL;
3895                memset(entry, 0, sizeof(*entry));
3896                entry->vector        = index;
3897                entry->mask          = 0;
3898                entry->trigger       = info->ioapic_trigger;
3899                entry->polarity      = info->ioapic_polarity;
3900                /* Mask level triggered irqs. */
3901                if (info->ioapic_trigger)
3902                        entry->mask = 1;
3903                break;
3904
3905        case X86_IRQ_ALLOC_TYPE_HPET:
3906        case X86_IRQ_ALLOC_TYPE_MSI:
3907        case X86_IRQ_ALLOC_TYPE_MSIX:
3908                msg->address_hi = MSI_ADDR_BASE_HI;
3909                msg->address_lo = MSI_ADDR_BASE_LO;
3910                msg->data = irte_info->index;
3911                break;
3912
3913        default:
3914                BUG();
3915                break;
3916        }
3917}
3918
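/*
 * irq_domain allocation callback: allocate the parent (vector domain)
 * resources, reserve IRTE slots for @nr_irqs interrupts (reusing the
 * IOAPIC pin as index for IOAPIC interrupts) and attach an amd_ir_data
 * to every irq_data so the IRTE can be programmed on activation.
 */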
3919static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3920                               unsigned int nr_irqs, void *arg)
3921{
3922        struct irq_alloc_info *info = arg;
3923        struct irq_data *irq_data;
3924        struct amd_ir_data *data;
3925        struct irq_cfg *cfg;
3926        int i, ret, devid;
3927        int index = -1;
3928
3929        if (!info)
3930                return -EINVAL;
3931        if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
3932            info->type != X86_IRQ_ALLOC_TYPE_MSIX)
3933                return -EINVAL;
3934
3935        /*
3936         * With IRQ remapping enabled, we don't need contiguous CPU vectors
3937         * to support multiple MSI interrupts.
3938         */
3939        if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
3940                info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
3941
3942        devid = get_devid(info);
3943        if (devid < 0)
3944                return -EINVAL;
3945
3946        ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3947        if (ret < 0)
3948                return ret;
3949
3950        ret = -ENOMEM;
3951        data = kzalloc(sizeof(*data), GFP_KERNEL);
3952        if (!data)
3953                goto out_free_parent;
3954
3955        if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
3956                if (get_irq_table(devid, true))
3957                        index = info->ioapic_pin;
3958                else
3959                        ret = -ENOMEM;
3960        } else {
3961                index = alloc_irq_index(devid, nr_irqs);
3962        }
3963        if (index < 0) {
3964                pr_warn("Failed to allocate IRTE\n");
3965                kfree(data);
3966                goto out_free_parent;
3967        }
3968
3969        for (i = 0; i < nr_irqs; i++) {
3970                irq_data = irq_domain_get_irq_data(domain, virq + i);
3971                cfg = irqd_cfg(irq_data);
3972                if (!irq_data || !cfg) {
3973                        ret = -EINVAL;
3974                        goto out_free_data;
3975                }
3976
3977                if (i > 0) {
3978                        data = kzalloc(sizeof(*data), GFP_KERNEL);
3979                        if (!data)
3980                                goto out_free_data;
3981                }
3982                irq_data->hwirq = (devid << 16) + i;
3983                irq_data->chip_data = data;
3984                irq_data->chip = &amd_ir_chip;
3985                irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3986                irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
3987        }
3988        return 0;
3989
3990out_free_data:
3991        for (i--; i >= 0; i--) {
3992                irq_data = irq_domain_get_irq_data(domain, virq + i);
3993                if (irq_data)
3994                        kfree(irq_data->chip_data);
3995        }
3996        for (i = 0; i < nr_irqs; i++)
3997                free_irte(devid, index + i);
3998out_free_parent:
3999        irq_domain_free_irqs_common(domain, virq, nr_irqs);
4000        return ret;
4001}
4002
4003static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
4004                               unsigned int nr_irqs)
4005{
4006        struct irq_2_irte *irte_info;
4007        struct irq_data *irq_data;
4008        struct amd_ir_data *data;
4009        int i;
4010
4011        for (i = 0; i < nr_irqs; i++) {
4012                irq_data = irq_domain_get_irq_data(domain, virq + i);
4013                if (irq_data && irq_data->chip_data) {
4014                        data = irq_data->chip_data;
4015                        irte_info = &data->irq_2_irte;
4016                        free_irte(irte_info->devid, irte_info->index);
4017                        kfree(data);
4018                }
4019        }
4020        irq_domain_free_irqs_common(domain, virq, nr_irqs);
4021}
4022
4023static void irq_remapping_activate(struct irq_domain *domain,
4024                                   struct irq_data *irq_data)
4025{
4026        struct amd_ir_data *data = irq_data->chip_data;
4027        struct irq_2_irte *irte_info = &data->irq_2_irte;
4028
4029        modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
4030}
4031
4032static void irq_remapping_deactivate(struct irq_domain *domain,
4033                                     struct irq_data *irq_data)
4034{
4035        struct amd_ir_data *data = irq_data->chip_data;
4036        struct irq_2_irte *irte_info = &data->irq_2_irte;
4037        union irte entry;
4038
4039        entry.val = 0;
4040        modify_irte(irte_info->devid, irte_info->index, entry);
4041}
4042
4043static struct irq_domain_ops amd_ir_domain_ops = {
4044        .alloc = irq_remapping_alloc,
4045        .free = irq_remapping_free,
4046        .activate = irq_remapping_activate,
4047        .deactivate = irq_remapping_deactivate,
4048};
4049
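/*
 * Affinity change for a remapped interrupt: let the parent vector
 * domain pick a new vector and destination, write them into the IRTE
 * and flush it, then clean up the old vector. The device-visible MSI
 * data is not touched, only the IRTE changes.
 */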
4050static int amd_ir_set_affinity(struct irq_data *data,
4051                               const struct cpumask *mask, bool force)
4052{
4053        struct amd_ir_data *ir_data = data->chip_data;
4054        struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
4055        struct irq_cfg *cfg = irqd_cfg(data);
4056        struct irq_data *parent = data->parent_data;
4057        int ret;
4058
4059        ret = parent->chip->irq_set_affinity(parent, mask, force);
4060        if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
4061                return ret;
4062
4063        /*
4064         * Atomically update the IRTE with the new destination and
4065         * vector, then flush the interrupt entry cache.
4066         */
4067        ir_data->irte_entry.fields.vector = cfg->vector;
4068        ir_data->irte_entry.fields.destination = cfg->dest_apicid;
4069        modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
4070
4071        /*
4072         * After this point, all the interrupts will start arriving
4073         * at the new destination. So, time to clean up the previous
4074         * vector allocation.
4075         */
4076        send_cleanup_vector(cfg);
4077
4078        return IRQ_SET_MASK_OK_DONE;
4079}
4080
4081static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
4082{
4083        struct amd_ir_data *ir_data = irq_data->chip_data;
4084
4085        *msg = ir_data->msi_entry;
4086}
4087
4088static struct irq_chip amd_ir_chip = {
4089        .irq_ack = ir_ack_apic_edge,
4090        .irq_set_affinity = amd_ir_set_affinity,
4091        .irq_compose_msi_msg = ir_compose_msi_msg,
4092};
4093
4094int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
4095{
4096        iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
4097        if (!iommu->ir_domain)
4098                return -ENOMEM;
4099
4100        iommu->ir_domain->parent = arch_get_ir_parent_domain();
4101        iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
4102
4103        return 0;
4104}
4105#endif
4106