linux/arch/x86/kernel/amd_iommu_init.c
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/iommu.h>
#include <asm/gart.h>

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 reserved;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
} __attribute__((packed));
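
/*
 * Note: this struct matches both the 4-byte and the 8-byte IVHD device
 * entry layouts; the parser below only dereferences 'ext' for the
 * extended entry types (0x4x), which are 8 bytes long (see
 * ivhd_entry_length()).
 */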

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;

static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
#ifdef CONFIG_IOMMU_STRESS
bool amd_iommu_isolate = false;
#else
bool amd_iommu_isolate = true;          /* if true, device isolation is
                                           enabled */
#endif

bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to, as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * The pd table (protection domain table) is used to find the protection domain
 * data structure a device belongs to. Indexed with the PCI device id too.
 */
struct protection_domain **amd_iommu_pd_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                         get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}
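
/*
 * Worked example (a sketch, assuming 4k pages and a 32-byte device table
 * entry size, which is what this hardware generation uses): with
 * amd_iommu_last_bdf == 0xffff we need 0x10000 * 32 = 2MB, get_order()
 * yields 9 and tbl_size() returns 1UL << (12 + 9) = 2MB. The table sizes
 * are therefore always rounded up to a power-of-two number of pages.
 */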

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                        &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                        &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                        &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
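
/*
 * Note that the two helpers above do an unlocked read-modify-write of the
 * low 32 bits of the IOMMU control register via readl()/writel(); nothing
 * here provides atomicity against concurrent callers.
 */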

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
               dev_name(&iommu->dev->dev), iommu->cap_ptr);

        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
        u8 *ret;

        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
                return NULL;

        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
        if (ret != NULL)
                return ret;

        release_mem_region(address, MMIO_REGION_LENGTH);

        return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        return 0x04 << (*ivhd >> 6);
}
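
/*
 * The two most significant bits of the entry type byte encode the entry
 * size: 0 -> 4 bytes, 1 -> 8 bytes, 2 -> 16 bytes, 3 -> 32 bytes. So a
 * DEV_SELECT entry (type 0x02) is 4 bytes long while a DEV_ALIAS entry
 * (type 0x42) is 8 bytes long.
 */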

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
        u32 cap;

        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

        return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether there is a higher device id defined in the
 * ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                        PCI_SLOT(h->devid),
                        PCI_FUNC(h->devid),
                        h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0)
                /* ACPI table corrupt */
                return -ENODEV;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup tables and do the
 * basic hardware initialization.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                        get_order(CMD_BUFFER_SIZE));

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE;

        return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
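
/*
 * The command buffer is a ring buffer: the driver queues commands at the
 * tail pointer and the hardware fetches them from the head pointer.
 * Setting head == tail == 0 while CMDBUF_EN is off therefore discards
 * anything still queued and restarts the ring from the top.
 */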

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf,
                   get_order(iommu->cmd_buf_size));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(EVT_BUFFER_SIZE));

        if (iommu->evt_buf == NULL)
                return NULL;

        iommu->evt_buf_size = EVT_BUFFER_SIZE;

        return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}
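
/*
 * The event log is the mirror image of the command buffer: here the
 * hardware writes event entries at the tail pointer and the driver's
 * event interrupt handler consumes them from the head.
 */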

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
}
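
/*
 * A device table entry is 256 bits wide, kept as an array of eight 32-bit
 * words; 'bit >> 5' selects the word and 'bit & 0x1f' the bit within it,
 * so the DEV_ENTRY_* constants are simply absolute bit positions inside
 * the entry.
 */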

void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}
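
/*
 * Erratum 63 workaround: when the two SysMgt bits decode to 01b the IW
 * (write permission) bit must also be set in the device table entry.
 * The hardware rationale lives in the published erratum text; the code
 * above just implements that workaround.
 */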

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We can only configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_FD(range));
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
}
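
/*
 * The range register read above packs a bus number plus the first
 * (MMIO_GET_FD) and last (MMIO_GET_LD) device handled on that bus into
 * one dword; calc_devid() combines bus and devfn into the 16-bit device
 * id used as the index for all the tables above.
 */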

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                        struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
        u32 ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First set the recommended feature enable bits from ACPI
         * into the IOMMU control registers
         */
        h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        h->flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
                                    " last device %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(iommu->first_device),
                                    PCI_SLOT(iommu->first_device),
                                    PCI_FUNC(iommu->first_device),
                                    PCI_BUS(iommu->last_device),
                                    PCI_SLOT(iommu->last_device),
                                    PCI_FUNC(iommu->last_device),
                                    e->flags);

                        for (dev_i = iommu->first_device;
                                        dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }
}
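
/*
 * Note how the *_RANGE_START, ALIAS_RANGE and EXT_SELECT_RANGE cases
 * above only stash devid_start, flags, ext_flags and the alias target;
 * the device table entries for the whole range are written in one go
 * when the matching DEV_RANGE_END entry is reached.
 */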

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u16 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        spin_lock_init(&iommu->lock);
        list_add_tail(&iommu->list, &amd_iommu_list);

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
        if (!iommu->dev)
                return 1;

        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        iommu->evt_buf = alloc_event_buffer(iommu);
        if (!iommu->evt_buf)
                return -ENOMEM;

        iommu->int_enabled = false;

        init_iommu_from_pci(iommu);
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);

        return pci_enable_device(iommu->dev);
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (*p) {
                case ACPI_IVHD_TYPE:

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk("       mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;
                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int __init iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        if (pci_enable_msi(iommu->dev))
                return 1;

        r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
                        IRQF_SAMPLE_RANDOM,
                        "AMD-Vi",
                        NULL);

        if (r) {
                pci_disable_msi(iommu->dev);
                return 1;
        }

        iommu->int_enabled = true;
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
        if (iommu->int_enabled)
                return 0;

        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
                return iommu_setup_msi(iommu);

        return 1;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}
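
/*
 * The 'm->flags >> 1' above relies on the IVMD flag layout: bit 0 is the
 * unity-map flag and bits 1/2 are the read/write permission bits, which
 * after the shift line up with the driver's IOMMU_PROT_IR/IW protection
 * bits (worth double-checking against the IOMMU spec if the flag layout
 * ever changes).
 */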

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
        u16 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_disable(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
                iommu_set_exclusion_range(iommu);
                iommu_init_msi(iommu);
                iommu_enable(iommu);
        }
}

static void disable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_disable(iommu);
}
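
/*
 * Ordering in enable_iommus() matters: each IOMMU is first disabled,
 * then the device table, command buffer, event buffer and exclusion
 * range are programmed, and only after everything is set up is the
 * IOMMU itself switched on.
 */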

/*
 * Suspend/Resume support
 */

static int amd_iommu_resume(struct sys_device *dev)
{
        /* re-load the hardware */
        enable_iommus();

        /*
         * we have to flush after the IOMMUs are enabled because a
         * disabled IOMMU will never execute the commands we send
         */
        amd_iommu_flush_all_devices();
        amd_iommu_flush_all_domains();

        return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
        /* disable IOMMUs to go out of the way for BIOS */
        disable_iommus();

        return 0;
}

static struct sysdev_class amd_iommu_sysdev_class = {
        .name = "amd_iommu",
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
        .id = 0,
        .cls = &amd_iommu_sysdev_class,
};

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *      1 pass) Find the highest PCI device id the driver has to handle.
 *              Based on this information the sizes of the data structures
 *              that need to be allocated are determined.
 *
 *      2 pass) Initialize the data structures just allocated with the
 *              information in the ACPI table about available AMD IOMMUs
 *              in the system. It also maps the PCI devices in the
 *              system to specific IOMMUs
 *
 *      3 pass) After the basic data structures are allocated and
 *              initialized we update them with information about memory
 *              remapping requirements parsed out of the ACPI table in
 *              this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
int __init amd_iommu_init(void)
{
        int i, ret = 0;

        if (no_iommu) {
                printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
                return 0;
        }

        if (!amd_iommu_detected)
                return -ENODEV;

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Based on this information the shared data
         * structures for the IOMMUs in the system will be allocated
         */
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;

        dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        ret = -ENOMEM;

        /* Device table - directly used by all IOMMUs */
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                      get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMU sees for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                        get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto free;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;

        /*
         * Protection Domain table - maps devices to protection domains
         * This table has the same size as the rlookup_table
         */
        amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                     get_order(rlookup_table_size));
        if (amd_iommu_pd_table == NULL)
                goto free;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                            GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;

        /* init the device table */
        init_device_table();

        /*
         * let all alias entries point to themselves
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        /*
         * now the data structures are allocated and basically initialized
         * start the real acpi table scan
         */
        ret = -ENODEV;
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;

        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;

        ret = sysdev_class_register(&amd_iommu_sysdev_class);
        if (ret)
                goto free;

        ret = sysdev_register(&device_amd_iommu);
        if (ret)
                goto free;

        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
                ret = amd_iommu_init_dma_ops();
        if (ret)
                goto free;

        enable_iommus();

        if (iommu_pass_through)
                goto out;

        printk(KERN_INFO "AMD-Vi: device isolation ");
        if (amd_iommu_isolate)
                printk("enabled\n");
        else
                printk("disabled\n");

        if (amd_iommu_unmap_flush)
                printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

out:
        return ret;

free:
        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));

        free_pages((unsigned long)amd_iommu_pd_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));

        free_iommu_all();

        free_unity_maps();

        goto out;
}

void amd_iommu_shutdown(void)
{
        disable_iommus();
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It simply checks whether an IVRS ACPI table exists to detect
 * AMD IOMMUs
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
        return 0;
}

void __init amd_iommu_detect(void)
{
        if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture))
                return;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
                iommu_detected = 1;
                amd_iommu_detected = 1;
#ifdef CONFIG_GART_IOMMU
                gart_iommu_aperture_disabled = 1;
                gart_iommu_aperture = 0;
#endif
        }
}
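
/*
 * acpi_table_parse() returns 0 only if the named table was found (and
 * the handler ran), so the empty early_amd_iommu_detect() callback is
 * just there to satisfy the interface; finding the IVRS table at all is
 * the actual detection criterion.
 */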

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
        amd_iommu_dump = true;

        return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "isolate", 7) == 0)
                        amd_iommu_isolate = true;
                if (strncmp(str, "share", 5) == 0)
                        amd_iommu_isolate = false;
                if (strncmp(str, "fullflush", 9) == 0)
                        amd_iommu_unmap_flush = true;
        }

        return 1;
}
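
/*
 * Example usage on the kernel command line (the option names come from
 * the string matches above):
 *
 *      amd_iommu_dump          enable the DUMP_printk() output while
 *                              parsing the IVRS table
 *      amd_iommu=fullflush     flush the IO/TLB on every unmap
 *      amd_iommu=share         disable device isolation
 *
 * parse_amd_iommu_options() scans the whole option string, so several
 * options can be combined, e.g. "amd_iommu=isolate,fullflush".
 */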

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);