linux/arch/x86/kernel/amd_gart_64.c
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

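/*
 * GART page table entry layout, as implied by GPTE_ENCODE()/GPTE_DECODE()
 * below: bits [31:12] of the physical address stay in place, bits [39:32]
 * are folded into PTE bits [11:4], and bits 0/1 carry the valid and
 * coherent flags. Only 40 physical address bits fit, which is why remap
 * targets are limited to GART_MAX_PHYS_ADDR further down.
 */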
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR      (1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;         /* global flush state. set for each gart wrap */

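/*
 * Allocate a run of 'size' pages in the GART aperture bitmap. Allocation
 * is next-fit starting at next_bit; on wrap-around (or when iommu_fullflush
 * is set) a GART TLB flush is requested via need_flush. Returns the page
 * offset into the aperture, or -1 if no free range was found.
 */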
static unsigned long alloc_iommu(struct device *dev, int size,
                                 unsigned long align_mask)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, align_mask);
        if (offset == -1) {
                need_flush = true;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size,
                                          align_mask);
        }
        if (offset != -1) {
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = true;
                }
        }
        if (iommu_fullflush)
                need_flush = true;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}

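/*
 * Release a range previously handed out by alloc_iommu(). If the freed
 * range sits at or past next_bit, next_bit is moved beyond it, which keeps
 * lazily flushed entries from being reused before the next wrap-around
 * flush.
 */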
static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        bitmap_clear(iommu_gart_bitmap, offset, size);
        if (offset >= next_bit)
                next_bit = offset + size;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                amd_flush_garts();
                need_flush = false;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        static int dump;

        if (dump)
                return;
        dump = 1;

        show_stack(NULL, NULL);
        debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped pre-reserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the pre-reserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic(KERN_ERR
                                "PCI-DMA: Random memory would be DMAed\n");
        }
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}

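/*
 * need_iommu: remap through the GART when the IOMMU is forced on or the
 * device cannot reach the physical address directly.
 * nonforced_iommu: remap only when the device cannot reach the address,
 * ignoring force_iommu.
 */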
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return !dma_capable(dev, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                                size_t size, int dir, unsigned long align_mask)
{
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
        unsigned long iommu_page;
        int i;

        if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
                return bad_dma_addr;

        iommu_page = alloc_iommu(dev, npages, align_mask);
        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_addr;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                unsigned long attrs)
{
        unsigned long bus;
        phys_addr_t paddr = page_to_phys(page) + offset;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        if (!need_iommu(dev, paddr, size))
                return paddr;

        bus = dma_map_area(dev, paddr, size, dir, 0);
        flush_gart();

        return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
                            size_t size, enum dma_data_direction dir,
                            unsigned long attrs)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
        }
        free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                          enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
        }
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        pr_debug("dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_addr) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir, 0);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages, 0);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}

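/*
 * Map a run of scatterlist entries: when no remapping is needed the single
 * entry is passed through untouched, otherwise the run is mapped as one
 * contiguous GART region by __dma_map_cont().
 */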
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                       enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        out             = 0;
        start           = 0;
        start_sg        = sg;
        sgmap           = sg;
        seg_size        = 0;
        max_seg_size    = dma_get_max_seg_size(dev);
        ps              = NULL; /* shut up gcc */

        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;

                                seg_size        = 0;
                                sgmap           = sg_next(sgmap);
                                pages           = 0;
                                start           = i;
                                start_sg        = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir, 0);

        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_addr;
        return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag, unsigned long attrs)
{
        void *vaddr;

        vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
        if (!vaddr ||
            !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
                return vaddr;

        *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
                        DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
        flush_gart();
        if (unlikely(*dma_addr == bad_dma_addr))
                goto out_free;
        return vaddr;
out_free:
        dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
        return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr, unsigned long attrs)
{
        gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
        dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return (dma_addr == bad_dma_addr);
}

static int no_agp;

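/*
 * Decide how much of the aperture to use as the IOMMU remapping area:
 * the whole aperture when AGP is out of the picture, half of it when the
 * AGP driver also uses it, unless an explicit size was given on the
 * command line. Warn when less than 64MB is left for remapping.
 */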
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                pr_warning(
                        "PCI-DMA: Warning: Small IOMMU %luMB."
                        " Consider increasing the AGP aperture in BIOS\n",
                                iommu_size >> 20);
        }

        return iommu_size;
}

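/*
 * Read the GART aperture base and size from a northbridge's config space.
 * The base register holds bits [39:25] of the physical base, the control
 * register encodes the size as 32MB << order. Returns 0 when the aperture
 * is missing or does not fit below 4GB.
 */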
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}

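/*
 * Point every northbridge at the shared GATT and switch GART translation
 * on, then flush the GART TLBs so no stale entries survive.
 */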
static void enable_gart_translations(void)
{
        int i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        for (i = 0; i < amd_nb_num(); i++) {
                struct pci_dev *dev = node_to_amd_nb(i)->misc;

                enable_gart_translation(dev, __pa(agp_gatt_table));
        }

        /* Flush the GART-TLB to remove stale entries */
        amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
        fix_up_north_bridges = true;
        aperture_order = aper_order;
        aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
        int i;

        if (!fix_up_north_bridges)
                return;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        pr_info("PCI-DMA: Restoring GART aperture settings\n");

        for (i = 0; i < amd_nb_num(); i++) {
                struct pci_dev *dev = node_to_amd_nb(i)->misc;

                /*
                 * Don't enable translations just yet.  That is the next
                 * step.  Restore the pre-suspend aperture settings.
                 */
                gart_set_size_and_enable(dev, aperture_order);
                pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
        }
}

static void gart_resume(void)
{
        pr_info("PCI-DMA: Resuming GART IOMMU\n");

        gart_fixup_northbridges();

        enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
        .resume         = gart_resume,

};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i;

        pr_info("PCI-DMA: Disabling AGP.\n");

        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < amd_nb_num(); i++) {
                dev = node_to_amd_nb(i)->misc;
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;

        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        agp_gatt_table = gatt;

        register_syscore_ops(&gart_syscore_ops);

        flush_gart();

        pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size>>10);

        return 0;

 nommu:
        /* Should not happen anymore */
        pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               "falling back to iommu=soft.\n");
        return -1;
}

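/*
 * dma_map_ops installed by gart_iommu_init(). Coherent allocations go
 * through dma-direct first and are only remapped through the GART when
 * force_iommu requires it (see gart_alloc_coherent() above).
 */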
static const struct dma_map_ops gart_dma_ops = {
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
        .map_page                       = gart_map_page,
        .unmap_page                     = gart_unmap_page,
        .alloc                          = gart_alloc_coherent,
        .free                           = gart_free_coherent,
        .mapping_error                  = gart_mapping_error,
        .dma_supported                  = dma_direct_supported,
};

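/*
 * Hooked up as x86_platform.iommu_shutdown by gart_iommu_init(): turn off
 * GART translation on every northbridge unless the AGP driver still owns
 * the aperture.
 */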
static void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        /* don't shut it down if there is AGP installed */
        if (!no_agp)
                return;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        for (i = 0; i < amd_nb_num(); i++) {
                u32 ctl;

                dev = node_to_amd_nb(i)->misc;
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

                ctl &= ~GARTEN;

                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
}

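/*
 * Main GART IOMMU setup: bail out when no northbridge GART is present or
 * not needed, set up the GATT (via the AGP driver or init_amd_gatt()),
 * reserve part of the aperture as the remapping area, unmap it from the
 * kernel direct mapping, flush caches, enable translation on all
 * northbridges, point unused GART entries at a scratch page and finally
 * install gart_dma_ops.
 */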
int __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_base, aper_size;
        unsigned long start_pfn, end_pfn;
        unsigned long scratch;
        long i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return 0;

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other AMD AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_amd_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
                        pr_warning("falling back to iommu=soft.\n");
                }
                return 0;
        }

        /* need to map that range */
        aper_size       = info.aper_size << 20;
        aper_base       = info.aper_base;
        end_pfn         = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

        start_pfn = PFN_DOWN(aper_base);
        if (!pfn_range_is_mapped(start_pfn, end_pfn))
                init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

        pr_info("PCI-DMA: using GART IOMMU.\n");
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                      get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                int ret;

                ret = dma_debug_resize_entries(iommu_pages);
                if (ret)
                        pr_debug("PCI-DMA: Cannot trace all the entries\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

        agp_memory_reserved     = iommu_size;
        iommu_start             = aper_size - iommu_size;
        iommu_bus_base          = info.aper_base + iommu_start;
        bad_dma_addr            = iommu_bus_base;
        iommu_gatt_base         = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                                iommu_size >> PAGE_SHIFT);
        /*
         * Tricky. The GART table remaps the physical memory range,
         * so the CPU won't notice potential aliases and if the memory
         * is remapped to UC later on, we might surprise the PCI devices
         * with a stray writeout of a cacheline. So play it safe and
         * do an explicit, full-scale wbinvd() _after_ having marked all
         * the pages as Not-Present:
         */
        wbinvd();

        /*
         * Now all caches are flushed and we can safely enable
         * GART hardware.  Doing it early leaves the possibility
         * of stale cache entries that can lead to GART PTE
         * errors.
         */
        enable_gart_translations();

        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
        x86_platform.iommu_shutdown = gart_iommu_shutdown;
        swiotlb = 0;

        return 0;
}

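/*
 * Parse the GART-specific pieces of the iommu= boot option: an explicit
 * remapping-area size, leak[=pages] (with CONFIG_IOMMU_LEAK), fullflush,
 * nofullflush, noagp, noaperture, force, allowed and memaper[=order].
 */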
void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=')
                        ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 9))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}
IOMMU_INIT_POST(gart_iommu_hole_init);