linux/arch/ia64/hp/common/sba_iommu.c
/*
**  IA64 System Bus Adapter (SBA) I/O MMU manager
**
**      (c) Copyright 2002-2005 Alex Williamson
**      (c) Copyright 2002-2003 Grant Grundler
**      (c) Copyright 2002-2005 Hewlett-Packard Company
**
**      Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
**      Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
**      This program is free software; you can redistribute it and/or modify
**      it under the terms of the GNU General Public License as published by
**      the Free Software Foundation; either version 2 of the License, or
**      (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>         /* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>

#include <asm/delay.h>          /* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>           /* PAGE_OFFSET */
#include <asm/dma.h>
#include <asm/system.h>         /* wmb() */

#include <asm/acpi-ext.h>

extern int swiotlb_late_init_with_default_size (size_t size);

#define PFX "IOC: "

/*
** Enabling timing search of the pdir resource map.  Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif

#define SBA_INLINE      __inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)  printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)   printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)        printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)        printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
        if(!(expr)) { \
                printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
                panic(#expr); \
        }
#else
#define ASSERT(expr)
#endif

/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT    64

#define PCI_DEVICE_ID_HP_SX2000_IOC     0x12ec

#define ZX1_IOC_ID      ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID      ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID      ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID   ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID   ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET  0x1000  /* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID     0x000
#define IOC_FCLASS      0x008   /* function class, bist, header, rev... */
#define IOC_IBASE       0x300   /* IO TLB */
#define IOC_IMASK       0x308
#define IOC_PCOM        0x310
#define IOC_TCNFG       0x318
#define IOC_PDIR_BASE   0x320

#define IOC_ROPE0_CFG   0x500
#define   IOC_ROPE_AO     0x10  /* Allow "Relaxed Ordering" */


/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE    0x0000badbadc0ffeeUL

/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;

struct ioc {
        void __iomem    *ioc_hpa;       /* I/O MMU base address */
        char            *res_map;       /* resource map, bit == pdir entry */
        u64             *pdir_base;     /* physical base address */
        unsigned long   ibase;          /* pdir IOV Space base */
        unsigned long   imask;          /* pdir IOV Space mask */

        unsigned long   *res_hint;      /* next avail IOVP - circular search */
        unsigned long   dma_mask;
        spinlock_t      res_lock;       /* protects the resource bitmap, but must be held when */
                                        /* clearing pdir to prevent races with allocations. */
        unsigned int    res_bitshift;   /* from the RIGHT! */
        unsigned int    res_size;       /* size of resource map in bytes */
#ifdef CONFIG_NUMA
        unsigned int    node;           /* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
        spinlock_t      saved_lock;     /* may want to try to get this on a separate cacheline */
                                        /* than res_lock for bigger systems. */
        int             saved_cnt;
        struct sba_dma_pair {
                dma_addr_t      iova;
                size_t          size;
        } saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE       0x100
        unsigned long avg_search[SBA_SEARCH_SAMPLE];
        unsigned long avg_idx;  /* current index into avg_search */
#endif

        /* Stuff we don't need in performance path */
        struct ioc      *next;          /* list of IOC's in system */
        acpi_handle     handle;         /* for multiple IOC's */
        const char      *name;
        unsigned int    func_id;
        unsigned int    rev;            /* HW revision of chip */
        u32             iov_size;
        unsigned int    pdir_size;      /* in bytes, determined by IOV Space size */
        struct pci_dev  *sac_only_dev;
};

static struct ioc *ioc_list;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)      sg_virt((sg))

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI
# define GET_IOC(dev)   (((dev)->bus == &pci_bus_type)                                          \
                         ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev)   NULL
#endif

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)

#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
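
/*
** Worked example (illustrative values): with iovp_size == 4KB and
** BITS_PER_LONG == 64, DMA_CHUNK_SIZE is 256KB, and
** ROUNDUP(0x2345, iovp_size) == 0x3000 -- i.e. round x up to the
** next multiple of the power-of-two granularity y.
*/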

/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
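
/*
** Illustrative sketch of that rule (it mirrors how the unmap path
** below flushes queued IOTLB purges): a posted write to the PCOM
** register is forced out to HW by reading the register back.
**
**      WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
**      READ_REG(ioc->ioc_hpa + IOC_PCOM);      (read flushes the write)
*/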

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
        DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
        DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
        DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
        DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
        DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
        DBG_INIT("\n");
}
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
        /* start printing from lowest pde in rval */
        u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
        unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >> 3) & -sizeof(unsigned long)];
        uint rcnt;

        printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
                 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

        rcnt = 0;
        while (rcnt < BITS_PER_LONG) {
                printk(KERN_DEBUG "%s %2d %p %016Lx\n",
                       (rcnt == (pide & (BITS_PER_LONG - 1)))
                       ? "    -->" : "       ",
                       rcnt, ptr, (unsigned long long) *ptr );
                rcnt++;
                ptr++;
        }
        printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
        u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
        u64 *rptr = (u64 *) ioc->res_map;       /* resource map ptr */
        u64 *pptr = ioc->pdir_base;     /* pdir ptr */
        uint pide = 0;

        while (rptr < rptr_end) {
                u64 rval;
                int rcnt; /* number of bits we might check */

                rval = *rptr;
                rcnt = 64;

                while (rcnt) {
                        /* Get last byte and highest bit from that */
                        u32 pde = ((u32)((*pptr >> (63)) & 0x1));
                        if ((rval & 0x1) ^ pde)
                        {
                                /*
                                ** BUMMER!  -- res_map != pdir --
                                ** Dump rval and matching pdir entries
                                */
                                sba_dump_pdir_entry(ioc, msg, pide);
                                return(1);
                        }
                        rcnt--;
                        rval >>= 1;     /* try the next bit */
                        pptr++;
                        pide++;
                }
                rptr++; /* look at next word of res_map */
        }
        /* It'd be nice if we always got here :^) */
        return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
        while (nents-- > 0) {
                printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
                       startsg->dma_address, startsg->dma_length,
                       sba_sg_address(startsg));
                startsg = sg_next(startsg);
        }
}

static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
        struct scatterlist *the_sg = startsg;
        int the_nents = nents;

        while (the_nents-- > 0) {
                if (sba_sg_address(the_sg) == 0x0UL)
                        sba_dump_sg(NULL, startsg, nents);
                the_sg = sg_next(the_sg);
        }
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1       /* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE sizeof(u64)

#define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)
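
/*
** Worked example (illustrative values, assuming iovp_shift == 12 and
** ioc->ibase == 0x40000000): a buffer at pdir index 0x23 with a 0x345
** byte offset into its first IO page maps to
**
**      iovp = 0x23 << iovp_shift = 0x23000
**      SBA_IOVA(ioc, iovp, 0x345) = 0x40000000 | 0x23000 | 0x345 = 0x40023345
**
** and the unmap path inverts it:
**
**      SBA_IOVP(ioc, 0x40023345) = 0x23345
**      PDIR_INDEX(0x23345) = 0x23      (the shift discards the offset bits)
*/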

#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
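
/*
** For illustration: RESMAP_MASK(4) == 0xf and RESMAP_MASK(1) == 0x1,
** i.e. the n low-order bits set.  RESMAP_IDX_MASK is used to round a
** byte index into res_map down to an unsigned long boundary
** (e.g. ridx 11 & ~7 == 8 on a 64-bit kernel).
*/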


/**
 * For most cases the normal get_order is sufficient; however, it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
        long double d = size - 1;
        long order;

        order = ia64_getf_exp(d);
        order = order - iovp_shift - 0xffff + 1;
        if (order < 0)
                order = 0;
        return order;
}
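
/*
** Worked example (illustrative, assuming iovp_shift == 12, i.e. 4KB
** IO pages): getf.exp returns the biased (0xffff) binary exponent of
** its operand, so for size == 8192, d == 8191.0 =~ 2^12.99... and
**
**      order = (0xffff + 12) - 12 - 0xffff + 1 = 1
**
** i.e. two IO pages.  Likewise get_iovp_order(4096) == 0 (one page)
** and get_iovp_order(4097) == 1 (two pages) -- a ceil(log2) of the
** IO page count.
*/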

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
                                 unsigned int bitshiftcnt)
{
        return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
                + bitshiftcnt;
}
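
/*
** For illustration: a res_ptr two unsigned longs (16 bytes) past
** res_map with bitshiftcnt == 5 yields pide == (16 << 3) + 5 == 133,
** i.e. the byte offset into the map times 8 bits, plus the bit offset
** within the current word.
*/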

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
                  unsigned long bits_wanted, int use_hint)
{
        unsigned long *res_ptr;
        unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
        unsigned long flags, pide = ~0UL, tpide;
        unsigned long boundary_size;
        unsigned long shift;
        int ret;

        ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
        ASSERT(res_ptr < res_end);

        boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
        boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

        BUG_ON(ioc->ibase & ~iovp_mask);
        shift = ioc->ibase >> iovp_shift;

        spin_lock_irqsave(&ioc->res_lock, flags);

        /* Allow caller to force a search through the entire resource space */
        if (likely(use_hint)) {
                res_ptr = ioc->res_hint;
        } else {
                res_ptr = (ulong *)ioc->res_map;
                ioc->res_bitshift = 0;
        }

        /*
         * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
         * if a TLB entry is purged while in use.  sba_mark_invalid()
         * purges IOTLB entries in power-of-two sizes, so we also
         * allocate IOVA space in power-of-two sizes.
         */
        bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

        if (likely(bits_wanted == 1)) {
                unsigned int bitshiftcnt;
                for(; res_ptr < res_end ; res_ptr++) {
                        if (likely(*res_ptr != ~0UL)) {
                                bitshiftcnt = ffz(*res_ptr);
                                *res_ptr |= (1UL << bitshiftcnt);
                                pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
                                ioc->res_bitshift = bitshiftcnt + bits_wanted;
                                goto found_it;
                        }
                }
                goto not_found;

        }

        if (likely(bits_wanted <= BITS_PER_LONG/2)) {
                /*
                ** Search the resource bit map on well-aligned values.
                ** "o" is the alignment.
                ** We need the alignment to invalidate I/O TLB using
                ** SBA HW features in the unmap path.
                */
                unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
                uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
                unsigned long mask, base_mask;

                base_mask = RESMAP_MASK(bits_wanted);
                mask = base_mask << bitshiftcnt;

                DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
                for(; res_ptr < res_end ; res_ptr++)
                {
                        DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
                        ASSERT(0 != mask);
                        for (; mask ; mask <<= o, bitshiftcnt += o) {
                                tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
                                ret = iommu_is_span_boundary(tpide, bits_wanted,
                                                             shift,
                                                             boundary_size);
                                if ((0 == ((*res_ptr) & mask)) && !ret) {
                                        *res_ptr |= mask;     /* mark resources busy! */
                                        pide = tpide;
                                        ioc->res_bitshift = bitshiftcnt + bits_wanted;
                                        goto found_it;
                                }
                        }

                        bitshiftcnt = 0;
                        mask = base_mask;

                }

        } else {
                int qwords, bits, i;
                unsigned long *end;

                qwords = bits_wanted >> 6; /* /64 */
                bits = bits_wanted - (qwords * BITS_PER_LONG);

                end = res_end - qwords;

                for (; res_ptr < end; res_ptr++) {
                        tpide = ptr_to_pide(ioc, res_ptr, 0);
                        ret = iommu_is_span_boundary(tpide, bits_wanted,
                                                     shift, boundary_size);
                        if (ret)
                                goto next_ptr;
                        for (i = 0 ; i < qwords ; i++) {
                                if (res_ptr[i] != 0)
                                        goto next_ptr;
                        }
                        if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
                                continue;

                        /* Found it, mark it */
                        for (i = 0 ; i < qwords ; i++)
                                res_ptr[i] = ~0UL;
                        res_ptr[i] |= RESMAP_MASK(bits);

                        pide = tpide;
                        res_ptr += qwords;
                        ioc->res_bitshift = bits;
                        goto found_it;
next_ptr:
                        ;
                }
        }

not_found:
        prefetch(ioc->res_map);
        ioc->res_hint = (unsigned long *) ioc->res_map;
        ioc->res_bitshift = 0;
        spin_unlock_irqrestore(&ioc->res_lock, flags);
        return (pide);

found_it:
        ioc->res_hint = res_ptr;
        spin_unlock_irqrestore(&ioc->res_lock, flags);
        return (pide);
}


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark them in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
        unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
        unsigned long itc_start;
#endif
        unsigned long pide;

        ASSERT(pages_needed);
        ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
        itc_start = ia64_get_itc();
#endif
        /*
        ** "seek and ye shall find"...praying never hurts either...
        */
        pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
        if (unlikely(pide >= (ioc->res_size << 3))) {
                pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
                if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
                        unsigned long flags;

                        /*
                        ** With delayed resource freeing, we can give this one more shot.  We're
                        ** getting close to being in trouble here, so do what we can to make this
                        ** one count.
                        */
                        spin_lock_irqsave(&ioc->saved_lock, flags);
                        if (ioc->saved_cnt > 0) {
                                struct sba_dma_pair *d;
                                int cnt = ioc->saved_cnt;

                                d = &(ioc->saved[ioc->saved_cnt - 1]);

                                spin_lock(&ioc->res_lock);
                                while (cnt--) {
                                        sba_mark_invalid(ioc, d->iova, d->size);
                                        sba_free_range(ioc, d->iova, d->size);
                                        d--;
                                }
                                ioc->saved_cnt = 0;
                                READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
                                spin_unlock(&ioc->res_lock);
                        }
                        spin_unlock_irqrestore(&ioc->saved_lock, flags);

                        pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
                        if (unlikely(pide >= (ioc->res_size << 3))) {
                                printk(KERN_WARNING "%s: I/O MMU @ %p is "
                                       "out of mapping resources, %u %u %lx\n",
                                       __func__, ioc->ioc_hpa, ioc->res_size,
                                       pages_needed, dma_get_seg_boundary(dev));
                                return -1;
                        }
#else
                        printk(KERN_WARNING "%s: I/O MMU @ %p is "
                               "out of mapping resources, %u %u %lx\n",
                               __func__, ioc->ioc_hpa, ioc->res_size,
                               pages_needed, dma_get_seg_boundary(dev));
                        return -1;
#endif
                }
        }

#ifdef PDIR_SEARCH_TIMING
        ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
        ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

        prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
        /* verify the first enable bit is clear */
        if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
                sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
        }
#endif

        DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
                __func__, size, pages_needed, pide,
                (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
                ioc->res_bitshift );

        return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes that were mapped
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
        unsigned long iovp = SBA_IOVP(ioc, iova);
        unsigned int pide = PDIR_INDEX(iovp);
        unsigned int ridx = pide >> 3;  /* convert bit to byte address */
        unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
        int bits_not_wanted = size >> iovp_shift;
        unsigned long m;

        /* Round up to power-of-two size: see AR2305 note above */
        bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
        for (; bits_not_wanted > 0 ; res_ptr++) {

                if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

                        /* these mappings start 64bit aligned */
                        *res_ptr = 0UL;
                        bits_not_wanted -= BITS_PER_LONG;
                        pide += BITS_PER_LONG;

                } else {

                        /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
                        m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
                        bits_not_wanted = 0;

                        DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
                                bits_not_wanted, m, pide, res_ptr, *res_ptr);

                        ASSERT(m != 0);
                        ASSERT(bits_not_wanted);
                        ASSERT((*res_ptr & m) == m); /* verify same bits are set */
                        *res_ptr &= ~m;
                }
        }
}


/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */

#if 1
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)    \
                                                      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
        *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
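
/*
** Worked example (illustrative): for an identity-mapped region-7
** kernel address such as vba == 0xE000000004321678, masking with
** ~0xE000000000000FFF strips the region bits (which is all
** virt_to_phys() does to such an address) and the page offset, and
** OR'ing in bit 63 sets Valid:
**
**      *pdir_ptr = 0x8000000004321000  (PPN == 0x4321, V == 1)
*/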

#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page((void *)pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}
#endif

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as invalid and purge the
 * corresponding IO TLB entry(ies). The PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
        u32 iovp = (u32) SBA_IOVP(ioc,iova);

        int off = PDIR_INDEX(iovp);

        /* Must be non-zero and rounded up */
        ASSERT(byte_cnt > 0);
        ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
        /* Assert first pdir entry is set */
        if (!(ioc->pdir_base[off] >> 60)) {
                sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
        }
#endif

        if (byte_cnt <= iovp_size)
        {
                ASSERT(off < ioc->pdir_size);

                iovp |= iovp_shift;     /* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
                /*
                ** clear I/O PDIR entry "valid" bit
                ** Do NOT clear the rest - save it for debugging.
                ** We should only clear bits that have previously
                ** been enabled.
                */
                ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
                /*
                ** If we want to maintain the PDIR as valid, put in
                ** the spill page so devices prefetching won't
                ** cause a hard fail.
                */
                ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
        } else {
                u32 t = get_iovp_order(byte_cnt) + iovp_shift;

                iovp |= t;
                ASSERT(t <= 31);   /* 2GB! Max value of "size" field */

                do {
                        /* verify this pdir entry is enabled */
                        ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
                        /* clear I/O Pdir entry "valid" bit first */
                        ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
                        ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
                        off++;
                        byte_cnt -= iovp_size;
                } while (byte_cnt > 0);
        }

        WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}

/**
 * sba_map_page - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @page: page holding the driver buffer to map.
 * @poff: offset of the buffer within @page.
 * @size:  number of bytes to map in driver buffer.
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
                               unsigned long poff, size_t size,
                               enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
        struct ioc *ioc;
        void *addr = page_address(page) + poff;
        dma_addr_t iovp;
        dma_addr_t offset;
        u64 *pdir_start;
        int pide;
#ifdef ASSERT_PDIR_SANITY
        unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
        unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
        ASSERT(to_pci_dev(dev)->dma_mask);
        /*
        ** Check if the PCI device can DMA to ptr... if so, just return ptr
        */
        if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
                /*
                ** Device can DMA to the buffer directly...
                ** just return the PCI address of ptr
                */
                DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
                           "0x%lx/0x%lx\n",
                           to_pci_dev(dev)->dma_mask, pci_addr);
                return pci_addr;
        }
#endif
        ioc = GET_IOC(dev);
        ASSERT(ioc);

        prefetch(ioc->res_hint);

        ASSERT(size > 0);
        ASSERT(size <= DMA_CHUNK_SIZE);

        /* save offset bits */
        offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

        /* round up to nearest iovp_size */
        size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
                panic("Sanity check failed");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        pide = sba_alloc_range(ioc, dev, size);
        if (pide < 0)
                return 0;

        iovp = (dma_addr_t) pide << iovp_shift;

        DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

        pdir_start = &(ioc->pdir_base[pide]);

        while (size > 0) {
                ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
                sba_io_pdir_entry(pdir_start, (unsigned long) addr);

                DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

                addr += iovp_size;
                size -= iovp_size;
                pdir_start++;
        }
        /* force pdir update */
        wmb();

        /* form complete address */
#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
        return SBA_IOVA(ioc, iovp, offset);
}

static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
                                       size_t size, enum dma_data_direction dir,
                                       struct dma_attrs *attrs)
{
        return sba_map_page(dev, virt_to_page(addr),
                            (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
}

#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
        u32     iovp = (u32) SBA_IOVP(ioc,iova);
        int     off = PDIR_INDEX(iovp);
        void    *addr;

        if (size <= iovp_size) {
                addr = phys_to_virt(ioc->pdir_base[off] &
                                    ~0xE000000000000FFFULL);
                mark_clean(addr, size);
        } else {
                do {
                        addr = phys_to_virt(ioc->pdir_base[off] &
                                            ~0xE000000000000FFFULL);
                        mark_clean(addr, min(size, iovp_size));
                        off++;
                        size -= iovp_size;
                } while (size > 0);
        }
}
#endif

/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
                           enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
        struct sba_dma_pair *d;
#endif
        unsigned long flags;
        dma_addr_t offset;

        ioc = GET_IOC(dev);
        ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
        if (likely((iova & ioc->imask) != ioc->ibase)) {
                /*
                ** Address does not fall w/in IOVA, must be bypassing
                */
                DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
                           iova);

#ifdef ENABLE_MARK_CLEAN
                if (dir == DMA_FROM_DEVICE) {
                        mark_clean(phys_to_virt(iova), size);
                }
#endif
                return;
        }
#endif
        offset = iova & ~iovp_mask;

        DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

        iova ^= offset;        /* clear offset bits */
        size += offset;
        size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
        if (dir == DMA_FROM_DEVICE)
                sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
        spin_lock_irqsave(&ioc->saved_lock, flags);
        d = &(ioc->saved[ioc->saved_cnt]);
        d->iova = iova;
        d->size = size;
        if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
                int cnt = ioc->saved_cnt;
                spin_lock(&ioc->res_lock);
                while (cnt--) {
                        sba_mark_invalid(ioc, d->iova, d->size);
                        sba_free_range(ioc, d->iova, d->size);
                        d--;
                }
                ioc->saved_cnt = 0;
                READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
                spin_unlock(&ioc->res_lock);
        }
        spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_mark_invalid(ioc, iova, size);
        sba_free_range(ioc, iova, size);
        READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}

void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
                            enum dma_data_direction dir, struct dma_attrs *attrs)
{
        sba_unmap_page(dev, iova, size, dir, attrs);
}

/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 * @flags: gfp flags to use for the allocation.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
        struct ioc *ioc;
        void *addr;

        ioc = GET_IOC(dev);
        ASSERT(ioc);

#ifdef CONFIG_NUMA
        {
                struct page *page;
                page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
                                        numa_node_id() : ioc->node, flags,
                                        get_order(size));

                if (unlikely(!page))
                        return NULL;

                addr = page_address(page);
        }
#else
        addr = (void *) __get_free_pages(flags, get_order(size));
#endif
        if (unlikely(!addr))
                return NULL;

        memset(addr, 0, size);
        *dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
        ASSERT(dev->coherent_dma_mask);
        /*
        ** Check if the PCI device can DMA to ptr... if so, just return ptr
        */
        if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
                DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
                           dev->coherent_dma_mask, *dma_handle);

                return addr;
        }
#endif

        /*
         * If the device can't bypass or bypass is disabled, pass the 32-bit
         * fake device to sba_map_single_attrs() to get an IOVA mapping.
         */
        *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
                                           size, 0, NULL);

        return addr;
}


/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_handle)
{
        sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
        free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL
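
/*
** For illustration: sba_coalesce_chunks() below marks the head of
** each DMA stream by storing (PIDE_FLAG | (idx << iovp_shift) |
** dma_offset) in dma_address; e.g. idx 0x23, dma_offset 0x344 and
** iovp_shift 12 encode to 0x23345 (the flag occupies bit 0).
** sba_fill_pdir() recognizes the flag, strips it, and rewrites
** dma_address as a real IOVA.
*/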

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */

static SBA_INLINE int
sba_fill_pdir(
        struct ioc *ioc,
        struct scatterlist *startsg,
        int nents)
{
        struct scatterlist *dma_sg = startsg;   /* pointer to current DMA */
        int n_mappings = 0;
        u64 *pdirp = NULL;
        unsigned long dma_offset = 0;

        while (nents-- > 0) {
                int     cnt = startsg->dma_length;
                startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
                if (dump_run_sg)
                        printk(" %2d : %08lx/%05x %p\n",
                                nents, startsg->dma_address, cnt,
                                sba_sg_address(startsg));
#else
                DBG_RUN_SG(" %d : %08lx/%05x %p\n",
                                nents, startsg->dma_address, cnt,
                                sba_sg_address(startsg));
#endif
                /*
                ** Look for the start of a new DMA stream
                */
                if (startsg->dma_address & PIDE_FLAG) {
                        u32 pide = startsg->dma_address & ~PIDE_FLAG;
                        dma_offset = (unsigned long) pide & ~iovp_mask;
                        startsg->dma_address = 0;
                        if (n_mappings)
                                dma_sg = sg_next(dma_sg);
                        dma_sg->dma_address = pide | ioc->ibase;
                        pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
                        n_mappings++;
                }

                /*
                ** Look for a VCONTIG chunk
                */
                if (cnt) {
                        unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
                        ASSERT(pdirp);

                        /* Since multiple Vcontig blocks could make up
                        ** one DMA stream, *add* cnt to dma_len.
                        */
                        dma_sg->dma_length += cnt;
                        cnt += dma_offset;
                        dma_offset=0;   /* only want offset on first chunk */
                        cnt = ROUNDUP(cnt, iovp_size);
                        do {
                                sba_io_pdir_entry(pdirp, vaddr);
                                vaddr += iovp_size;
                                cnt -= iovp_size;
                                pdirp++;
                        } while (cnt > 0);
                }
                startsg = sg_next(startsg);
        }
        /* force pdir update */
        wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
        dump_run_sg = 0;
#endif
        return(n_mappings);
}


/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
        (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
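
/*
** For illustration (iovp_shift == 12): DMA_CONTIG(0x13000, 0x14000)
** is true -- neither value has page-offset bits, so OR'ing them and
** shifting left by (64 - 12) leaves zero -- while
** DMA_CONTIG(0x13800, 0x14000) is false because the previous chunk
** ends mid-page.
*/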


/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
        struct scatterlist *startsg,
        int nents)
{
        struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
        unsigned long vcontig_len;         /* len of VCONTIG chunk */
        unsigned long vcontig_end;
        struct scatterlist *dma_sg;        /* next DMA stream head */
        unsigned long dma_offset, dma_len; /* start/len of DMA stream */
        int n_mappings = 0;
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
        int idx;

        while (nents > 0) {
                unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

                /*
                ** Prepare for first/next DMA stream
                */
                dma_sg = vcontig_sg = startsg;
                dma_len = vcontig_len = vcontig_end = startsg->length;
                vcontig_end += vaddr;
                dma_offset = vaddr & ~iovp_mask;

                /* PARANOID: clear entries */
                startsg->dma_address = startsg->dma_length = 0;

                /*
                ** This loop terminates one iteration "early" since
                ** it's always looking one "ahead".
                */
                while (--nents > 0) {
                        unsigned long vaddr;    /* tmp */

                        startsg = sg_next(startsg);

                        /* PARANOID */
                        startsg->dma_address = startsg->dma_length = 0;

                        /* catch brokenness in SCSI layer */
                        ASSERT(startsg->length <= DMA_CHUNK_SIZE);

                        /*
                        ** First make sure current dma stream won't
                        ** exceed DMA_CHUNK_SIZE if we coalesce the
                        ** next entry.
                        */
                        if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
                            > DMA_CHUNK_SIZE)
                                break;

                        if (dma_len + startsg->length > max_seg_size)
                                break;

                        /*
                        ** Then look for virtually contiguous blocks.
                        **
                        ** append the next transaction?
                        */
                        vaddr = (unsigned long) sba_sg_address(startsg);
                        if (vcontig_end == vaddr)
                        {
                                vcontig_len += startsg->length;
                                vcontig_end += startsg->length;
                                dma_len     += startsg->length;
                                continue;
                        }

#ifdef DEBUG_LARGE_SG_ENTRIES
                        dump_run_sg = (vcontig_len > iovp_size);
#endif

                        /*
                        ** Not virtually contiguous.
                        ** Terminate prev chunk.
                        ** Start a new chunk.
                        **
                        ** Once we start a new VCONTIG chunk, dma_offset
                        ** can't change. And we need the offset from the first
                        ** chunk - not the last one. Ergo, successive chunks
                        ** must start on page boundaries and dovetail
                        ** with their predecessor.
                        */
                        vcontig_sg->dma_length = vcontig_len;

                        vcontig_sg = startsg;
                        vcontig_len = startsg->length;

                        /*
                        ** 3) do the entries end/start on page boundaries?
                        **    Don't update vcontig_end until we've checked.
                        */
                        if (DMA_CONTIG(vcontig_end, vaddr))
                        {
                                vcontig_end = vcontig_len + vaddr;
                                dma_len += vcontig_len;
                                continue;
                        } else {
                                break;
                        }
                }

                /*
                ** End of DMA Stream
                ** Terminate last VCONTIG block.
                ** Allocate space for DMA stream.
                */
                vcontig_sg->dma_length = vcontig_len;
                dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
                ASSERT(dma_len <= DMA_CHUNK_SIZE);
                idx = sba_alloc_range(ioc, dev, dma_len);
                if (idx < 0) {
                        dma_sg->dma_length = 0;
                        return -1;
                }
                dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
                                                   | dma_offset);
                n_mappings++;
        }

        return n_mappings;
}

static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
                               int nents, enum dma_data_direction dir,
                               struct dma_attrs *attrs);
/**
 * sba_map_sg_attrs - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
1457static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1458                            int nents, enum dma_data_direction dir,
1459                            struct dma_attrs *attrs)
1460{
1461        struct ioc *ioc;
1462        int coalesced, filled = 0;
1463#ifdef ASSERT_PDIR_SANITY
1464        unsigned long flags;
1465#endif
1466#ifdef ALLOW_IOV_BYPASS_SG
1467        struct scatterlist *sg;
1468#endif
1469
1470        DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
1471        ioc = GET_IOC(dev);
1472        ASSERT(ioc);
1473
1474#ifdef ALLOW_IOV_BYPASS_SG
1475        ASSERT(to_pci_dev(dev)->dma_mask);
1476        if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1477                for_each_sg(sglist, sg, nents, filled) {
1478                        sg->dma_length = sg->length;
1479                        sg->dma_address = virt_to_phys(sba_sg_address(sg));
1480                }
1481                return filled;
1482        }
1483#endif
1484        /* Fast path single entry scatterlists. */
1485        if (nents == 1) {
1486                sglist->dma_length = sglist->length;
1487                sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
1488                return 1;
1489        }
1490
1491#ifdef ASSERT_PDIR_SANITY
1492        spin_lock_irqsave(&ioc->res_lock, flags);
1493        if (sba_check_pdir(ioc, "Check before sba_map_sg_attrs()")) {
1495                sba_dump_sg(ioc, sglist, nents);
1496                panic("Check before sba_map_sg_attrs()");
1497        }
1498        spin_unlock_irqrestore(&ioc->res_lock, flags);
1499#endif
1500
1501        prefetch(ioc->res_hint);
1502
1503        /*
1504        ** First coalesce the chunks and allocate I/O pdir space
1505        **
1506        ** If this is one DMA stream, we can properly map using the
1507        ** correct virtual address associated with each DMA page.
1508        ** Without this association, we wouldn't have coherent DMA!
1509        ** Access to the virtual address is what forces a two-pass algorithm.
1510        */
1511        coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1512        if (coalesced < 0) {
1513                sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
1514                return 0;
1515        }
1516
1517        /*
1518        ** Program the I/O Pdir
1519        **
1520        ** map the virtual addresses to the I/O Pdir
1521        ** o dma_address will contain the pdir index
1522        ** o dma_len will contain the number of bytes to map
1523        ** o address contains the virtual address.
1524        */
1525        filled = sba_fill_pdir(ioc, sglist, nents);
1526
1527#ifdef ASSERT_PDIR_SANITY
1528        spin_lock_irqsave(&ioc->res_lock, flags);
1529        if (sba_check_pdir(ioc, "Check after sba_map_sg_attrs()")) {
1531                sba_dump_sg(ioc, sglist, nents);
1532                panic("Check after sba_map_sg_attrs()\n");
1533        }
1534        spin_unlock_irqrestore(&ioc->res_lock, flags);
1535#endif
1536
1537        ASSERT(coalesced == filled);
1538        DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1539
1540        return filled;
1541}
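
    /*
    ** A minimal sketch of how a driver would consume the mapping above
    ** via the generic DMA API; program_hw() is a hypothetical device
    ** helper, not part of this file.  dma_map_sg() may return fewer
    ** entries than it was handed once chunks are coalesced, so the
    ** device is programmed from the return value, while dma_unmap_sg()
    ** still takes the original nents:
    **
    **	struct scatterlist *sg;
    **	int i, n;
    **
    **	n = dma_map_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
    **	for_each_sg(sglist, sg, n, i)
    **		program_hw(sg_dma_address(sg), sg_dma_len(sg));
    **	...
    **	dma_unmap_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
    */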
1542
1543/**
1544 * sba_unmap_sg_attrs - unmap Scatter/Gather list
1545 * @dev: instance of PCI device owned by the driver that's asking.
1546 * @sglist:  array of buffer/length pairs
1547 * @nents:  number of entries in list
1548 * @dir:  R/W or both.
1549 * @attrs: optional dma attributes
1550 *
1551 * See Documentation/PCI/PCI-DMA-mapping.txt
1552 */
1553static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1554                               int nents, enum dma_data_direction dir,
1555                               struct dma_attrs *attrs)
1556{
1557#ifdef ASSERT_PDIR_SANITY
1558        struct ioc *ioc;
1559        unsigned long flags;
1560#endif
1561
1562        DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1563                   __func__, nents, sba_sg_address(sglist), sglist->length);
1564
1565#ifdef ASSERT_PDIR_SANITY
1566        ioc = GET_IOC(dev);
1567        ASSERT(ioc);
1568
1569        spin_lock_irqsave(&ioc->res_lock, flags);
1570        sba_check_pdir(ioc, "Check before sba_unmap_sg_attrs()");
1571        spin_unlock_irqrestore(&ioc->res_lock, flags);
1572#endif
1573
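            /*
            ** Entries past the coalesced chunks were left with
            ** dma_length == 0 by sba_fill_pdir(), which is what
            ** terminates this walk early.
            */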
1574        while (nents && sglist->dma_length) {
1575
1576                sba_unmap_single_attrs(dev, sglist->dma_address,
1577                                       sglist->dma_length, dir, attrs);
1578                sglist = sg_next(sglist);
1579                nents--;
1580        }
1581
1582        DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1583
1584#ifdef ASSERT_PDIR_SANITY
1585        spin_lock_irqsave(&ioc->res_lock, flags);
1586        sba_check_pdir(ioc, "Check after sba_unmap_sg_attrs()");
1587        spin_unlock_irqrestore(&ioc->res_lock, flags);
1588#endif
1589
1590}
1591
1592/**************************************************************
1593*
1594*   Initialization and claim
1595*
1596***************************************************************/
1597
1598static void __init
1599ioc_iova_init(struct ioc *ioc)
1600{
1601        int tcnfg;
1602        int agp_found = 0;
1603        struct pci_dev *device = NULL;
1604#ifdef FULL_VALID_PDIR
1605        unsigned long index;
1606#endif
1607
1608        /*
1609        ** Firmware programs the base and size of a "safe IOVA space"
1610        ** (one that doesn't overlap memory or LMMIO space) in the
1611        ** IBASE and IMASK registers.
1612        */
1613        ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1614        ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1615
1616        ioc->iov_size = ~ioc->imask + 1;
1617
1618        DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1619                __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1620                ioc->iov_size >> 20);
1621
1622        switch (iovp_size) {
1623                case  4*1024: tcnfg = 0; break;
1624                case  8*1024: tcnfg = 1; break;
1625                case 16*1024: tcnfg = 2; break;
1626                case 64*1024: tcnfg = 3; break;
1627                default:
1628                        panic(PFX "Unsupported IOTLB page size %ldK",
1629                                iovp_size >> 10);
1630                        break;
1631        }
1632        WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1633
1634        ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1635        ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1636                                                   get_order(ioc->pdir_size));
1637        if (!ioc->pdir_base)
1638                panic(PFX "Couldn't allocate I/O Page Table\n");
1639
1640        memset(ioc->pdir_base, 0, ioc->pdir_size);
1641
1642        DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
1643                iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1644
1645        ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1646        WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1647
1648        /*
1649        ** If an AGP device is present, only use half of the IOV space
1650        ** for PCI DMA.  Unfortunately we can't know ahead of time
1651        ** whether GART support will actually be used, so for now we
1652        ** just key on any AGP device found in the system.
1653        ** We write a key into the first pdir index past the half we
1654        ** keep, for the GART code to handshake on.
1655        */
1656        for_each_pci_dev(device)
1657                agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1658
1659        if (agp_found && reserve_sba_gart) {
1660                printk(KERN_INFO PFX "reserving %dMB of IOVA space at 0x%lx for agpgart\n",
1661                      ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1662                ioc->pdir_size /= 2;
1663                ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1664        }
1665#ifdef FULL_VALID_PDIR
1666        /*
1667        ** Check to see if the spill page has been allocated; we don't need
1668        ** more than one across multiple SBAs.
1669        */
1670        if (!prefetch_spill_page) {
1671                char *spill_poison = "SBAIOMMU POISON";
1672                int poison_size = 16;
1673                void *poison_addr, *addr;
1674
1675                addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1676                if (!addr)
1677                        panic(PFX "Couldn't allocate PDIR spill page\n");
1678
1679                poison_addr = addr;
1680                for ( ; poison_addr < addr + iovp_size; poison_addr += poison_size)
1681                        memcpy(poison_addr, spill_poison, poison_size);
1682
1683                prefetch_spill_page = virt_to_phys(addr);
1684
1685                DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
1686        }
1687        /*
1688        ** Set all the PDIR entries valid w/ the spill page as the target
1689        */
1690        for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1691                ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1692#endif
1693
1694        /* Clear I/O TLB of any possible entries */
1695        WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1696        READ_REG(ioc->ioc_hpa + IOC_PCOM);
1697
1698        /* Enable IOVA translation */
1699        WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1700        READ_REG(ioc->ioc_hpa + IOC_IBASE);
1701}
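
    /*
    ** A worked sizing example for the code above, using a hypothetical
    ** firmware value: if IMASK reads back as 0xFFFFFFFFC0000000,
    **
    **	iov_size  = ~imask + 1;			// 0x40000000 == 1GB window
    **	pdir_size = (iov_size / iovp_size)	// 262144 entries at 4K pages
    **		    * PDIR_ENTRY_SIZE;		// * 8 bytes == 2MB of pdir
    */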
1702
1703static void __init
1704ioc_resource_init(struct ioc *ioc)
1705{
1706        spin_lock_init(&ioc->res_lock);
1707#if DELAYED_RESOURCE_CNT > 0
1708        spin_lock_init(&ioc->saved_lock);
1709#endif
1710
1711        /* resource map size dictated by pdir_size */
1712        ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1713        ioc->res_size >>= 3;  /* convert bit count to byte count */
1714        DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1715
1716        ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1717                                                 get_order(ioc->res_size));
1718        if (!ioc->res_map)
1719                panic(PFX "Couldn't allocate resource map\n");
1720
1721        memset(ioc->res_map, 0, ioc->res_size);
1722        /* next available IOVP - circular search */
1723        ioc->res_hint = (unsigned long *) ioc->res_map;
1724
1725#ifdef ASSERT_PDIR_SANITY
1726        /* Mark first bit busy - i.e. no IOVA 0 */
1727        ioc->res_map[0] = 0x1;
1728        ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1729#endif
1730#ifdef FULL_VALID_PDIR
1731        /* Mark the last resource used so we don't prefetch beyond IOVA space */
1732        ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1733        ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1734                                                              | prefetch_spill_page);
1735#endif
1736
1737        DBG_INIT("%s() res_map %x %p\n", __func__,
1738                 ioc->res_size, (void *) ioc->res_map);
1739}
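
    /*
    ** Continuing the hypothetical 1GB/4K example from ioc_iova_init():
    ** 262144 pdir entries need 262144 bits of resource map, i.e.
    **
    **	res_size = pdir_size / PDIR_ENTRY_SIZE;	// 262144 bits, 1 per entry
    **	res_size >>= 3;				// 32768 bytes of bitmap
    */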
1740
1741static void __init
1742ioc_sac_init(struct ioc *ioc)
1743{
1744        struct pci_dev *sac = NULL;
1745        struct pci_controller *controller = NULL;
1746
1747        /*
1748         * pci_alloc_coherent() must return a DMA address which is
1749         * SAC (single address cycle) addressable, so allocate a
1750         * pseudo-device to enforce that.
1751         */
1752        sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1753        if (!sac)
1754                panic(PFX "Couldn't allocate struct pci_dev");
1755
1756        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1757        if (!controller)
1758                panic(PFX "Couldn't allocate struct pci_controller");
1759
1760        controller->iommu = ioc;
1761        sac->sysdata = controller;
1762        sac->dma_mask = 0xFFFFFFFFUL;
1763#ifdef CONFIG_PCI
1764        sac->dev.bus = &pci_bus_type;
1765#endif
1766        ioc->sac_only_dev = sac;
1767}
1768
1769static void __init
1770ioc_zx1_init(struct ioc *ioc)
1771{
1772        unsigned long rope_config;
1773        unsigned int i;
1774
1775        if (ioc->rev < 0x20)
1776                panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1777
1778        /* 38 bit memory controller + extra bit for range displaced by MMIO */
1779        ioc->dma_mask = (0x1UL << 39) - 1;
1780
1781        /*
1782        ** Clear ROPE(N)_CONFIG AO bit.
1783        ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1784        ** Overrides bit 1 in DMA Hint Sets.
1785        ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1786        */
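            /* eight rope config registers, 8 bytes apart: ROPE0..ROPE7 */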
1787        for (i = 0; i < (8*8); i += 8) {
1788                rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1789                rope_config &= ~IOC_ROPE_AO;
1790                WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1791        }
1792}
1793
1794typedef void (initfunc)(struct ioc *);
1795
1796struct ioc_iommu {
1797        u32 func_id;
1798        char *name;
1799        initfunc *init;
1800};
1801
1802static struct ioc_iommu ioc_iommu_info[] __initdata = {
1803        { ZX1_IOC_ID, "zx1", ioc_zx1_init },
1804        { ZX2_IOC_ID, "zx2", NULL },
1805        { SX1000_IOC_ID, "sx1000", NULL },
1806        { SX2000_IOC_ID, "sx2000", NULL },
1807};
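
    /*
    ** Claiming a new IOC variant would, hypothetically, take one more
    ** row here, e.g. { MY_IOC_ID, "myioc", ioc_myioc_init };
    ** ioc_init() below matches on the FUNC_ID register and calls the
    ** init hook when one is present.
    */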
1808
1809static struct ioc * __init
1810ioc_init(unsigned long hpa, void *handle)
1811{
1812        struct ioc *ioc;
1813        struct ioc_iommu *info;
1814
1815        ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1816        if (!ioc)
1817                return NULL;
1818
1819        ioc->next = ioc_list;
1820        ioc_list = ioc;
1821
1822        ioc->handle = handle;
1823        ioc->ioc_hpa = ioremap(hpa, 0x1000);
1824
1825        ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1826        ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1827        ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;   /* conservative */
1828
1829        for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1830                if (ioc->func_id == info->func_id) {
1831                        ioc->name = info->name;
1832                        if (info->init)
1833                                (info->init)(ioc);
1834                }
1835        }
1836
1837        iovp_size = (1 << iovp_shift);
1838        iovp_mask = ~(iovp_size - 1);
1839
1840        DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
1841                PAGE_SIZE >> 10, iovp_size >> 10);
1842
1843        if (!ioc->name) {
1844                ioc->name = kmalloc(24, GFP_KERNEL);
1845                if (ioc->name)
1846                        sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1847                                ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1848                else
1849                        ioc->name = "Unknown";
1850        }
1851
1852        ioc_iova_init(ioc);
1853        ioc_resource_init(ioc);
1854        ioc_sac_init(ioc);
1855
1856        if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
1857                ia64_max_iommu_merge_mask = ~iovp_mask;
1858
1859        printk(KERN_INFO PFX
1860                "%s %d.%d HPA 0x%lx IOVA space %dMB at 0x%lx\n",
1861                ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1862                hpa, ioc->iov_size >> 20, ioc->ibase);
1863
1864        return ioc;
1865}
1866
1867
1868
1869/**************************************************************************
1870**
1871**   SBA initialization code (HW and SW)
1872**
1873**   o identify SBA chip itself
1874**   o FIXME: initialize DMA hints for reasonable defaults
1875**
1876**************************************************************************/
1877
1878#ifdef CONFIG_PROC_FS
1879static void *
1880ioc_start(struct seq_file *s, loff_t *pos)
1881{
1882        struct ioc *ioc;
1883        loff_t n = *pos;
1884
1885        for (ioc = ioc_list; ioc; ioc = ioc->next)
1886                if (!n--)
1887                        return ioc;
1888
1889        return NULL;
1890}
1891
1892static void *
1893ioc_next(struct seq_file *s, void *v, loff_t *pos)
1894{
1895        struct ioc *ioc = v;
1896
1897        ++*pos;
1898        return ioc->next;
1899}
1900
1901static void
1902ioc_stop(struct seq_file *s, void *v)
1903{
1904}
1905
1906static int
1907ioc_show(struct seq_file *s, void *v)
1908{
1909        struct ioc *ioc = v;
1910        unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1911        int i, used = 0;
1912
1913        seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1914                ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1915#ifdef CONFIG_NUMA
1916        if (ioc->node != MAX_NUMNODES)
1917                seq_printf(s, "NUMA node       : %d\n", ioc->node);
1918#endif
1919        seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1920        seq_printf(s, "IOVA page size  : %ld KB\n", iovp_size/1024);
1921
1922        for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1923                used += hweight64(*res_ptr);
1924
1925        seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
1926        seq_printf(s, "PDIR used       : %d entries\n", used);
1927
1928#ifdef PDIR_SEARCH_TIMING
1929        {
1930                unsigned long i = 0, avg = 0, min, max;
1931                min = max = ioc->avg_search[0];
1932                for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1933                        avg += ioc->avg_search[i];
1934                        if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1935                        if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1936                }
1937                avg /= SBA_SEARCH_SAMPLE;
1938                seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1939                           min, avg, max);
1940        }
1941#endif
1942#ifndef ALLOW_IOV_BYPASS
1943        seq_printf(s, "IOVA bypass disabled\n");
1944#endif
1945        return 0;
1946}
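
    /*
    ** With the seq_printf() calls above, reading the /proc file yields
    ** output along these lines (values are illustrative only):
    **
    **	Hewlett Packard zx1 IOC rev 2.3
    **	IOVA size       : 1024 MB
    **	IOVA page size  : 4 KB
    **	PDIR size       : 262144 entries
    **	PDIR used       : 2 entries
    */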
1947
1948static const struct seq_operations ioc_seq_ops = {
1949        .start = ioc_start,
1950        .next  = ioc_next,
1951        .stop  = ioc_stop,
1952        .show  = ioc_show
1953};
1954
1955static int
1956ioc_open(struct inode *inode, struct file *file)
1957{
1958        return seq_open(file, &ioc_seq_ops);
1959}
1960
1961static const struct file_operations ioc_fops = {
1962        .open    = ioc_open,
1963        .read    = seq_read,
1964        .llseek  = seq_lseek,
1965        .release = seq_release
1966};
1967
1968static void __init
1969ioc_proc_init(void)
1970{
1971        struct proc_dir_entry *dir;
1972
1973        dir = proc_mkdir("bus/mckinley", NULL);
1974        if (!dir)
1975                return;
1976
1977        proc_create(ioc_list->name, 0, dir, &ioc_fops);
1978}
1979#endif
1980
1981static void
1982sba_connect_bus(struct pci_bus *bus)
1983{
1984        acpi_handle handle, parent;
1985        acpi_status status;
1986        struct ioc *ioc;
1987
1988        if (!PCI_CONTROLLER(bus))
1989                panic(PFX "no sysdata on bus %d!\n", bus->number);
1990
1991        if (PCI_CONTROLLER(bus)->iommu)
1992                return;
1993
1994        handle = PCI_CONTROLLER(bus)->acpi_handle;
1995        if (!handle)
1996                return;
1997
1998        /*
1999         * The IOC scope encloses PCI root bridges in the ACPI
2000         * namespace, so work our way out until we find an IOC we
2001         * claimed previously.
2002         */
2003        do {
2004                for (ioc = ioc_list; ioc; ioc = ioc->next)
2005                        if (ioc->handle == handle) {
2006                                PCI_CONTROLLER(bus)->iommu = ioc;
2007                                return;
2008                        }
2009
2010                status = acpi_get_parent(handle, &parent);
2011                handle = parent;
2012        } while (ACPI_SUCCESS(status));
2013
2014        printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
2015}
2016
2017#ifdef CONFIG_NUMA
2018static void __init
2019sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
2020{
2021        unsigned int node;
2022        int pxm;
2023
2024        ioc->node = MAX_NUMNODES;
2025
2026        pxm = acpi_get_pxm(handle);
2027
2028        if (pxm < 0)
2029                return;
2030
2031        node = pxm_to_node(pxm);
2032
2033        if (node >= MAX_NUMNODES || !node_online(node))
2034                return;
2035
2036        ioc->node = node;
2037        return;
2038}
2039#else
2040#define sba_map_ioc_to_node(ioc, handle)
2041#endif
2042
2043static int __init
2044acpi_sba_ioc_add(struct acpi_device *device)
2045{
2046        struct ioc *ioc;
2047        acpi_status status;
2048        u64 hpa, length;
2049        struct acpi_device_info *adi;
2050
2051        status = hp_acpi_csr_space(device->handle, &hpa, &length);
2052        if (ACPI_FAILURE(status))
2053                return 1;
2054
2055        status = acpi_get_object_info(device->handle, &adi);
2056        if (ACPI_FAILURE(status))
2057                return 1;
2058
2059        /*
2060         * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
2061         * root bridges, and its CSR space includes the IOC function.
2062         */
2063        if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
2064                hpa += ZX1_IOC_OFFSET;
2065                /* zx1 based systems default to kernel page size iommu pages */
2066                if (!iovp_shift)
2067                        iovp_shift = min(PAGE_SHIFT, 16);
2068        }
2069        kfree(adi);
2070
2071        /*
2072         * default anything not caught above or specified on cmdline to 4k
2073         * iommu page size
2074         */
2075        if (!iovp_shift)
2076                iovp_shift = 12;
2077
2078        ioc = ioc_init(hpa, device->handle);
2079        if (!ioc)
2080                return 1;
2081
2082        /* setup NUMA node association */
2083        sba_map_ioc_to_node(ioc, device->handle);
2084        return 0;
2085}
2086
2087static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2088        {"HWP0001", 0},
2089        {"HWP0004", 0},
2090        {"", 0},
2091};
2092static struct acpi_driver acpi_sba_ioc_driver = {
2093        .name           = "IOC IOMMU Driver",
2094        .ids            = hp_ioc_iommu_device_ids,
2095        .ops            = {
2096                .add    = acpi_sba_ioc_add,
2097        },
2098};
2099
2100extern struct dma_map_ops swiotlb_dma_ops;
2101
2102static int __init
2103sba_init(void)
2104{
2105        if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
2106                return 0;
2107
2108#if defined(CONFIG_IA64_GENERIC)
2109        /* If we are booting a kdump kernel, the sba_iommu will
2110         * cause devices that were not shutdown properly to MCA
2111         * as soon as they are turned back on.  Our only option for
2112         * a successful kdump kernel boot is to use the swiotlb.
2113         */
2114        if (is_kdump_kernel()) {
2115                dma_ops = &swiotlb_dma_ops;
2116                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2117                        panic("Unable to initialize software I/O TLB:"
2118                                  " Try machvec=dig boot option");
2119                machvec_init("dig");
2120                return 0;
2121        }
2122#endif
2123
2124        acpi_bus_register_driver(&acpi_sba_ioc_driver);
2125        if (!ioc_list) {
2126#ifdef CONFIG_IA64_GENERIC
2127                /*
2128                 * If we didn't find something sba_iommu can claim, we
2129                 * need to setup the swiotlb and switch to the dig machvec.
2130                 */
2131                dma_ops = &swiotlb_dma_ops;
2132                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2133                        panic("Unable to find SBA IOMMU or initialize "
2134                              "software I/O TLB: Try machvec=dig boot option");
2135                machvec_init("dig");
2136#else
2137                panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
2138#endif
2139                return 0;
2140        }
2141
2142#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
2143        /*
2144         * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
2145         * buffer setup to support devices with smaller DMA masks than
2146         * sba_iommu can handle.
2147         */
2148        if (ia64_platform_is("hpzx1_swiotlb")) {
2149                extern void hwsw_init(void);
2150
2151                hwsw_init();
2152        }
2153#endif
2154
2155#ifdef CONFIG_PCI
2156        {
2157                struct pci_bus *b = NULL;
2158                while ((b = pci_find_next_bus(b)) != NULL)
2159                        sba_connect_bus(b);
2160        }
2161#endif
2162
2163#ifdef CONFIG_PROC_FS
2164        ioc_proc_init();
2165#endif
2166        return 0;
2167}
2168
2169subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
2170
2171static int __init
2172nosbagart(char *str)
2173{
2174        reserve_sba_gart = 0;
2175        return 1;
2176}
2177
2178static int sba_dma_supported (struct device *dev, u64 mask)
2179{
2180        /* make sure it's at least 32bit capable */
2181        return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
2182}
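
    /*
    ** For example, with DMA_BIT_MASK(n) the generic n-bit mask helper:
    **
    **	sba_dma_supported(dev, DMA_BIT_MASK(64));	// 1: covers 32 bits
    **	sba_dma_supported(dev, DMA_BIT_MASK(32));	// 1: exactly 32-bit
    **	sba_dma_supported(dev, DMA_BIT_MASK(24));	// 0: ISA-style mask
    */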
2183
2184static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2185{
2186        return 0;
2187}
2188
2189__setup("nosbagart", nosbagart);
2190
2191static int __init
2192sba_page_override(char *str)
2193{
2194        unsigned long page_size;
2195
2196        page_size = memparse(str, &str);
2197        switch (page_size) {
2198                case 4096:
2199                case 8192:
2200                case 16384:
2201                case 65536:
2202                        iovp_shift = ffs(page_size) - 1;
2203                        break;
2204                default:
2205                        printk(KERN_ERR "%s: unknown/unsupported iommu page size %ld\n",
2206                               __func__, page_size);
2207        }
2208
2209        return 1;
2210}
2211
2212__setup("sbapagesize=", sba_page_override);
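
    /*
    ** Boot-line example: "sbapagesize=16k" makes memparse() return
    ** 16384, and ffs(16384) - 1 == 14, so iovp_shift becomes 14
    ** (16K IOMMU pages).
    */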
2213
2214struct dma_map_ops sba_dma_ops = {
2215        .alloc_coherent         = sba_alloc_coherent,
2216        .free_coherent          = sba_free_coherent,
2217        .map_page               = sba_map_page,
2218        .unmap_page             = sba_unmap_page,
2219        .map_sg                 = sba_map_sg_attrs,
2220        .unmap_sg               = sba_unmap_sg_attrs,
2221        .sync_single_for_cpu    = machvec_dma_sync_single,
2222        .sync_sg_for_cpu        = machvec_dma_sync_sg,
2223        .sync_single_for_device = machvec_dma_sync_single,
2224        .sync_sg_for_device     = machvec_dma_sync_sg,
2225        .dma_supported          = sba_dma_supported,
2226        .mapping_error          = sba_dma_mapping_error,
2227};
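
    /*
    ** Once sba_dma_init() below installs this table, a generic call
    ** such as
    **
    **	dma_map_single(dev, buf, size, DMA_TO_DEVICE);
    **
    ** is dispatched through dma_ops->map_page, i.e. sba_map_page().
    */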
2228
2229void sba_dma_init(void)
2230{
2231        dma_ops = &sba_dma_ops;
2232}
2233