linux/arch/ia64/sn/pci/tioca_provider.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003-2005 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>

u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found);        /* used by agp-sgi */

LIST_HEAD(tioca_list);
EXPORT_SYMBOL(tioca_list);      /* used by agp-sgi */

static int tioca_gart_init(struct tioca_kernel *);

/**
 * tioca_gart_init - Initialize SGI TIOCA GART
 * @tioca_kern: ptr to the tioca_kernel struct identifying the CA to initialize
 *
 * If the indicated tioca has devices present, initialize its associated
 * GART MMR's and kernel memory.
 */
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
        u64 ap_reg;
        u64 offset;
        struct page *tmp;
        struct tioca_common *tioca_common;
        struct tioca __iomem *ca_base;

        tioca_common = tioca_kern->ca_common;
        ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

        if (list_empty(tioca_kern->ca_devices))
                return 0;

        ap_reg = 0;

        /*
         * Validate aperture size
         */

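        /*
         * Note: the AP_SIZE field values below follow the pattern
         * 0x400 - (aperture size in MB / 4), i.e. a mask expressed in
         * 4MB granules (4MB -> 0x3ff, ..., 4GB -> 0x000).
         */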
        switch (CA_APERATURE_SIZE >> 20) {
        case 4:
                ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT);      /* 4MB */
                break;
        case 8:
                ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT);      /* 8MB */
                break;
        case 16:
                ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT);      /* 16MB */
                break;
        case 32:
                ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT);      /* 32 MB */
                break;
        case 64:
                ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT);      /* 64 MB */
                break;
        case 128:
                ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT);      /* 128 MB */
                break;
        case 256:
                ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT);      /* 256 MB */
                break;
        case 512:
                ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT);      /* 512 MB */
                break;
        case 1024:
                ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT);      /* 1GB */
                break;
        case 2048:
                ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT);      /* 2GB */
                break;
        case 4096:
                ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT);      /* 4 GB */
                break;
        default:
                printk(KERN_ERR "%s:  Invalid CA_APERATURE_SIZE "
                       "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
                return -1;
        }

        /*
         * Set up other aperture parameters
         */

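        /*
         * Pick the GART page size: use 16KB GART pages when the kernel
         * page size allows it, otherwise fall back to 4KB.
         */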
        if (PAGE_SIZE >= 16384) {
                tioca_kern->ca_ap_pagesize = 16384;
                ap_reg |= CA_GART_PAGE_SIZE;
        } else {
                tioca_kern->ca_ap_pagesize = 4096;
        }

        tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
        tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
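        /*
         * One GART entry per aperture page: e.g. a 1GB aperture with 4KB
         * GART pages needs 256K entries, i.e. a 2MB table (8 bytes per
         * entry, allocated below).
         */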
        tioca_kern->ca_gart_entries =
            tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;

        ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
        ap_reg |= tioca_kern->ca_ap_bus_base;

        /*
         * Allocate and set up the GART
         */

        tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
        tmp =
            alloc_pages_node(tioca_kern->ca_closest_node,
                             GFP_KERNEL | __GFP_ZERO,
                             get_order(tioca_kern->ca_gart_size));

        if (!tmp) {
                printk(KERN_ERR "%s:  Could not allocate "
                       "%llu bytes (order %d) for GART\n",
                       __func__,
                       tioca_kern->ca_gart_size,
                       get_order(tioca_kern->ca_gart_size));
                return -ENOMEM;
        }

        tioca_kern->ca_gart = page_address(tmp);
        tioca_kern->ca_gart_coretalk_addr =
            PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));

        /*
         * Compute PCI/AGP convenience fields
         */

        offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
        tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
        tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
        tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
        tioca_kern->ca_pcigart_base =
            tioca_kern->ca_gart_coretalk_addr + offset;
        tioca_kern->ca_pcigart =
            &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
        tioca_kern->ca_pcigart_entries =
            tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
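        /*
         * ca_pcigart_pagemap is an allocation bitmap with one bit per GART
         * entry in the PCI-mapped window; dividing by 8 converts bits to
         * bytes.
         */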
        tioca_kern->ca_pcigart_pagemap =
            kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
        if (!tioca_kern->ca_pcigart_pagemap) {
                free_pages((unsigned long)tioca_kern->ca_gart,
                           get_order(tioca_kern->ca_gart_size));
                return -ENOMEM;
        }

        offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
        tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
        tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
        tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
        tioca_kern->ca_gfxgart_base =
            tioca_kern->ca_gart_coretalk_addr + offset;
        tioca_kern->ca_gfxgart =
            &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
        tioca_kern->ca_gfxgart_entries =
            tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;

        /*
         * various control settings:
         *      use agp op-combining
         *      use GET semantics to fetch memory
         *      participate in coherency domain
         *      DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
         */

        __sn_setq_relaxed(&ca_base->ca_control1,
                        CA_AGPDMA_OP_ENB_COMBDELAY);    /* PV895469 ? */
        __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
        __sn_setq_relaxed(&ca_base->ca_control2,
                        (0x2ull << CA_GART_MEM_PARAM_SHFT));
        tioca_kern->ca_gart_iscoherent = 1;
        __sn_clrq_relaxed(&ca_base->ca_control2,
                        (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));

        /*
         * Unmask GART fetch error interrupts.  Clear residual errors first.
         */

        writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
        writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
        __sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);

        /*
         * Program the aperture and gart registers in TIOCA
         */

        writeq(ap_reg, &ca_base->ca_gart_aperature);
        writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);

        return 0;
}

/**
 * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
 * @tioca_kern: structure representing the CA
 *
 * Given a CA, scan all attached functions making sure they all support
 * FastWrite.  If so, enable FastWrite for all functions and the CA itself.
 */

void
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
{
        int cap_ptr;
        u32 reg;
        struct tioca __iomem *tioca_base;
        struct pci_dev *pdev;
        struct tioca_common *common;

        common = tioca_kern->ca_common;

        /*
         * Scan all vga controllers on this bus making sure they all
         * support FW.  If not, return.
         */

        list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
                if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
                        continue;

                cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
                if (!cap_ptr)
                        return; /* no AGP CAP means no FW */

                pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
                if (!(reg & PCI_AGP_STATUS_FW))
                        return; /* function doesn't support FW */
        }

        /*
         * Set fw for all vga fn's
         */

        list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
                if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
                        continue;

                cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
                pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
                reg |= PCI_AGP_COMMAND_FW;
                pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
        }

        /*
         * Set ca's fw to match
         */

        tioca_base = (struct tioca __iomem*)common->ca_common.bs_base;
        __sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
}

EXPORT_SYMBOL(tioca_fastwrite_enable);  /* used by agp-sgi */

/**
 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
 * @paddr: system physical address
 *
 * Map @paddr into 64-bit CA bus space.  No device context is necessary.
 * Bits 53:0 come from the coretalk address.  We just need to mask in the
 * following optional bits of the 64-bit pci address:
 *
 * 63:60 - Coretalk Packet Type -  0x1 for Mem Get/Put (coherent)
 *                                 0x2 for PIO (non-coherent)
 *                                 We will always use 0x1
 * 55:55 - Swap bytes              Currently unused
 */
static u64
tioca_dma_d64(unsigned long paddr)
{
        dma_addr_t bus_addr;

        bus_addr = PHYS_TO_TIODMA(paddr);

        BUG_ON(!bus_addr);
        BUG_ON(bus_addr >> 54);

        /* Set upper nibble to Cache Coherent Memory op */
        bus_addr |= (1UL << 60);

        return bus_addr;
}

/**
 * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
 * @pdev: linux pci_dev representing the function
 * @paddr: system physical address
 *
 * Map @paddr into the 48-bit direct bus space of the CA associated with @pdev.
 *
 * When direct mapping AGP addresses, the 48 bit AGP address is
 * constructed as follows:
 *
 * [47:40] - Low 8 bits of the page Node ID extracted from coretalk
 *              address [47:40].  The upper 8 node bits are fixed
 *              and come from the xxx register bits [5:0]
 * [39:38] - Chiplet ID extracted from coretalk address [39:38]
 * [37:00] - node offset extracted from coretalk address [37:00]
 *
 * Since the node id in general will be non-zero, and the chiplet id
 * will always be non-zero, it follows that the device must support
 * a dma mask of at least 0xffffffffff (40 bits) to target node 0
 * and in general should be 0xffffffffffff (48 bits) to target nodes
 * up to 255.  Nodes above 255 need the support of the xxx register,
 * and so a given CA can only directly target nodes in the range
 * xxx - xxx+255.
 */
static u64
tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
{
        struct tioca_common *tioca_common;
        struct tioca __iomem *ca_base;
        u64 ct_addr;
        dma_addr_t bus_addr;
        u32 node_upper;
        u64 agp_dma_extn;
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

        tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
        ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

        ct_addr = PHYS_TO_TIODMA(paddr);
        if (!ct_addr)
                return 0;

        bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
        node_upper = ct_addr >> 48;

        if (node_upper > 64) {
                printk(KERN_ERR "%s:  coretalk addr 0x%p node id out "
                       "of range\n", __func__, (void *)ct_addr);
                return 0;
        }

        agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
        if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
                printk(KERN_ERR "%s:  coretalk upper node (%u) "
                       "mismatch with ca_agp_dma_addr_extn (%llu)\n",
                       __func__,
                       node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
                return 0;
        }

        return bus_addr;
}

/**
 * tioca_dma_mapped - create a DMA mapping using a CA GART
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @req_size: len (bytes) to map
 *
 * Map @paddr into CA address space using the GART mechanism.  The mapped
 * dma_addr_t is guaranteed to be contiguous in CA bus space.
 */
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
        int i, ps, ps_shift, entry, entries, mapsize, last_entry;
        u64 xio_addr, end_xio_addr;
        struct tioca_common *tioca_common;
        struct tioca_kernel *tioca_kern;
        dma_addr_t bus_addr = 0;
        struct tioca_dmamap *ca_dmamap;
        void *map;
        unsigned long flags;
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

        tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
        tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

        xio_addr = PHYS_TO_TIODMA(paddr);
        if (!xio_addr)
                return 0;

        spin_lock_irqsave(&tioca_kern->ca_lock, flags);

        /*
         * allocate a map struct
         */

        ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
        if (!ca_dmamap)
                goto map_return;

        /*
         * Locate free entries that can hold req_size.  Account for
         * unaligned start/length when allocating.
         */

        ps = tioca_kern->ca_ap_pagesize;        /* will be power of 2 */
        ps_shift = ffs(ps) - 1;
        end_xio_addr = xio_addr + req_size - 1;

        entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;
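        /*
         * e.g. with 16KB GART pages (ps_shift == 14), an 8KB request that
         * starts 12KB into a page crosses a page boundary and so needs
         * two entries.
         */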

        map = tioca_kern->ca_pcigart_pagemap;
        mapsize = tioca_kern->ca_pcigart_entries;

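        /*
         * Scan the bitmap for a run of at least 'entries' contiguous clear
         * bits: find_first_zero_bit()/find_next_zero_bit() give a candidate
         * start and find_next_bit() gives the end of that free run.
         */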
        entry = find_first_zero_bit(map, mapsize);
        while (entry < mapsize) {
                last_entry = find_next_bit(map, mapsize, entry);

                if (last_entry - entry >= entries)
                        break;

                entry = find_next_zero_bit(map, mapsize, last_entry);
        }

        if (entry >= mapsize) {
                kfree(ca_dmamap);
                goto map_return;
        }

        for (i = 0; i < entries; i++)
                set_bit(entry + i, map);

        bus_addr = tioca_kern->ca_pciap_base + (entry * ps);

        ca_dmamap->cad_dma_addr = bus_addr;
        ca_dmamap->cad_gart_size = entries;
        ca_dmamap->cad_gart_entry = entry;
        list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);

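        /*
         * If the start address isn't GART-page aligned, map the partial
         * first page separately: keep the sub-page offset in bus_addr and
         * round xio_addr up to the next page boundary before filling the
         * remaining entries.
         */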
        if (xio_addr % ps) {
                tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
                bus_addr += xio_addr & (ps - 1);
                xio_addr &= ~(ps - 1);
                xio_addr += ps;
                entry++;
        }

        while (xio_addr < end_xio_addr) {
                tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
                xio_addr += ps;
                entry++;
        }

        tioca_tlbflush(tioca_kern);

map_return:
        spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
        return bus_addr;
}

/**
 * tioca_dma_unmap - release CA mapping resources
 * @pdev: linux pci_dev representing the function
 * @bus_addr: bus address returned by an earlier tioca_dma_map
 * @dir: mapping direction (unused)
 *
 * Locate mapping resources associated with @bus_addr and release them.
 * For mappings created using the direct modes (64 or 48) there are no
 * resources to release.
 */
static void
tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
{
        int i, entry;
        struct tioca_common *tioca_common;
        struct tioca_kernel *tioca_kern;
        struct tioca_dmamap *map;
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
        unsigned long flags;

        tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
        tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

        /* Return straight away if this isn't a mapped address */

        if (bus_addr < tioca_kern->ca_pciap_base ||
            bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
                return;

        spin_lock_irqsave(&tioca_kern->ca_lock, flags);

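        /* Locate the dmamap created for this bus address by tioca_dma_mapped() */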
        list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
            if (map->cad_dma_addr == bus_addr)
                break;

        BUG_ON(map == NULL);

        entry = map->cad_gart_entry;

        for (i = 0; i < map->cad_gart_size; i++, entry++) {
                clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
                tioca_kern->ca_pcigart[entry] = 0;
        }
        tioca_tlbflush(tioca_kern);

        list_del(&map->cad_list);
        spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
        kfree(map);
}

/**
 * tioca_dma_map - map pages for PCI DMA
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @byte_count: bytes to map
 * @dma_flags: SN DMA flags; SN_DMA_MSI mappings are not supported
 *
 * This is the main wrapper for mapping host physical pages to CA PCI space.
 * The mapping mode used is based on the device's dma_mask.  As a last resort
 * use the GART mapped mode.
 */
static u64
tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
        u64 mapaddr;

        /*
         * Not supported for now ...
         */
        if (dma_flags & SN_DMA_MSI)
                return 0;

        /*
         * If card is 64 or 48 bit addressable, use a direct mapping.  32
         * bit direct is so restrictive w.r.t. where the memory resides that
         * we don't use it even though CA has some support.
         */

        if (pdev->dma_mask == ~0UL)
                mapaddr = tioca_dma_d64(paddr);
        else if (pdev->dma_mask == 0xffffffffffffUL)
                mapaddr = tioca_dma_d48(pdev, paddr);
        else
                mapaddr = 0;

        /* Last resort ... use PCI portion of CA GART */

        if (mapaddr == 0)
                mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);

        return mapaddr;
}

/**
 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
 * @irq: unused
 * @arg: pointer to tioca_common struct for the given CA
 *
 * Handle a CA error interrupt.  Simply a wrapper around a SAL call which
 * defers processing to the SGI prom.
 */
static irqreturn_t
tioca_error_intr_handler(int irq, void *arg)
{
        struct tioca_common *soft = arg;
        struct ia64_sal_retval ret_stuff;
        u64 segment;
        u64 busnum;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        segment = soft->ca_common.bs_persist_segment;
        busnum = soft->ca_common.bs_persist_busnum;

        SAL_CALL_NOLOCK(ret_stuff,
                        (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
                        segment, busnum, 0, 0, 0, 0, 0);

        return IRQ_HANDLED;
}

/**
 * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
 * @prom_bussoft: Common prom/kernel struct representing the bus
 *
 * Replicates the tioca_common pointed to by @prom_bussoft in kernel
 * space.  Allocates and initializes a kernel-only area for a given CA,
 * and sets up an irq for handling CA error interrupts.
 *
 * On successful setup, returns the kernel version of tioca_common back to
 * the caller.
 */
static void *
tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
        struct tioca_common *tioca_common;
        struct tioca_kernel *tioca_kern;
        struct pci_bus *bus;

        /* sanity check prom rev */

        if (is_shub1() && sn_sal_rev() < 0x0406) {
                printk
                    (KERN_ERR "%s:  SGI prom rev 4.06 or greater required "
                     "for tioca support\n", __func__);
                return NULL;
        }

        /*
         * Allocate kernel bus soft and copy from prom.
         */

        tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
        if (!tioca_common)
                return NULL;

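        /*
         * Copy the prom's bus soft structure and remap the CA register base
         * into kernel virtual space.
         */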
        memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
        tioca_common->ca_common.bs_base = (unsigned long)
                ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
                        sizeof(struct tioca_common));

        /* init kernel-private area */

        tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
        if (!tioca_kern) {
                kfree(tioca_common);
                return NULL;
        }

        tioca_kern->ca_common = tioca_common;
        spin_lock_init(&tioca_kern->ca_lock);
        INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
        tioca_kern->ca_closest_node =
            nasid_to_cnodeid(tioca_common->ca_closest_nasid);
        tioca_common->ca_kernel_private = (u64) tioca_kern;

        bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
                tioca_common->ca_common.bs_persist_busnum);
        BUG_ON(!bus);
        tioca_kern->ca_devices = &bus->devices;

        /* init GART */

        if (tioca_gart_init(tioca_kern) < 0) {
                kfree(tioca_kern);
                kfree(tioca_common);
                return NULL;
        }

        tioca_gart_found++;
        list_add(&tioca_kern->ca_list, &tioca_list);

        if (request_irq(SGI_TIOCA_ERROR,
                        tioca_error_intr_handler,
                        IRQF_SHARED, "TIOCA error", (void *)tioca_common))
                printk(KERN_WARNING
                       "%s:  Unable to get irq %d.  "
                       "Error interrupts won't be routed for TIOCA bus %d\n",
                       __func__, SGI_TIOCA_ERROR,
                       (int)tioca_common->ca_common.bs_persist_busnum);

        sn_set_err_irq_affinity(SGI_TIOCA_ERROR);

        /* Setup locality information */
        controller->node = tioca_kern->ca_closest_node;
        return tioca_common;
}

static struct sn_pcibus_provider tioca_pci_interfaces = {
        .dma_map = tioca_dma_map,
        .dma_map_consistent = tioca_dma_map,
        .dma_unmap = tioca_dma_unmap,
        .bus_fixup = tioca_bus_fixup,
        .force_interrupt = NULL,
        .target_interrupt = NULL
};

/**
 * tioca_init_provider - init SN PCI provider ops for TIO CA
 */
int
tioca_init_provider(void)
{
        sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
        return 0;
}