linux/arch/sparc/kernel/pci_sun4v.c
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME     "pci_sun4v"
#define PFX             DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))

struct iommu_batch {
        struct device   *dev;           /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

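/* To amortize the cost of the pci_sun4v_iommu_map() hypervisor call,
 * mappings are accumulated in a per-cpu batch: physical page addresses
 * are collected into a page-sized pglist (PGLIST_NENTS entries) and
 * handed to the hypervisor in as few calls as possible.  The batch is
 * only touched with interrupts disabled, which is what makes the
 * per-cpu data safe to use without a lock.
 */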
static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        p->dev          = dev;
        p->prot         = prot;
        p->entry        = entry;
        p->npages       = 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk("iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        if (p->entry + p->npages == entry)
                return;
        if (p->entry != ~0UL)
                iommu_batch_flush(p);
        p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return iommu_batch_flush(p);
}

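/* Consistent DMA allocation: grab real pages on the device's NUMA
 * node, reserve a contiguous range of IOTSB entries under the IOMMU
 * lock, then batch-map each page with both READ and WRITE attributes.
 * The returned DMA address is the IOMMU base plus the entry offset.
 */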
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        unsigned long flags, order, first_page, npages, n;
        struct iommu *iommu;
        struct page *page;
        void *ret;
        long entry;
        int nid;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto range_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        local_irq_save(flags);

        iommu_batch_start(dev,
                          (HV_PCI_MAP_ATTR_READ |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);

        for (n = 0; n < npages; n++) {
                long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, *dma_addrp, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        /* first_page was converted to a physical address for the
         * mapping batch above; convert it back before handing it to
         * free_pages(), which expects a kernel virtual address.
         */
        first_page = (unsigned long) __va(first_page);

range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, dvma, npages);

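        /* The demap hypervisor call returns how many entries it
         * actually tore down, which may be fewer than requested, so
         * loop until the whole range is gone.
         */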
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

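/* Map a single page for streaming DMA: reserve IOTSB entries under
 * the IOMMU lock, then fill them through the mapping batch with READ
 * (plus WRITE unless the transfer is DMA_TO_DEVICE) attributes.  The
 * returned handle keeps the original offset within the IO page.
 */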
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction,
                                  struct dma_attrs *attrs)
{
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        oaddr = (unsigned long)(page_address(page) + offset);
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, bus_addr, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction,
                              struct dma_attrs *attrs)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, bus_addr, npages);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

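/* Map a scatterlist.  IOTSB ranges are allocated per segment and the
 * hypervisor mappings are built through the per-cpu batch (started
 * with the ~0UL sentinel so the first allocation primes p->entry).
 * Adjacent entries are merged into a single DMA segment when the
 * allocated DMA addresses are contiguous, the combined length fits
 * dma_get_max_seg_size(), and no segment boundary is crossed.
 */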
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction,
                         struct dma_attrs *attrs)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct iommu *iommu;
        unsigned long base_shift;
        long err;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        if (nelems == 0 || !iommu)
                return 0;

        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_batch_start(dev, prot, ~0UL);

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
                entry = iommu_range_alloc(dev, iommu, npages, &handle);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                iommu_batch_new_entry(entry);

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->page_table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        err = iommu_batch_add(paddr);
                        if (unlikely(err < 0L))
                                goto iommu_map_failed;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        err = iommu_batch_end();

        if (unlikely(err < 0L))
                goto iommu_map_failed;

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);
                        iommu_range_free(iommu, vaddr, npages);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
        struct iommu *iommu;
        unsigned long flags;
        u32 devhandle;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
                iommu_range_free(iommu, dma_handle, npages);

                entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                while (npages) {
                        unsigned long num;

                        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                                    npages);
                        entry += num;
                        npages -= num;
                }

                sg = sg_next(sg);
        }

        spin_unlock_irqrestore(&iommu->lock, flags);
}

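/* These ops are installed as the global dma_ops once the PCI
 * hypervisor API group has been negotiated in pci_sun4v_probe().
 */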
static struct dma_map_ops sun4v_dma_ops = {
        .alloc_coherent                 = dma_4v_alloc_coherent,
        .free_coherent                  = dma_4v_free_coherent,
        .map_page                       = dma_4v_map_page,
        .unmap_page                     = dma_4v_unmap_page,
        .map_sg                         = dma_4v_map_sg,
        .unmap_sg                       = dma_4v_unmap_sg,
};

static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
                                         struct device *parent)
{
        struct property *prop;
        struct device_node *dp;

        dp = pbm->op->node;
        prop = of_find_property(dp, "66mhz-capable", NULL);
        pbm->is_66mhz_capable = (prop != NULL);
        pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

        /* XXX register error interrupt handlers XXX */
}

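/* Walk the IOTSB and pick up mappings left behind by the firmware
 * (OBP).  Entries whose real address still lies in the kernel's
 * available physical memory are treated as stale and demapped;
 * anything else is presumed live, so its arena bit is set and that
 * range is never handed out again.
 */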
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
                                                      struct iommu *iommu)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        if (page_in_phys_avail(ra)) {
                                pci_sun4v_iommu_demap(devhandle,
                                                      HV_PCI_TSBID(0, i), 1);
                        } else {
                                cnt++;
                                __set_bit(i, arena->map);
                        }
                }
        }

        return cnt;
}

static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
        struct iommu *iommu = pbm->iommu;
        unsigned long num_tsb_entries, sz, tsbsize;
        u32 dma_mask, dma_offset;
        const u32 *vdma;

        vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
        if (!vdma)
                vdma = vdma_default;

        if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
                printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
                       vdma[0], vdma[1]);
                return -EINVAL;
        }

        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
        tsbsize = num_tsb_entries * sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
                return -ENOMEM;
        }
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);

        return 0;
}

#ifdef CONFIG_PCI_MSI
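/* Layout of one MSI event queue (MSIQ) entry as written by the
 * hypervisor; the authoritative field definitions live in the sun4v
 * hypervisor API documentation.
 */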
struct pci_sun4v_msiq_entry {
        u64             version_type;
#define MSIQ_VERSION_MASK               0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT              32
#define MSIQ_TYPE_MASK                  0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT                 0
#define MSIQ_TYPE_NONE                  0x00
#define MSIQ_TYPE_MSG                   0x01
#define MSIQ_TYPE_MSI32                 0x02
#define MSIQ_TYPE_MSI64                 0x03
#define MSIQ_TYPE_INTX                  0x08
#define MSIQ_TYPE_NONE2                 0xff

        u64             intx_sysino;
        u64             reserved1;
        u64             stick;
        u64             req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK             0xff00UL
#define MSIQ_REQID_BUS_SHIFT            8
#define MSIQ_REQID_DEVICE_MASK          0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT         3
#define MSIQ_REQID_FUNC_MASK            0x0007UL
#define MSIQ_REQID_FUNC_SHIFT           0

        u64             msi_address;

        /* The format of this value is message type dependent.
         * For MSI bits 15:0 are the data from the MSI packet.
         * For MSI-X bits 31:0 are the data from the MSI packet.
         * For MSG, the message code and message routing code where:
         *      bits 39:32 is the bus/device/fn of the msg target-id
         *      bits 18:16 is the message routing code
         *      bits 7:0 is the message code
         * For INTx the low order 2-bits are:
         *      00 - INTA
         *      01 - INTB
         *      10 - INTC
         *      11 - INTD
         */
        u64             msi_data;

        u64             reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long *head)
{
        unsigned long err, limit;

        err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -ENXIO;

        limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        if (unlikely(*head >= limit))
                return -EFBIG;

        return 0;
}

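/* Dequeue one MSI from an event queue.  '*head' is a byte offset into
 * the queue, so the entry address is the queue base plus the head.
 * Returns 1 if an MSI was consumed, 0 if the current slot is empty,
 * and a negative errno on a malformed entry or hypervisor failure.
 */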
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
                                 unsigned long msiqid, unsigned long *head,
                                 unsigned long *msi)
{
        struct pci_sun4v_msiq_entry *ep;
        unsigned long err, type;

        /* Note: void pointer arithmetic, 'head' is a byte offset  */
        ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
                                 (pbm->msiq_ent_count *
                                  sizeof(struct pci_sun4v_msiq_entry))) +
              *head);

        if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
                return 0;

        type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = ep->msi_data;

        err = pci_sun4v_msi_setstate(pbm->devhandle,
                                     ep->msi_data /* msi_num */,
                                     HV_MSISTATE_IDLE);
        if (unlikely(err))
                return -ENXIO;

        /* Clear the entry.  */
        ep->version_type &= ~MSIQ_TYPE_MASK;

        (*head) += sizeof(struct pci_sun4v_msiq_entry);
        if (*head >=
            (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
                *head = 0;

        return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long head)
{
        unsigned long err;

        err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -EINVAL;

        return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                               unsigned long msi, int is_msi64)
{
        if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
                                  (is_msi64 ?
                                   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
                return -ENXIO;
        if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
                return -ENXIO;
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
                return -ENXIO;
        return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long err, msiqid;

        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
        if (err)
                return -ENXIO;

        pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

        return 0;
}

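/* Allocate backing store for all of this PBM's MSI event queues in
 * one physically contiguous block, register each queue with the
 * hypervisor, and read the configuration back to verify that the
 * base address and entry count actually took effect.
 */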
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long err, base = __pa(pages + (i * q_size));
                unsigned long ret1, ret2;

                err = pci_sun4v_msiq_conf(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          base, pbm->msiq_ent_count);
                if (err) {
                        printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
                               err);
                        goto h_error;
                }

                err = pci_sun4v_msiq_info(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          &ret1, &ret2);
                if (err) {
                        printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
                               err);
                        goto h_error;
                }
                if (ret1 != base || ret2 != pbm->msiq_ent_count) {
                        printk(KERN_ERR "MSI: Bogus qconf "
                               "expected[%lx:%x] got[%lx:%lx]\n",
                               base, pbm->msiq_ent_count,
                               ret1, ret2);
                        goto h_error;
                }
        }

        return 0;

h_error:
        free_pages(pages, order);
        return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long msiqid = pbm->msiq_first + i;

                (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);

        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
                                    unsigned long msiqid,
                                    unsigned long devino)
{
        unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

        if (!virt_irq)
                return -ENOMEM;

        if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
                return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;

        return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
        .get_head       =       pci_sun4v_get_head,
        .dequeue_msi    =       pci_sun4v_dequeue_msi,
        .set_head       =       pci_sun4v_set_head,
        .msi_setup      =       pci_sun4v_msi_setup,
        .msi_teardown   =       pci_sun4v_msi_teardown,
        .msiq_alloc     =       pci_sun4v_msiq_alloc,
        .msiq_free      =       pci_sun4v_msiq_free,
        .msiq_build_irq =       pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
                                        struct of_device *op, u32 devhandle)
{
        struct device_node *dp = op->node;
        int err;

        pbm->numa_node = of_node_to_nid(dp);

        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->op = op;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk("%s: SUN4V PCI Bus Module\n", pbm->name);
        printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);

        err = pci_sun4v_iommu_init(pbm);
        if (err)
                return err;

        pci_sun4v_msi_init(pbm);

        pci_sun4v_scan_bus(pbm, &op->dev);

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        return 0;
}

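/* Probe one PCI controller node.  On the first invocation this
 * negotiates the PCI hypervisor API group (vpci_major/vpci_minor),
 * installs sun4v_dma_ops as the global dma_ops, and allocates the
 * per-cpu IOMMU batch page lists; later calls only set up the new
 * PBM.
 */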
static int __devinit pci_sun4v_probe(struct of_device *op,
                                     const struct of_device_id *match)
{
        const struct linux_prom64_registers *regs;
        static int hvapi_negotiated = 0;
        struct pci_pbm_info *pbm;
        struct device_node *dp;
        struct iommu *iommu;
        u32 devhandle;
        int i, err;

        dp = op->node;

        if (!hvapi_negotiated++) {
                err = sun4v_hvapi_register(HV_GRP_PCI,
                                           vpci_major,
                                           &vpci_minor);

                if (err) {
                        printk(KERN_ERR PFX "Could not register hvapi, "
                               "err=%d\n", err);
                        return err;
                }
                printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
                       vpci_major, vpci_minor);

                dma_ops = &sun4v_dma_ops;
        }

        regs = of_get_property(dp, "reg", NULL);
        err = -ENODEV;
        if (!regs) {
                printk(KERN_ERR PFX "Could not find config registers\n");
                goto out_err;
        }
        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        err = -ENOMEM;
        if (!iommu_batch_initialized) {
                for_each_possible_cpu(i) {
                        unsigned long page = get_zeroed_page(GFP_KERNEL);

                        if (!page)
                                goto out_err;

                        per_cpu(iommu_batch, i).pglist = (u64 *) page;
                }
                iommu_batch_initialized = 1;
        }

        pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
        if (!pbm) {
                printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
                goto out_err;
        }

        iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
        if (!iommu) {
                printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
                goto out_free_controller;
        }

        pbm->iommu = iommu;

        err = pci_sun4v_pbm_init(pbm, op, devhandle);
        if (err)
                goto out_free_iommu;

        dev_set_drvdata(&op->dev, pbm);

        return 0;

out_free_iommu:
        kfree(pbm->iommu);

out_free_controller:
        kfree(pbm);

out_err:
        return err;
}

static struct of_device_id __initdata pci_sun4v_match[] = {
        {
                .name = "pci",
                .compatible = "SUNW,sun4v-pci",
        },
        {},
};

static struct of_platform_driver pci_sun4v_driver = {
        .name           = DRIVER_NAME,
        .match_table    = pci_sun4v_match,
        .probe          = pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
        return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);