linux/arch/s390/pci/pci.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES                                              \
        min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),   \
            ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

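/*
 * Look up a zpci_dev by its function ID (FID). Walks zpci_list under
 * zpci_list_lock and returns the matching entry, or NULL if none is found.
 * Note that no reference is taken on the returned device.
 */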
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
        struct zpci_dev *tmp, *zdev = NULL;

        spin_lock(&zpci_list_lock);
        list_for_each_entry(tmp, &zpci_list, entry) {
                if (tmp->fid == fid) {
                        zdev = tmp;
                        break;
                }
        }
        spin_unlock(&zpci_list_lock);
        return zdev;
}

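/*
 * Release standby devices that the platform meanwhile reports as reserved:
 * they are moved off zpci_list first, then each one is handed to
 * zpci_device_reserved() outside of zpci_list_lock.
 */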
void zpci_remove_reserved_devices(void)
{
        struct zpci_dev *tmp, *zdev;
        enum zpci_state state;
        LIST_HEAD(remove);

        spin_lock(&zpci_list_lock);
        list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
                if (zdev->state == ZPCI_FN_STATE_STANDBY &&
                    !clp_get_state(zdev->fid, &state) &&
                    state == ZPCI_FN_STATE_RESERVED)
                        list_move_tail(&zdev->entry, &remove);
        }
        spin_unlock(&zpci_list_lock);

        list_for_each_entry_safe(zdev, tmp, &remove, entry)
                zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
        return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
        return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
                       u64 base, u64 limit, u64 iota)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
        struct zpci_fib fib = {0};
        u8 cc, status;

        WARN_ON_ONCE(iota & 0x3fff);
        fib.pba = base;
        fib.pal = limit;
        fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
        cc = zpci_mod_fc(req, &fib, &status);
        if (cc)
                zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
        return cc;
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
        struct zpci_fib fib = {0};
        u8 cc, status;

        cc = zpci_mod_fc(req, &fib, &status);
        if (cc)
                zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
        return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
        struct zpci_fib fib = {0};
        u8 cc, status;

        if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
                return -EINVAL;

        zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
        if (!zdev->fmb)
                return -ENOMEM;
        WARN_ON((u64) zdev->fmb & 0xf);

        /* reset software counters */
        atomic64_set(&zdev->allocated_pages, 0);
        atomic64_set(&zdev->mapped_pages, 0);
        atomic64_set(&zdev->unmapped_pages, 0);

        fib.fmb_addr = virt_to_phys(zdev->fmb);
        cc = zpci_mod_fc(req, &fib, &status);
        if (cc) {
                kmem_cache_free(zdev_fmb_cache, zdev->fmb);
                zdev->fmb = NULL;
        }
        return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
        struct zpci_fib fib = {0};
        u8 cc, status;

        if (!zdev->fmb)
                return -EINVAL;

        /* Function measurement is disabled if fmb address is zero */
        cc = zpci_mod_fc(req, &fib, &status);
        if (cc == 3) /* Function already gone. */
                cc = 0;

        if (!cc) {
                kmem_cache_free(zdev_fmb_cache, zdev->fmb);
                zdev->fmb = NULL;
        }
        return cc ? -EIO : 0;
}

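/*
 * Config space accessors, backed by the PCI load/store primitives
 * (__zpci_load/__zpci_store). After the little-endian byte swap the value
 * sits in the most significant bytes of the doubleword, hence the shift by
 * (8 - len) * 8 bits; e.g. a 2-byte read is shifted right by 48 bits.
 */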
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data;
        int rc;

        rc = __zpci_load(&data, req, offset);
        if (!rc) {
                data = le64_to_cpu((__force __le64) data);
                data >>= (8 - len) * 8;
                *val = (u32) data;
        } else
                *val = 0xffffffff;
        return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data = val;
        int rc;

        data <<= (8 - len) * 8;
        data = (__force u64) cpu_to_le64(data);
        rc = __zpci_store(data, req, offset);
        return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                       resource_size_t size,
                                       resource_size_t align)
{
        return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
        zpci_memcpy_toio(to, from, count);
}

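/*
 * Map a physical BAR address for CPU access. Without MIO support the
 * "address" handed in is already an iomap cookie (see ZPCI_ADDR()) and is
 * returned unchanged; with MIO a real mapping is set up via
 * ioremap_page_range().
 */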
static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{
        unsigned long offset, vaddr;
        struct vm_struct *area;
        phys_addr_t last_addr;

        last_addr = addr + size - 1;
        if (!size || last_addr < addr)
                return NULL;

        if (!static_branch_unlikely(&have_mio))
                return (void __iomem *) addr;

        offset = addr & ~PAGE_MASK;
        addr &= PAGE_MASK;
        size = PAGE_ALIGN(size + offset);
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;

        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
                free_vm_area(area);
                return NULL;
        }
        return (void __iomem *) ((unsigned long) area->addr + offset);
}

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
        return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
        return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
        return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
        return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);

void iounmap(volatile void __iomem *addr)
{
        if (static_branch_likely(&have_mio))
                vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
                                        unsigned long offset, unsigned long max)
{
        struct zpci_dev *zdev = to_zpci(pdev);
        int idx;

        idx = zdev->bars[bar].map_idx;
        spin_lock(&zpci_iomap_lock);
        /* Detect overrun */
        WARN_ON(!++zpci_iomap_start[idx].count);
        zpci_iomap_start[idx].fh = zdev->fh;
        zpci_iomap_start[idx].bar = bar;
        spin_unlock(&zpci_iomap_lock);

        return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
                                         unsigned long offset,
                                         unsigned long max)
{
        unsigned long barsize = pci_resource_len(pdev, bar);
        struct zpci_dev *zdev = to_zpci(pdev);
        void __iomem *iova;

        iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
        return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
                              unsigned long offset, unsigned long max)
{
        if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
                return NULL;

        if (static_branch_likely(&have_mio))
                return pci_iomap_range_mio(pdev, bar, offset, max);
        else
                return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
                                            unsigned long offset, unsigned long max)
{
        unsigned long barsize = pci_resource_len(pdev, bar);
        struct zpci_dev *zdev = to_zpci(pdev);
        void __iomem *iova;

        iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
        return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
                                 unsigned long offset, unsigned long max)
{
        if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
                return NULL;

        if (static_branch_likely(&have_mio))
                return pci_iomap_wc_range_mio(pdev, bar, offset, max);
        else
                return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

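/* Drop one reference on an iomap entry; clear fh/bar once the count hits zero */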
static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
        unsigned int idx = ZPCI_IDX(addr);

        spin_lock(&zpci_iomap_lock);
        /* Detect underrun */
        WARN_ON(!zpci_iomap_start[idx].count);
        if (!--zpci_iomap_start[idx].count) {
                zpci_iomap_start[idx].fh = 0;
                zpci_iomap_start[idx].bar = 0;
        }
        spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
        iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
        if (static_branch_likely(&have_mio))
                pci_iounmap_mio(pdev, addr);
        else
                pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
{
        struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

        return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 val)
{
        struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

        return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

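/*
 * Set the resource start/end addresses of all BARs: either the MIO
 * write-through addresses when MIO is in use, or iomap cookies from
 * pci_iomap_range_fh() otherwise.
 */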
static void zpci_map_resources(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);
        resource_size_t len;
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;

                if (zpci_use_mio(zdev))
                        pdev->resource[i].start =
                                (resource_size_t __force) zdev->bars[i].mio_wt;
                else
                        pdev->resource[i].start = (resource_size_t __force)
                                pci_iomap_range_fh(pdev, i, 0, 0);
                pdev->resource[i].end = pdev->resource[i].start + len - 1;
        }

        zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);
        resource_size_t len;
        int i;

        if (zpci_use_mio(zdev))
                return;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
                pci_iounmap_fh(pdev, (void __iomem __force *)
                               pdev->resource[i].start);
        }
}

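/* Allocate a free slot in the global BAR iomap table */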
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
        unsigned long entry;

        spin_lock(&zpci_iomap_lock);
        entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
        if (entry == ZPCI_IOMAP_ENTRIES) {
                spin_unlock(&zpci_iomap_lock);
                return -ENOSPC;
        }
        set_bit(entry, zpci_iomap_bitmap);
        spin_unlock(&zpci_iomap_lock);
        return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
        spin_lock(&zpci_iomap_lock);
        memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
        clear_bit(entry, zpci_iomap_bitmap);
        spin_unlock(&zpci_iomap_lock);
}

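/* Allocate a resource named after the zdev and claim it from iomem_resource */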
static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
                                    unsigned long size, unsigned long flags)
{
        struct resource *r;

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (!r)
                return NULL;

        r->start = start;
        r->end = r->start + size - 1;
        r->flags = flags;
        r->name = zdev->res_name;

        if (request_resource(&iomem_resource, r)) {
                kfree(r);
                return NULL;
        }
        return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev,
                             struct list_head *resources)
{
        unsigned long addr, size, flags;
        struct resource *res;
        int i, entry;

        snprintf(zdev->res_name, sizeof(zdev->res_name),
                 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (!zdev->bars[i].size)
                        continue;
                entry = zpci_alloc_iomap(zdev);
                if (entry < 0)
                        return entry;
                zdev->bars[i].map_idx = entry;

                /* only MMIO is supported */
                flags = IORESOURCE_MEM;
                if (zdev->bars[i].val & 8)
                        flags |= IORESOURCE_PREFETCH;
                if (zdev->bars[i].val & 4)
                        flags |= IORESOURCE_MEM_64;

                if (zpci_use_mio(zdev))
                        addr = (unsigned long) zdev->bars[i].mio_wt;
                else
                        addr = ZPCI_ADDR(entry);
                size = 1UL << zdev->bars[i].size;

                res = __alloc_res(zdev, addr, size, flags);
                if (!res) {
                        zpci_free_iomap(zdev, entry);
                        return -ENOMEM;
                }
                zdev->bars[i].res = res;
                pci_add_resource(resources, res);
        }
        zdev->has_resources = 1;

        return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (!zdev->bars[i].size || !zdev->bars[i].res)
                        continue;

                zpci_free_iomap(zdev, zdev->bars[i].map_idx);
                release_resource(zdev->bars[i].res);
                kfree(zdev->bars[i].res);
        }
        zdev->has_resources = 0;
}

int pcibios_add_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);
        struct resource *res;
        int i;

        /* The pdev has a reference to the zdev via its bus */
        zpci_zdev_get(zdev);
        if (pdev->is_physfn)
                pdev->no_vf_scan = 1;

        pdev->dev.groups = zpci_attr_groups;
        pdev->dev.dma_ops = &s390_pci_dma_ops;
        zpci_map_resources(pdev);

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                res = &pdev->resource[i];
                if (res->parent || !res->flags)
                        continue;
                pci_claim_resource(pdev, i);
        }

        return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);

        zpci_unmap_resources(pdev);
        zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
        struct zpci_dev *zdev = to_zpci(pdev);

        zpci_debug_init_device(zdev, dev_name(&pdev->dev));
        zpci_fmb_enable_device(zdev);

        return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = to_zpci(pdev);

        zpci_fmb_disable_device(zdev);
        zpci_debug_exit_device(zdev);
}

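/* Reserve an explicit domain number; fails with -EEXIST if it is taken */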
static int __zpci_register_domain(int domain)
{
        spin_lock(&zpci_domain_lock);
        if (test_bit(domain, zpci_domain)) {
                spin_unlock(&zpci_domain_lock);
                pr_err("Domain %04x is already assigned\n", domain);
                return -EEXIST;
        }
        set_bit(domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
        return domain;
}

static int __zpci_alloc_domain(void)
{
        int domain;

        spin_lock(&zpci_domain_lock);
        /*
         * We can always auto allocate domains below ZPCI_NR_DEVICES.
         * There is either a free domain or we have reached the maximum in
         * which case we would have bailed earlier.
         */
        domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
        set_bit(domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
        return domain;
}

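/*
 * Pick a domain number for a new function: with UID checking active the
 * caller-provided domain number (the function's UID) is registered directly;
 * a domain of zero disables UID checking and falls back to automatic
 * allocation below ZPCI_NR_DEVICES.
 */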
int zpci_alloc_domain(int domain)
{
        if (zpci_unique_uid) {
                if (domain)
                        return __zpci_register_domain(domain);
                pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
                update_uid_checking(false);
        }
        return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
        spin_lock(&zpci_domain_lock);
        clear_bit(domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
}

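/*
 * Enable/disable a function through CLP; on success the refreshed function
 * handle is stored back into the zdev.
 */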
int zpci_enable_device(struct zpci_dev *zdev)
{
        u32 fh = zdev->fh;
        int rc = 0;

        if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
                rc = -EIO;
        else
                zdev->fh = fh;
        return rc;
}

int zpci_disable_device(struct zpci_dev *zdev)
{
        u32 fh = zdev->fh;
        int cc, rc = 0;

        cc = clp_disable_fh(zdev, &fh);
        if (!cc) {
                zdev->fh = fh;
        } else if (cc == CLP_RC_SETPCIFN_ALRDY) {
                pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
                        zdev->fid);
                /* Function is already disabled - update handle */
                rc = clp_refresh_fh(zdev->fid, &fh);
                if (!rc) {
                        zdev->fh = fh;
                        rc = -EINVAL;
                }
        } else {
                rc = -EIO;
        }
        return rc;
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
        struct zpci_dev *zdev;
        int rc;

        zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                return ERR_PTR(-ENOMEM);

        /* FID and Function Handle are the static/dynamic identifiers */
        zdev->fid = fid;
        zdev->fh = fh;

        /* Query function properties and update zdev */
        rc = clp_query_pci_fn(zdev);
        if (rc)
                goto error;
        zdev->state = state;

        kref_init(&zdev->kref);
        mutex_init(&zdev->lock);

        rc = zpci_init_iommu(zdev);
        if (rc)
                goto error;

        rc = zpci_bus_device_register(zdev, &pci_root_ops);
        if (rc)
                goto error_destroy_iommu;

        spin_lock(&zpci_list_lock);
        list_add_tail(&zdev->entry, &zpci_list);
        spin_unlock(&zpci_list_lock);

        return zdev;

error_destroy_iommu:
        zpci_destroy_iommu(zdev);
error:
        zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
        kfree(zdev);
        return ERR_PTR(rc);
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
        enum zpci_state state = zdev->state;

        return state != ZPCI_FN_STATE_RESERVED &&
                state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If the PCI device is
 * parked because its PCI bus cannot be created before function 0 has been
 * seen, it is ignored but will be scanned once function 0 appears.
 * If any failure occurs, the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
        int rc;

        zdev->fh = fh;
        /* the PCI function will be scanned once function 0 appears */
        if (!zdev->zbus->bus)
                return 0;

        /*
         * For function 0 on a multi-function bus scan the whole bus as we
         * might have to pick up existing functions waiting for it to allow
         * creating the PCI bus
         */
        if (zdev->devfn == 0 && zdev->zbus->multifunction)
                rc = zpci_bus_scan_bus(zdev->zbus);
        else
                rc = zpci_bus_scan_device(zdev);

        return rc;
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
        int rc;

        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);

        if (zdev->dma_table) {
                rc = zpci_dma_exit_device(zdev);
                if (rc)
                        return rc;
        }
        if (zdev_enabled(zdev)) {
                rc = zpci_disable_device(zdev);
                if (rc)
                        return rc;
        }

        rc = sclp_pci_deconfigure(zdev->fid);
        zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
        if (rc)
                return rc;
        zdev->state = ZPCI_FN_STATE_STANDBY;

        return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can no longer be found via
 * get_zdev_by_fid(). It may still be accessible via existing references,
 * but it will no longer be functional.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
        if (zdev->has_hp_slot)
                zpci_exit_slot(zdev);
        /*
         * Remove device from zpci_list as it is going away. This also
         * makes sure we ignore subsequent zPCI events for this device.
         */
        spin_lock(&zpci_list_lock);
        list_del(&zdev->entry);
        spin_unlock(&zpci_list_lock);
        zdev->state = ZPCI_FN_STATE_RESERVED;
        zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
        zpci_zdev_put(zdev);
}

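/*
 * Final kref release: tear the function down according to its current state,
 * falling through from Configured (deconfigure via SCLP) to Standby (remove
 * hotplug slot and list entry) to Reserved (free resources and IOMMU).
 */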
void zpci_release_device(struct kref *kref)
{
        struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
        int ret;

        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);

        if (zdev->dma_table)
                zpci_dma_exit_device(zdev);
        if (zdev_enabled(zdev))
                zpci_disable_device(zdev);

        switch (zdev->state) {
        case ZPCI_FN_STATE_CONFIGURED:
                ret = sclp_pci_deconfigure(zdev->fid);
                zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
                fallthrough;
        case ZPCI_FN_STATE_STANDBY:
                if (zdev->has_hp_slot)
                        zpci_exit_slot(zdev);
                spin_lock(&zpci_list_lock);
                list_del(&zdev->entry);
                spin_unlock(&zpci_list_lock);
                zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
                fallthrough;
        case ZPCI_FN_STATE_RESERVED:
                if (zdev->has_resources)
                        zpci_cleanup_bus_resources(zdev);
                zpci_bus_device_unregister(zdev);
                zpci_destroy_iommu(zdev);
                fallthrough;
        default:
                break;
        }
        zpci_dbg(3, "rem fid:%x\n", zdev->fid);
        kfree(zdev);
}

int zpci_report_error(struct pci_dev *pdev,
                      struct zpci_report_error_header *report)
{
        struct zpci_dev *zdev = to_zpci(pdev);

        return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

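/*
 * Allocate the FMB cache (FMBs must be naturally aligned), the iomap table
 * and its allocation bitmap; with MIO, also set up write-back MIO via CLP.
 */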
static int zpci_mem_init(void)
{
        BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
                     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

        zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
                                           __alignof__(struct zpci_fmb), 0, NULL);
        if (!zdev_fmb_cache)
                goto error_fmb;

        zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
                                   sizeof(*zpci_iomap_start), GFP_KERNEL);
        if (!zpci_iomap_start)
                goto error_iomap;

        zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
                                    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
        if (!zpci_iomap_bitmap)
                goto error_iomap_bitmap;

        if (static_branch_likely(&have_mio))
                clp_setup_writeback_mio();

        return 0;
error_iomap_bitmap:
        kfree(zpci_iomap_start);
error_iomap:
        kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
        return -ENOMEM;
}

static void zpci_mem_exit(void)
{
        kfree(zpci_iomap_bitmap);
        kfree(zpci_iomap_start);
        kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

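/*
 * Handle the s390 options of the "pci=" kernel parameter; the common PCI
 * code passes each option in separately, so e.g. booting with "pci=nomio"
 * clears the MIO machine flag before PCI is initialized. Unknown options
 * are returned unconsumed.
 */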
char * __init pcibios_setup(char *str)
{
        if (!strcmp(str, "off")) {
                s390_pci_probe = 0;
                return NULL;
        }
        if (!strcmp(str, "nomio")) {
                S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
                return NULL;
        }
        if (!strcmp(str, "force_floating")) {
                s390_pci_force_floating = 1;
                return NULL;
        }
        if (!strcmp(str, "norid")) {
                s390_pci_no_rid = 1;
                return NULL;
        }
        return str;
}

bool zpci_is_enabled(void)
{
        return s390_pci_initialized;
}

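/*
 * Base PCI initialization: requires the PCI instruction facilities (69 and
 * 71); enables MIO instructions (control register 2, bit 5) when available,
 * then brings up the debug facility, caches, IRQs and DMA before the
 * initial CLP device scan.
 */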
static int __init pci_base_init(void)
{
        int rc;

        if (!s390_pci_probe)
                return 0;

        if (!test_facility(69) || !test_facility(71)) {
                pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
                return 0;
        }

        if (MACHINE_HAS_PCI_MIO) {
                static_branch_enable(&have_mio);
                ctl_set_bit(2, 5);
        }

        rc = zpci_debug_init();
        if (rc)
                goto out;

        rc = zpci_mem_init();
        if (rc)
                goto out_mem;

        rc = zpci_irq_init();
        if (rc)
                goto out_irq;

        rc = zpci_dma_init();
        if (rc)
                goto out_dma;

        rc = clp_scan_pci_devices();
        if (rc)
                goto out_find;
        zpci_bus_scan_busses();

        s390_pci_initialized = 1;
        return 0;

out_find:
        zpci_dma_exit();
out_dma:
        zpci_irq_exit();
out_irq:
        zpci_mem_exit();
out_mem:
        zpci_debug_exit();
out:
        return rc;
}
subsys_initcall_sync(pci_base_init);