linux/arch/powerpc/platforms/pseries/eeh_pseries.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file implements the platform-dependent EEH operations for pseries.
 * The pseries platform is built heavily on RTAS, so the platform-dependent
 * EEH operations are implemented in terms of RTAS calls. The functions are
 * derived from arch/powerpc/platforms/pseries/eeh.c, with the necessary
 * cleanup applied.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;

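/**
 * pseries_pcibios_bus_add_device - set up EEH for a device added to the bus
 * @pdev: newly added PCI device
 *
 * Runs from the pcibios bus-add-device hook: it initialises the eeh_dev
 * associated with the device's pci_dn and then probes the device into a PE.
 * For SR-IOV VFs it also copies the IDs from the pci_dev into the pci_dn and
 * re-parents the eeh_dev from the bus PE to the PF's PE.
 */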
void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
        struct pci_dn *pdn = pci_get_pdn(pdev);

        if (eeh_has_flag(EEH_FORCE_DISABLED))
                return;

        dev_dbg(&pdev->dev, "EEH: Setting up device\n");
#ifdef CONFIG_PCI_IOV
        if (pdev->is_virtfn) {
                pdn->device_id  =  pdev->device;
                pdn->vendor_id  =  pdev->vendor;
                pdn->class_code =  pdev->class;
                /*
                 * The last allow-unfreeze return code is kept so that user
                 * space can retrieve it through eeh-sysfs and see how the
                 * last command completed on the platform.
                 */
                pdn->last_allow_rc =  0;
        }
#endif
        pseries_eeh_init_edev(pdn);
#ifdef CONFIG_PCI_IOV
        if (pdev->is_virtfn) {
                /*
                 * FIXME: This really should be handled by choosing the right
                 *        parent PE in pseries_eeh_init_edev().
                 */
                struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe;
                struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

                edev->pe_config_addr =  (pdn->busno << 16) | (pdn->devfn << 8);
                eeh_pe_tree_remove(edev); /* Remove from the bus PE it was added to */
                eeh_pe_tree_insert(edev, physfn_pe);   /* Add as VF PE type */
        }
#endif
        eeh_probe_device(pdev);
}


/**
 * pseries_eeh_get_pe_config_addr - Find the pe_config_addr for a device
 * @pdn: pci_dn of the input device
 *
 * The EEH RTAS calls use a tuple consisting of: (buid_hi, buid_lo,
 * pe_config_addr) as a handle to a given PE. This function finds the
 * pe_config_addr based on the device's config addr.
 *
 * Keep in mind that the pe_config_addr *might* be numerically identical to the
 * device's config addr, but the two are conceptually distinct.
 *
 * Returns the pe_config_addr, or a negative error code.
 */
static int pseries_eeh_get_pe_config_addr(struct pci_dn *pdn)
{
        int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
        struct pci_controller *phb = pdn->phb;
        int ret, rets[3];

        if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
                /*
                 * First of all, use function 1 to determine if this device is
                 * part of a PE or not. rets[0] being zero indicates it's not.
                 */
                ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
                                config_addr, BUID_HI(phb->buid),
                                BUID_LO(phb->buid), 1);
                if (ret || (rets[0] == 0))
                        return -ENOENT;

                /* Retrieve the associated PE config address with function 0 */
                ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
                                config_addr, BUID_HI(phb->buid),
                                BUID_LO(phb->buid), 0);
                if (ret) {
                        pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
                                __func__, phb->global_number, config_addr);
                        return -ENXIO;
                }

                return rets[0];
        }

        if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
                                config_addr, BUID_HI(phb->buid),
                                BUID_LO(phb->buid), 0);
                if (ret) {
                        pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
                                __func__, phb->global_number, config_addr);
                        return -ENXIO;
                }

                return rets[0];
        }

        /*
         * PAPR does describe a process for finding the pe_config_addr that was
         * used before the ibm,get-config-addr-info calls were added. However,
         * I haven't found *any* systems that don't have that RTAS call
         * implemented. If you happen to find one that needs the old DT based
         * process, patches are welcome!
         */
        return -ENOENT;
}

/**
 * pseries_eeh_phb_reset - Reset the specified PHB
 * @phb: PCI controller
 * @config_addr: the associated config address
 * @option: reset option
 *
 * Reset the specified PHB/PE
 */
static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, int option)
{
        int ret;

        /* Reset PE through RTAS call */
        ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
                        config_addr, BUID_HI(phb->buid),
                        BUID_LO(phb->buid), option);

        /* If fundamental-reset not supported, try hot-reset */
        if (option == EEH_RESET_FUNDAMENTAL && ret == -8) {
                option = EEH_RESET_HOT;
                ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
                                config_addr, BUID_HI(phb->buid),
                                BUID_LO(phb->buid), option);
        }

        /* We need reset hold or settlement delay */
        if (option == EEH_RESET_FUNDAMENTAL || option == EEH_RESET_HOT)
                msleep(EEH_PE_RST_HOLD_TIME);
        else
                msleep(EEH_PE_RST_SETTLE_TIME);

        return ret;
}

/**
 * pseries_eeh_phb_configure_bridge - Configure PCI bridges in the indicated PE
 * @phb: PCI controller
 * @config_addr: the associated config address
 *
 * This function is called to reconfigure the bridges included in the
 * specified PE so that the malfunctioning PE can be recovered.
 */
static int pseries_eeh_phb_configure_bridge(struct pci_controller *phb, int config_addr)
{
        int ret;
        /* Waiting 0.2s maximum before skipping configuration */
        int max_wait = 200;

        while (max_wait > 0) {
                ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
                                config_addr, BUID_HI(phb->buid),
                                BUID_LO(phb->buid));

                if (!ret)
                        return ret;
                if (ret < 0)
                        break;

                /*
                 * If RTAS returns a delay value that's above 100ms, cut it
                 * down to 100ms in case firmware made a mistake.  For more
                 * on how these delay values work see rtas_busy_delay_time
                 */
                if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
                    ret <= RTAS_EXTENDED_DELAY_MAX)
                        ret = RTAS_EXTENDED_DELAY_MIN+2;

                max_wait -= rtas_busy_delay_time(ret);

                if (max_wait < 0)
                        break;

                rtas_busy_delay(ret);
        }

        pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
                __func__, phb->global_number, config_addr, ret);
        /* PAPR defines -3 as "Parameter Error" for this function: */
        if (ret == -3)
                return -EINVAL;
        else
                return -EIO;
}

/*
 * Buffer for reporting slot-error-detail RTAS calls. It's here in BSS,
 * rather than dynamically allocated, so that it ends up in the RMO where
 * RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

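/*
 * Return the offset of the first capability pointer (PCI_CAPABILITY_LIST)
 * if the device advertises a capability list, or 0 otherwise. Config space
 * is accessed through RTAS via the pci_dn.
 */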
static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
        u32 status;

        if (!pdn)
                return 0;

        rtas_read_config(pdn, PCI_STATUS, 2, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        return PCI_CAPABILITY_LIST;
}


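/*
 * Walk the standard PCI capability list using RTAS config reads and return
 * the offset of capability @cap, or 0 if it is not found.
 */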
static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
        int pos = pseries_eeh_cap_start(pdn);
        int cnt = 48;   /* Maximal number of capabilities */
        u32 id;

        if (!pos)
                return 0;

        while (cnt--) {
                rtas_read_config(pdn, pos, 1, &pos);
                if (pos < 0x40)
                        break;
                pos &= ~3;
                rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos += PCI_CAP_LIST_NEXT;
        }

        return 0;
}

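/*
 * Walk the PCIe extended capability list, which starts at config offset 256,
 * and return the offset of extended capability @cap, or 0 if it is not found.
 */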
static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
        struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
        u32 header;
        int pos = 256;
        int ttl = (4096 - 256) / 8;

        if (!edev || !edev->pcie_cap)
                return 0;
        if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
                return 0;
        else if (!header)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < 256)
                        break;

                if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}

/**
 * pseries_eeh_pe_get_parent - Retrieve the parent PE
 * @edev: EEH device
 *
 * The PEs in the system are organized as a hierarchical tree. This function
 * retrieves the parent PE for the given EEH device by walking up the pci_dn
 * tree.
 */
static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev)
{
        struct eeh_dev *parent;
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);

        /*
         * It can happen that an indirect parent EEH device already has an
         * associated PE while the direct parent EEH device doesn't have
         * one yet.
         */
        if (edev->physfn)
                pdn = pci_get_pdn(edev->physfn);
        else
                pdn = pdn ? pdn->parent : NULL;
        while (pdn) {
                /* We're poking out of PCI territory */
                parent = pdn_to_eeh_dev(pdn);
                if (!parent)
                        return NULL;

                if (parent->pe)
                        return parent->pe;

                pdn = pdn->parent;
        }

        return NULL;
}

/**
 * pseries_eeh_init_edev - initialise the eeh_dev and eeh_pe for a pci_dn
 *
 * @pdn: PCI device node
 *
 * When we discover a new PCI device via the device-tree we create a
 * corresponding pci_dn and we allocate, but don't initialise, an eeh_dev.
 * This function takes care of the initialisation and inserts the eeh_dev
 * into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
 */
void pseries_eeh_init_edev(struct pci_dn *pdn)
{
        struct eeh_pe pe, *parent;
        struct eeh_dev *edev;
        u32 pcie_flags;
        int ret;

        if (WARN_ON_ONCE(!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)))
                return;

        /*
         * Find the eeh_dev for this pdn. The storage for the eeh_dev was
         * allocated at the same time as the pci_dn.
         *
         * XXX: We should probably re-visit that.
         */
        edev = pdn_to_eeh_dev(pdn);
        if (!edev)
                return;

        /*
         * If ->pe is set then we've already probed this device. We hit
         * this path when a pci_dev is removed and rescanned while recovering
         * a PE (i.e. for devices where the driver doesn't support error
         * recovery).
         */
        if (edev->pe)
                return;

        /* Check class/vendor/device IDs */
        if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
                return;

        /* Skip for PCI-ISA bridge */
        if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
                return;

        eeh_edev_dbg(edev, "Probing device\n");

        /*
         * Update the class code and mode of the eeh device. The mode needs
         * to correctly reflect whether the current device is a root port
         * or a PCIe switch downstream port.
         */
        edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
        edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
        edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
        edev->mode &= 0xFFFFFF00;
        if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
                edev->mode |= EEH_DEV_BRIDGE;
                if (edev->pcie_cap) {
                        rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
                                         2, &pcie_flags);
                        pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
                        if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
                                edev->mode |= EEH_DEV_ROOT_PORT;
                        else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
                                edev->mode |= EEH_DEV_DS_PORT;
                }
        }

        /* First up, find the pe_config_addr for the PE containing the device */
        ret = pseries_eeh_get_pe_config_addr(pdn);
        if (ret < 0) {
                eeh_edev_dbg(edev, "Unable to find pe_config_addr\n");
                goto err;
        }

        /* Try enable EEH on the fake PE */
        memset(&pe, 0, sizeof(struct eeh_pe));
        pe.phb = pdn->phb;
        pe.addr = ret;

        eeh_edev_dbg(edev, "Enabling EEH on device\n");
        ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
        if (ret) {
                eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret);
                goto err;
        }

        edev->pe_config_addr = pe.addr;

        eeh_add_flag(EEH_ENABLED);

        parent = pseries_eeh_pe_get_parent(edev);
        eeh_pe_tree_insert(edev, parent);
        eeh_save_bars(edev);
        eeh_edev_dbg(edev, "EEH enabled for device\n");

        return;

err:
        eeh_edev_dbg(edev, "EEH is unsupported on device (code = %d)\n", ret);
}

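/**
 * pseries_eeh_probe - EEH probe callback for pseries
 * @pdev: PCI device
 *
 * Returns the eeh_dev that pseries_eeh_init_edev() set up for this device
 * and inserted into a PE, or NULL if the device has no pci_dn or EEH could
 * not be enabled for it.
 */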
static struct eeh_dev *pseries_eeh_probe(struct pci_dev *pdev)
{
        struct eeh_dev *edev;
        struct pci_dn *pdn;

        pdn = pci_get_pdn_by_devfn(pdev->bus, pdev->devfn);
        if (!pdn)
                return NULL;

        /*
         * If the system supports EEH on this device then the eeh_dev was
         * configured and inserted into a PE in pseries_eeh_init_edev()
         */
        edev = pdn_to_eeh_dev(pdn);
        if (!edev || !edev->pe)
                return NULL;

        return edev;
}

/**
 * pseries_eeh_init_edev_recursive - Enable EEH for the indicated device
 * @pdn: PCI device node
 *
 * This routine must be used to perform EEH initialization for the
 * indicated PCI device that was added after system boot (e.g.
 * hotplug, dlpar).
 */
void pseries_eeh_init_edev_recursive(struct pci_dn *pdn)
{
        struct pci_dn *n;

        if (!pdn)
                return;

        list_for_each_entry(n, &pdn->child_list, list)
                pseries_eeh_init_edev_recursive(n);

        pseries_eeh_init_edev(pdn);
}
EXPORT_SYMBOL_GPL(pseries_eeh_init_edev_recursive);

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA.
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
        int ret = 0;

        /*
         * When enabling or disabling EEH functionality on a particular PE,
         * the PE config address is possibly unavailable, in which case it
         * has to be figured out from the FDT node.
         */
        switch (option) {
        case EEH_OPT_DISABLE:
        case EEH_OPT_ENABLE:
        case EEH_OPT_THAW_MMIO:
        case EEH_OPT_THAW_DMA:
                break;
        case EEH_OPT_FREEZE_PE:
                /* Not supported */
                return 0;
        default:
                pr_err("%s: Invalid option %d\n", __func__, option);
                return -EINVAL;
        }

        ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
                        pe->addr, BUID_HI(pe->phb->buid),
                        BUID_LO(pe->phb->buid), option);

        return ret;
}

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: suggested time to wait if state is unavailable
 *
 * Retrieve the state of the specified PE. On an RTAS-compliant pseries
 * platform there is a dedicated RTAS call for this purpose. Note that two
 * RTAS calls exist for it: try the newer one first and fall back to the
 * older one if the newer one is not implemented.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
{
        int ret;
        int rets[4];
        int result;

        if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
                ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
                                pe->addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
                /* Fake PE unavailable info */
                rets[2] = 0;
                ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
                                pe->addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
        } else {
                return EEH_STATE_NOT_SUPPORT;
        }

        if (ret)
                return ret;

        /* Parse the result out */
        if (!rets[1])
                return EEH_STATE_NOT_SUPPORT;

        switch (rets[0]) {
        case 0:
                result = EEH_STATE_MMIO_ACTIVE |
                         EEH_STATE_DMA_ACTIVE;
                break;
        case 1:
                result = EEH_STATE_RESET_ACTIVE |
                         EEH_STATE_MMIO_ACTIVE  |
                         EEH_STATE_DMA_ACTIVE;
                break;
        case 2:
                result = 0;
                break;
        case 4:
                result = EEH_STATE_MMIO_ENABLED;
                break;
        case 5:
                if (rets[2]) {
                        if (delay)
                                *delay = rets[2];
                        result = EEH_STATE_UNAVAILABLE;
                } else {
                        result = EEH_STATE_NOT_SUPPORT;
                }
                break;
        default:
                result = EEH_STATE_NOT_SUPPORT;
        }

        return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
        return pseries_eeh_phb_reset(pe->phb, pe->addr, option);
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE through
 * the dedicated RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&slot_errbuf_lock, flags);
        memset(slot_errbuf, 0, eeh_error_buf_size);

        ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, pe->addr,
                        BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
                        virt_to_phys(drv_log), len,
                        virt_to_phys(slot_errbuf), eeh_error_buf_size,
                        severity);
        if (!ret)
                log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
        spin_unlock_irqrestore(&slot_errbuf_lock, flags);

        return ret;
}

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * Reconfigure the PCI bridges contained in the indicated PE.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
        return pseries_eeh_phb_configure_bridge(pe->phb, pe->addr);
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @edev: EEH device handle
 * @where: PCI config space offset
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct eeh_dev *edev, int where, int size, u32 *val)
{
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);

        return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @edev: EEH device handle
 * @where: PCI config space offset
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u32 val)
{
        struct pci_dn *pdn = eeh_dev_to_pdn(edev);

        return rtas_write_config(pdn, where, size, val);
}

#ifdef CONFIG_PCI_IOV
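/*
 * Issue the ibm,open-sriov-allow-unfreeze RTAS call for the given PF,
 * passing the array of VF PE numbers through rtas_data_buf.
 */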
int pseries_send_allow_unfreeze(struct pci_dn *pdn,
                                u16 *vf_pe_array, int cur_vfs)
{
        int rc;
        int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze");
        unsigned long buid, addr;

        addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
        buid = pdn->phb->buid;
        spin_lock(&rtas_data_buf_lock);
        memcpy(rtas_data_buf, vf_pe_array, RTAS_DATA_BUF_SIZE);
        rc = rtas_call(ibm_allow_unfreeze, 5, 1, NULL,
                       addr,
                       BUID_HI(buid),
                       BUID_LO(buid),
                       rtas_data_buf, cur_vfs * sizeof(u16));
        spin_unlock(&rtas_data_buf_lock);
        if (rc)
                pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
                        __func__,
                        pdn->phb->global_number, addr, rc);
        return rc;
}

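/*
 * Collect the PE number(s) of the affected VF(s) and ask the platform to
 * allow them to be unfrozen. For a PF this covers all of its VFs; for a
 * single VF only that VF's PE. The result is recorded in last_allow_rc so
 * it can be reported through eeh-sysfs.
 */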
static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
        int cur_vfs = 0, rc = 0, vf_index, bus, devfn, vf_pe_num;
        struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
        u16 *vf_pe_array;

        vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!vf_pe_array)
                return -ENOMEM;
        if (pci_num_vf(edev->physfn ? edev->physfn : edev->pdev)) {
                if (edev->pdev->is_physfn) {
                        cur_vfs = pci_num_vf(edev->pdev);
                        pdn = eeh_dev_to_pdn(edev);
                        parent = pdn->parent;
                        for (vf_index = 0; vf_index < cur_vfs; vf_index++)
                                vf_pe_array[vf_index] =
                                        cpu_to_be16(pdn->pe_num_map[vf_index]);
                        rc = pseries_send_allow_unfreeze(pdn, vf_pe_array,
                                                         cur_vfs);
                        pdn->last_allow_rc = rc;
                        for (vf_index = 0; vf_index < cur_vfs; vf_index++) {
                                list_for_each_entry_safe(pdn, tmp,
                                                         &parent->child_list,
                                                         list) {
                                        bus = pci_iov_virtfn_bus(edev->pdev,
                                                                 vf_index);
                                        devfn = pci_iov_virtfn_devfn(edev->pdev,
                                                                     vf_index);
                                        if (pdn->busno != bus ||
                                            pdn->devfn != devfn)
                                                continue;
                                        pdn->last_allow_rc = rc;
                                }
                        }
                } else {
                        pdn = pci_get_pdn(edev->pdev);
                        physfn_pdn = pci_get_pdn(edev->physfn);

                        vf_pe_num = physfn_pdn->pe_num_map[edev->vf_index];
                        vf_pe_array[0] = cpu_to_be16(vf_pe_num);
                        rc = pseries_send_allow_unfreeze(physfn_pdn,
                                                         vf_pe_array, 1);
                        pdn->last_allow_rc = rc;
                }
        }

        kfree(vf_pe_array);
        return rc;
}

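/*
 * notify_resume hook: once recovery of the PE has finished, ask the
 * platform to allow the SR-IOV VF PEs to be unfrozen, provided the
 * ibm,open-sriov-allow-unfreeze RTAS call is implemented.
 */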
static int pseries_notify_resume(struct eeh_dev *edev)
{
        if (!edev)
                return -EEXIST;

        if (rtas_token("ibm,open-sriov-allow-unfreeze") == RTAS_UNKNOWN_SERVICE)
                return -EINVAL;

        if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
                return pseries_call_allow_unfreeze(edev);

        return 0;
}
#endif

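/* Platform EEH operations, registered with the EEH core via eeh_init() */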
static struct eeh_ops pseries_eeh_ops = {
        .name                   = "pseries",
        .probe                  = pseries_eeh_probe,
        .set_option             = pseries_eeh_set_option,
        .get_state              = pseries_eeh_get_state,
        .reset                  = pseries_eeh_reset,
        .get_log                = pseries_eeh_get_log,
        .configure_bridge       = pseries_eeh_configure_bridge,
        .err_inject             = NULL,
        .read_config            = pseries_eeh_read_config,
        .write_config           = pseries_eeh_write_config,
        .next_error             = NULL,
        .restore_config         = NULL, /* NB: configure_bridge() does this */
#ifdef CONFIG_PCI_IOV
        .notify_resume          = pseries_notify_resume
#endif
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on the pseries platform. This function should be
 * called before any other EEH-related functions.
 */
static int __init eeh_pseries_init(void)
{
        struct pci_controller *phb;
        struct pci_dn *pdn;
        int ret, config_addr;

        /* figure out EEH RTAS function call tokens */
        ibm_set_eeh_option              = rtas_token("ibm,set-eeh-option");
        ibm_set_slot_reset              = rtas_token("ibm,set-slot-reset");
        ibm_read_slot_reset_state2      = rtas_token("ibm,read-slot-reset-state2");
        ibm_read_slot_reset_state       = rtas_token("ibm,read-slot-reset-state");
        ibm_slot_error_detail           = rtas_token("ibm,slot-error-detail");
        ibm_get_config_addr_info2       = rtas_token("ibm,get-config-addr-info2");
        ibm_get_config_addr_info        = rtas_token("ibm,get-config-addr-info");
        ibm_configure_pe                = rtas_token("ibm,configure-pe");

        /*
         * ibm,configure-pe and ibm,configure-bridge have the same semantics,
         * however ibm,configure-pe can be faster.  If we can't find
         * ibm,configure-pe then fall back to using ibm,configure-bridge.
         */
        if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
                ibm_configure_pe        = rtas_token("ibm,configure-bridge");

        /*
         * Necessary sanity check. We needn't check "get-config-addr-info"
         * and its variant since old firmware probably supports the
         * domain/bus/slot/function form of address for EEH RTAS operations.
         */
        if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE          ||
            ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE          ||
            (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
             ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
            ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE       ||
            ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
                pr_info("EEH functionality not supported\n");
                return -EINVAL;
        }

        /* Initialize error log lock and size */
        spin_lock_init(&slot_errbuf_lock);
        eeh_error_buf_size = rtas_token("rtas-error-log-max");
        if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
                pr_info("%s: unknown EEH error log size\n",
                        __func__);
                eeh_error_buf_size = 1024;
        } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
                pr_info("%s: EEH error log size %d exceeds the maximum %d\n",
                        __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
                eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
        }

        /* Set EEH probe mode */
        eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

        /* Set EEH machine dependent code */
        ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;

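        /*
         * A kdump kernel (or a boot with reset_devices) may inherit devices
         * that the previous kernel left frozen or in an undefined state, so
         * reset each PHB and reconfigure its bridges before continuing.
         */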
        if (is_kdump_kernel() || reset_devices) {
                pr_info("Issue PHB reset ...\n");
                list_for_each_entry(phb, &hose_list, list_node) {
                        pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
                        config_addr = pseries_eeh_get_pe_config_addr(pdn);

                        /* invalid PE config addr */
                        if (config_addr < 0)
                                continue;

                        pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_FUNDAMENTAL);
                        pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_DEACTIVATE);
                        pseries_eeh_phb_configure_bridge(phb, config_addr);
                }
        }

        ret = eeh_init(&pseries_eeh_ops);
        if (!ret)
                pr_info("EEH: pSeries platform initialized\n");
        else
                pr_info("EEH: pSeries platform initialization failure (%d)\n",
                        ret);
        return ret;
}
machine_arch_initcall(pseries, eeh_pseries_init);
