linux/arch/powerpc/platforms/powernv/pci.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Support PCI/PCIe on PowerNV platforms
   4 *
   5 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
   6 */
   7
   8#include <linux/kernel.h>
   9#include <linux/pci.h>
  10#include <linux/delay.h>
  11#include <linux/string.h>
  12#include <linux/init.h>
  13#include <linux/irq.h>
  14#include <linux/io.h>
  15#include <linux/msi.h>
  16#include <linux/iommu.h>
  17#include <linux/sched/mm.h>
  18
  19#include <asm/sections.h>
  20#include <asm/io.h>
  21#include <asm/prom.h>
  22#include <asm/pci-bridge.h>
  23#include <asm/machdep.h>
  24#include <asm/msi_bitmap.h>
  25#include <asm/ppc-pci.h>
  26#include <asm/pnv-pci.h>
  27#include <asm/opal.h>
  28#include <asm/iommu.h>
  29#include <asm/tce.h>
  30#include <asm/firmware.h>
  31#include <asm/eeh_event.h>
  32#include <asm/eeh.h>
  33
  34#include "powernv.h"
  35#include "pci.h"
  36
  37static DEFINE_MUTEX(tunnel_mutex);
  38
  39int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
  40{
  41        struct device_node *node = np;
  42        u32 bdfn;
  43        u64 phbid;
  44        int ret;
  45
  46        ret = of_property_read_u32(np, "reg", &bdfn);
  47        if (ret)
  48                return -ENXIO;
  49
  50        bdfn = ((bdfn & 0x00ffff00) >> 8);
  51        for (node = np; node; node = of_get_parent(node)) {
  52                if (!PCI_DN(node)) {
  53                        of_node_put(node);
  54                        break;
  55                }
  56
  57                if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
  58                    !of_device_is_compatible(node, "ibm,ioda3-phb") &&
  59                    !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
  60                        of_node_put(node);
  61                        continue;
  62                }
  63
  64                ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
  65                if (ret) {
  66                        of_node_put(node);
  67                        return -ENXIO;
  68                }
  69
  70                if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
  71                        *id = PCI_PHB_SLOT_ID(phbid);
  72                else
  73                        *id = PCI_SLOT_ID(phbid, bdfn);
  74                return 0;
  75        }
  76
  77        return -ENODEV;
  78}
  79EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
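
/*
 * Illustrative only: a minimal sketch of how a caller (a hotplug driver,
 * for instance) might resolve the OPAL slot id for a device node before
 * querying slot state. "slot_dn" and the error handling around it are
 * assumptions, not part of this file.
 *
 *	uint64_t slot_id;
 *	uint8_t presence;
 *
 *	if (pnv_pci_get_slot_id(slot_dn, &slot_id))
 *		return -ENODEV;
 *	if (pnv_pci_get_presence_state(slot_id, &presence))
 *		return -EIO;
 *	// presence indicates whether a device is installed in the slot
 */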
  80
  81int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
  82{
  83        int64_t rc;
  84
  85        if (!opal_check_token(OPAL_GET_DEVICE_TREE))
  86                return -ENXIO;
  87
  88        rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
  89        if (rc < OPAL_SUCCESS)
  90                return -EIO;
  91
  92        return rc;
  93}
  94EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
  95
  96int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
  97{
  98        int64_t rc;
  99
 100        if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
 101                return -ENXIO;
 102
 103        rc = opal_pci_get_presence_state(id, (uint64_t)state);
 104        if (rc != OPAL_SUCCESS)
 105                return -EIO;
 106
 107        return 0;
 108}
 109EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
 110
 111int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
 112{
 113        int64_t rc;
 114
 115        if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
 116                return -ENXIO;
 117
 118        rc = opal_pci_get_power_state(id, (uint64_t)state);
 119        if (rc != OPAL_SUCCESS)
 120                return -EIO;
 121
 122        return 0;
 123}
 124EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
 125
 126int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
 127{
 128        struct opal_msg m;
 129        int token, ret;
 130        int64_t rc;
 131
 132        if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
 133                return -ENXIO;
 134
 135        token = opal_async_get_token_interruptible();
 136        if (unlikely(token < 0))
 137                return token;
 138
 139        rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
 140        if (rc == OPAL_SUCCESS) {
 141                ret = 0;
 142                goto exit;
 143        } else if (rc != OPAL_ASYNC_COMPLETION) {
 144                ret = -EIO;
 145                goto exit;
 146        }
 147
 148        ret = opal_async_wait_response(token, &m);
 149        if (ret < 0)
 150                goto exit;
 151
 152        if (msg) {
 153                ret = 1;
 154                memcpy(msg, &m, sizeof(m));
 155        }
 156
 157exit:
 158        opal_async_release_token(token);
 159        return ret;
 160}
 161EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
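
/*
 * Illustrative only: a rough sketch of how pnv_pci_set_power_state() might
 * be used by a hotplug driver. "slot_id" and handle_async_completion() are
 * hypothetical names; OPAL_PCI_SLOT_POWER_ON is the OPAL power-on request.
 *
 *	struct opal_msg msg;
 *	int ret;
 *
 *	ret = pnv_pci_set_power_state(slot_id, OPAL_PCI_SLOT_POWER_ON, &msg);
 *	if (ret < 0)
 *		return ret;		// OPAL call or token allocation failed
 *	if (ret == 1)
 *		handle_async_completion(&msg);	// msg holds the async reply
 */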
 162
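/*
 * For each MSI descriptor on the device: allocate a hardware IRQ from the
 * PHB's MSI bitmap, map it to a Linux virq, have the PHB-specific
 * msi_setup() hook compose the MSI message, then bind the descriptor and
 * write the message to the device. Any failure unwinds the current entry.
 */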
 163int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 164{
 165        struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
 166        struct msi_desc *entry;
 167        struct msi_msg msg;
 168        int hwirq;
 169        unsigned int virq;
 170        int rc;
 171
 172        if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
 173                return -ENODEV;
 174
 175        if (pdev->no_64bit_msi && !phb->msi32_support)
 176                return -ENODEV;
 177
 178        for_each_pci_msi_entry(entry, pdev) {
 179                if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
 180                        pr_warn("%s: Supports only 64-bit MSIs\n",
 181                                pci_name(pdev));
 182                        return -ENXIO;
 183                }
 184                hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
 185                if (hwirq < 0) {
 186                        pr_warn("%s: Failed to find a free MSI\n",
 187                                pci_name(pdev));
 188                        return -ENOSPC;
 189                }
 190                virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
 191                if (!virq) {
 192                        pr_warn("%s: Failed to map MSI to linux irq\n",
 193                                pci_name(pdev));
 194                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
 195                        return -ENOMEM;
 196                }
 197                rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
 198                                    virq, entry->msi_attrib.is_64, &msg);
 199                if (rc) {
 200                        pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
 201                        irq_dispose_mapping(virq);
 202                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
 203                        return rc;
 204                }
 205                irq_set_msi_desc(virq, entry);
 206                pci_write_msi_msg(virq, &msg);
 207        }
 208        return 0;
 209}
 210
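/*
 * Undo pnv_setup_msi_irqs(): for every MSI descriptor with a mapped
 * interrupt, detach the descriptor, dispose of the virq mapping and return
 * the hardware IRQ to the PHB's MSI bitmap.
 */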
 211void pnv_teardown_msi_irqs(struct pci_dev *pdev)
 212{
 213        struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
 214        struct msi_desc *entry;
 215        irq_hw_number_t hwirq;
 216
 217        if (WARN_ON(!phb))
 218                return;
 219
 220        for_each_pci_msi_entry(entry, pdev) {
 221                if (!entry->irq)
 222                        continue;
 223                hwirq = virq_to_hw(entry->irq);
 224                irq_set_msi_desc(entry->irq, NULL);
 225                irq_dispose_mapping(entry->irq);
 226                msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
 227        }
 228}
 229
 230/* Nicely print the contents of the PE State Tables (PEST). */
 231static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
 232{
 233        __be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
 234        bool dup = false;
 235        int i;
 236
 237        for (i = 0; i < pest_size; i++) {
 238                __be64 peA = be64_to_cpu(pestA[i]);
 239                __be64 peB = be64_to_cpu(pestB[i]);
 240
 241                if (peA != prevA || peB != prevB) {
 242                        if (dup) {
 243                                pr_info("PE[..%03x] A/B: as above\n", i-1);
 244                                dup = false;
 245                        }
 246                        prevA = peA;
 247                        prevB = peB;
 248                        if (peA & PNV_IODA_STOPPED_STATE ||
 249                            peB & PNV_IODA_STOPPED_STATE)
 250                                pr_info("PE[%03x] A/B: %016llx %016llx\n",
 251                                        i, peA, peB);
 252                } else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
 253                                    peB & PNV_IODA_STOPPED_STATE)) {
 254                        dup = true;
 255                }
 256        }
 257}
 258
 259static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
 260                                         struct OpalIoPhbErrorCommon *common)
 261{
 262        struct OpalIoP7IOCPhbErrorData *data;
 263
 264        data = (struct OpalIoP7IOCPhbErrorData *)common;
 265        pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
 266                hose->global_number, be32_to_cpu(common->version));
 267
 268        if (data->brdgCtl)
 269                pr_info("brdgCtl:     %08x\n",
 270                        be32_to_cpu(data->brdgCtl));
 271        if (data->portStatusReg || data->rootCmplxStatus ||
 272            data->busAgentStatus)
 273                pr_info("UtlSts:      %08x %08x %08x\n",
 274                        be32_to_cpu(data->portStatusReg),
 275                        be32_to_cpu(data->rootCmplxStatus),
 276                        be32_to_cpu(data->busAgentStatus));
 277        if (data->deviceStatus || data->slotStatus   ||
 278            data->linkStatus   || data->devCmdStatus ||
 279            data->devSecStatus)
 280                pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
 281                        be32_to_cpu(data->deviceStatus),
 282                        be32_to_cpu(data->slotStatus),
 283                        be32_to_cpu(data->linkStatus),
 284                        be32_to_cpu(data->devCmdStatus),
 285                        be32_to_cpu(data->devSecStatus));
 286        if (data->rootErrorStatus   || data->uncorrErrorStatus ||
 287            data->corrErrorStatus)
 288                pr_info("RootErrSts:  %08x %08x %08x\n",
 289                        be32_to_cpu(data->rootErrorStatus),
 290                        be32_to_cpu(data->uncorrErrorStatus),
 291                        be32_to_cpu(data->corrErrorStatus));
 292        if (data->tlpHdr1 || data->tlpHdr2 ||
 293            data->tlpHdr3 || data->tlpHdr4)
 294                pr_info("RootErrLog:  %08x %08x %08x %08x\n",
 295                        be32_to_cpu(data->tlpHdr1),
 296                        be32_to_cpu(data->tlpHdr2),
 297                        be32_to_cpu(data->tlpHdr3),
 298                        be32_to_cpu(data->tlpHdr4));
 299        if (data->sourceId || data->errorClass ||
 300            data->correlator)
 301                pr_info("RootErrLog1: %08x %016llx %016llx\n",
 302                        be32_to_cpu(data->sourceId),
 303                        be64_to_cpu(data->errorClass),
 304                        be64_to_cpu(data->correlator));
 305        if (data->p7iocPlssr || data->p7iocCsr)
 306                pr_info("PhbSts:      %016llx %016llx\n",
 307                        be64_to_cpu(data->p7iocPlssr),
 308                        be64_to_cpu(data->p7iocCsr));
 309        if (data->lemFir)
 310                pr_info("Lem:         %016llx %016llx %016llx\n",
 311                        be64_to_cpu(data->lemFir),
 312                        be64_to_cpu(data->lemErrorMask),
 313                        be64_to_cpu(data->lemWOF));
 314        if (data->phbErrorStatus)
 315                pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
 316                        be64_to_cpu(data->phbErrorStatus),
 317                        be64_to_cpu(data->phbFirstErrorStatus),
 318                        be64_to_cpu(data->phbErrorLog0),
 319                        be64_to_cpu(data->phbErrorLog1));
 320        if (data->mmioErrorStatus)
 321                pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
 322                        be64_to_cpu(data->mmioErrorStatus),
 323                        be64_to_cpu(data->mmioFirstErrorStatus),
 324                        be64_to_cpu(data->mmioErrorLog0),
 325                        be64_to_cpu(data->mmioErrorLog1));
 326        if (data->dma0ErrorStatus)
 327                pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
 328                        be64_to_cpu(data->dma0ErrorStatus),
 329                        be64_to_cpu(data->dma0FirstErrorStatus),
 330                        be64_to_cpu(data->dma0ErrorLog0),
 331                        be64_to_cpu(data->dma0ErrorLog1));
 332        if (data->dma1ErrorStatus)
 333                pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
 334                        be64_to_cpu(data->dma1ErrorStatus),
 335                        be64_to_cpu(data->dma1FirstErrorStatus),
 336                        be64_to_cpu(data->dma1ErrorLog0),
 337                        be64_to_cpu(data->dma1ErrorLog1));
 338
 339        pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
 340}
 341
 342static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
 343                                        struct OpalIoPhbErrorCommon *common)
 344{
 345        struct OpalIoPhb3ErrorData *data;
 346
 347        data = (struct OpalIoPhb3ErrorData*)common;
 348        pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
 349                hose->global_number, be32_to_cpu(common->version));
 350        if (data->brdgCtl)
 351                pr_info("brdgCtl:     %08x\n",
 352                        be32_to_cpu(data->brdgCtl));
 353        if (data->portStatusReg || data->rootCmplxStatus ||
 354            data->busAgentStatus)
 355                pr_info("UtlSts:      %08x %08x %08x\n",
 356                        be32_to_cpu(data->portStatusReg),
 357                        be32_to_cpu(data->rootCmplxStatus),
 358                        be32_to_cpu(data->busAgentStatus));
 359        if (data->deviceStatus || data->slotStatus   ||
 360            data->linkStatus   || data->devCmdStatus ||
 361            data->devSecStatus)
 362                pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
 363                        be32_to_cpu(data->deviceStatus),
 364                        be32_to_cpu(data->slotStatus),
 365                        be32_to_cpu(data->linkStatus),
 366                        be32_to_cpu(data->devCmdStatus),
 367                        be32_to_cpu(data->devSecStatus));
 368        if (data->rootErrorStatus || data->uncorrErrorStatus ||
 369            data->corrErrorStatus)
 370                pr_info("RootErrSts:  %08x %08x %08x\n",
 371                        be32_to_cpu(data->rootErrorStatus),
 372                        be32_to_cpu(data->uncorrErrorStatus),
 373                        be32_to_cpu(data->corrErrorStatus));
 374        if (data->tlpHdr1 || data->tlpHdr2 ||
 375            data->tlpHdr3 || data->tlpHdr4)
 376                pr_info("RootErrLog:  %08x %08x %08x %08x\n",
 377                        be32_to_cpu(data->tlpHdr1),
 378                        be32_to_cpu(data->tlpHdr2),
 379                        be32_to_cpu(data->tlpHdr3),
 380                        be32_to_cpu(data->tlpHdr4));
 381        if (data->sourceId || data->errorClass ||
 382            data->correlator)
 383                pr_info("RootErrLog1: %08x %016llx %016llx\n",
 384                        be32_to_cpu(data->sourceId),
 385                        be64_to_cpu(data->errorClass),
 386                        be64_to_cpu(data->correlator));
 387        if (data->nFir)
 388                pr_info("nFir:        %016llx %016llx %016llx\n",
 389                        be64_to_cpu(data->nFir),
 390                        be64_to_cpu(data->nFirMask),
 391                        be64_to_cpu(data->nFirWOF));
 392        if (data->phbPlssr || data->phbCsr)
 393                pr_info("PhbSts:      %016llx %016llx\n",
 394                        be64_to_cpu(data->phbPlssr),
 395                        be64_to_cpu(data->phbCsr));
 396        if (data->lemFir)
 397                pr_info("Lem:         %016llx %016llx %016llx\n",
 398                        be64_to_cpu(data->lemFir),
 399                        be64_to_cpu(data->lemErrorMask),
 400                        be64_to_cpu(data->lemWOF));
 401        if (data->phbErrorStatus)
 402                pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
 403                        be64_to_cpu(data->phbErrorStatus),
 404                        be64_to_cpu(data->phbFirstErrorStatus),
 405                        be64_to_cpu(data->phbErrorLog0),
 406                        be64_to_cpu(data->phbErrorLog1));
 407        if (data->mmioErrorStatus)
 408                pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
 409                        be64_to_cpu(data->mmioErrorStatus),
 410                        be64_to_cpu(data->mmioFirstErrorStatus),
 411                        be64_to_cpu(data->mmioErrorLog0),
 412                        be64_to_cpu(data->mmioErrorLog1));
 413        if (data->dma0ErrorStatus)
 414                pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
 415                        be64_to_cpu(data->dma0ErrorStatus),
 416                        be64_to_cpu(data->dma0FirstErrorStatus),
 417                        be64_to_cpu(data->dma0ErrorLog0),
 418                        be64_to_cpu(data->dma0ErrorLog1));
 419        if (data->dma1ErrorStatus)
 420                pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
 421                        be64_to_cpu(data->dma1ErrorStatus),
 422                        be64_to_cpu(data->dma1FirstErrorStatus),
 423                        be64_to_cpu(data->dma1ErrorLog0),
 424                        be64_to_cpu(data->dma1ErrorLog1));
 425
 426        pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
 427}
 428
 429static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
 430                                        struct OpalIoPhbErrorCommon *common)
 431{
 432        struct OpalIoPhb4ErrorData *data;
 433
 434        data = (struct OpalIoPhb4ErrorData*)common;
 435        pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
 436                hose->global_number, be32_to_cpu(common->version));
 437        if (data->brdgCtl)
 438                pr_info("brdgCtl:    %08x\n",
 439                        be32_to_cpu(data->brdgCtl));
 440        if (data->deviceStatus || data->slotStatus   ||
 441            data->linkStatus   || data->devCmdStatus ||
 442            data->devSecStatus)
 443                pr_info("RootSts:    %08x %08x %08x %08x %08x\n",
 444                        be32_to_cpu(data->deviceStatus),
 445                        be32_to_cpu(data->slotStatus),
 446                        be32_to_cpu(data->linkStatus),
 447                        be32_to_cpu(data->devCmdStatus),
 448                        be32_to_cpu(data->devSecStatus));
 449        if (data->rootErrorStatus || data->uncorrErrorStatus ||
 450            data->corrErrorStatus)
 451                pr_info("RootErrSts: %08x %08x %08x\n",
 452                        be32_to_cpu(data->rootErrorStatus),
 453                        be32_to_cpu(data->uncorrErrorStatus),
 454                        be32_to_cpu(data->corrErrorStatus));
 455        if (data->tlpHdr1 || data->tlpHdr2 ||
 456            data->tlpHdr3 || data->tlpHdr4)
 457                pr_info("RootErrLog: %08x %08x %08x %08x\n",
 458                        be32_to_cpu(data->tlpHdr1),
 459                        be32_to_cpu(data->tlpHdr2),
 460                        be32_to_cpu(data->tlpHdr3),
 461                        be32_to_cpu(data->tlpHdr4));
 462        if (data->sourceId)
 463                pr_info("sourceId:   %08x\n", be32_to_cpu(data->sourceId));
 464        if (data->nFir)
 465                pr_info("nFir:       %016llx %016llx %016llx\n",
 466                        be64_to_cpu(data->nFir),
 467                        be64_to_cpu(data->nFirMask),
 468                        be64_to_cpu(data->nFirWOF));
 469        if (data->phbPlssr || data->phbCsr)
 470                pr_info("PhbSts:     %016llx %016llx\n",
 471                        be64_to_cpu(data->phbPlssr),
 472                        be64_to_cpu(data->phbCsr));
 473        if (data->lemFir)
 474                pr_info("Lem:        %016llx %016llx %016llx\n",
 475                        be64_to_cpu(data->lemFir),
 476                        be64_to_cpu(data->lemErrorMask),
 477                        be64_to_cpu(data->lemWOF));
 478        if (data->phbErrorStatus)
 479                pr_info("PhbErr:     %016llx %016llx %016llx %016llx\n",
 480                        be64_to_cpu(data->phbErrorStatus),
 481                        be64_to_cpu(data->phbFirstErrorStatus),
 482                        be64_to_cpu(data->phbErrorLog0),
 483                        be64_to_cpu(data->phbErrorLog1));
 484        if (data->phbTxeErrorStatus)
 485                pr_info("PhbTxeErr:  %016llx %016llx %016llx %016llx\n",
 486                        be64_to_cpu(data->phbTxeErrorStatus),
 487                        be64_to_cpu(data->phbTxeFirstErrorStatus),
 488                        be64_to_cpu(data->phbTxeErrorLog0),
 489                        be64_to_cpu(data->phbTxeErrorLog1));
 490        if (data->phbRxeArbErrorStatus)
 491                pr_info("RxeArbErr:  %016llx %016llx %016llx %016llx\n",
 492                        be64_to_cpu(data->phbRxeArbErrorStatus),
 493                        be64_to_cpu(data->phbRxeArbFirstErrorStatus),
 494                        be64_to_cpu(data->phbRxeArbErrorLog0),
 495                        be64_to_cpu(data->phbRxeArbErrorLog1));
 496        if (data->phbRxeMrgErrorStatus)
 497                pr_info("RxeMrgErr:  %016llx %016llx %016llx %016llx\n",
 498                        be64_to_cpu(data->phbRxeMrgErrorStatus),
 499                        be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
 500                        be64_to_cpu(data->phbRxeMrgErrorLog0),
 501                        be64_to_cpu(data->phbRxeMrgErrorLog1));
 502        if (data->phbRxeTceErrorStatus)
 503                pr_info("RxeTceErr:  %016llx %016llx %016llx %016llx\n",
 504                        be64_to_cpu(data->phbRxeTceErrorStatus),
 505                        be64_to_cpu(data->phbRxeTceFirstErrorStatus),
 506                        be64_to_cpu(data->phbRxeTceErrorLog0),
 507                        be64_to_cpu(data->phbRxeTceErrorLog1));
 508
 509        if (data->phbPblErrorStatus)
 510                pr_info("PblErr:     %016llx %016llx %016llx %016llx\n",
 511                        be64_to_cpu(data->phbPblErrorStatus),
 512                        be64_to_cpu(data->phbPblFirstErrorStatus),
 513                        be64_to_cpu(data->phbPblErrorLog0),
 514                        be64_to_cpu(data->phbPblErrorLog1));
 515        if (data->phbPcieDlpErrorStatus)
 516                pr_info("PcieDlp:    %016llx %016llx %016llx\n",
 517                        be64_to_cpu(data->phbPcieDlpErrorLog1),
 518                        be64_to_cpu(data->phbPcieDlpErrorLog2),
 519                        be64_to_cpu(data->phbPcieDlpErrorStatus));
 520        if (data->phbRegbErrorStatus)
 521                pr_info("RegbErr:    %016llx %016llx %016llx %016llx\n",
 522                        be64_to_cpu(data->phbRegbErrorStatus),
 523                        be64_to_cpu(data->phbRegbFirstErrorStatus),
 524                        be64_to_cpu(data->phbRegbErrorLog0),
 525                        be64_to_cpu(data->phbRegbErrorLog1));
 526
 527
 528        pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
 529}
 530
 531void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
 532                                unsigned char *log_buff)
 533{
 534        struct OpalIoPhbErrorCommon *common;
 535
 536        if (!hose || !log_buff)
 537                return;
 538
 539        common = (struct OpalIoPhbErrorCommon *)log_buff;
 540        switch (be32_to_cpu(common->ioType)) {
 541        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
 542                pnv_pci_dump_p7ioc_diag_data(hose, common);
 543                break;
 544        case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
 545                pnv_pci_dump_phb3_diag_data(hose, common);
 546                break;
 547        case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
 548                pnv_pci_dump_phb4_diag_data(hose, common);
 549                break;
 550        default:
 551                pr_warn("%s: Unrecognized ioType %d\n",
 552                        __func__, be32_to_cpu(common->ioType));
 553        }
 554}
 555
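/*
 * Called when a config access finds a frozen PE: fetch the PHB diag data
 * (if OPAL returns any), clear the freeze via the PHB's unfreeze_pe() hook
 * or directly through OPAL, and dump the diag buffer only if the clear
 * fails.
 */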
 556static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
 557{
 558        unsigned long flags, rc;
 559        int has_diag, ret = 0;
 560
 561        spin_lock_irqsave(&phb->lock, flags);
 562
 563        /* Fetch PHB diag-data */
 564        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
 565                                         phb->diag_data_size);
 566        has_diag = (rc == OPAL_SUCCESS);
 567
  568        /* If the PHB supports compound PEs, clear the freeze through its hook */
 569        if (phb->unfreeze_pe) {
 570                ret = phb->unfreeze_pe(phb,
 571                                       pe_no,
 572                                       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
 573        } else {
 574                rc = opal_pci_eeh_freeze_clear(phb->opal_id,
 575                                             pe_no,
 576                                             OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
 577                if (rc) {
 578                        pr_warn("%s: Failure %ld clearing frozen "
 579                                "PHB#%x-PE#%x\n",
 580                                __func__, rc, phb->hose->global_number,
 581                                pe_no);
 582                        ret = -EIO;
 583                }
 584        }
 585
 586        /*
 587         * For now, let's only display the diag buffer when we fail to clear
 588         * the EEH status. We'll do more sensible things later when we have
 589         * proper EEH support. We need to make sure we don't pollute ourselves
 590         * with the normal errors generated when probing empty slots
 591         */
 592        if (has_diag && ret)
 593                pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
 594
 595        spin_unlock_irqrestore(&phb->lock, flags);
 596}
 597
 598static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
 599{
 600        struct pnv_phb *phb = pdn->phb->private_data;
 601        u8      fstate = 0;
 602        __be16  pcierr = 0;
 603        unsigned int pe_no;
 604        s64     rc;
 605
 606        /*
  607         * Get the PE#. During the PCI probe stage, we might not have
  608         * set that up yet, so all ER errors should be mapped to the
  609         * reserved PE.
 610         */
 611        pe_no = pdn->pe_number;
 612        if (pe_no == IODA_INVALID_PE) {
 613                pe_no = phb->ioda.reserved_pe_idx;
 614        }
 615
 616        /*
  617         * Fetch the frozen state. If the PHB supports compound PEs,
  618         * we need to handle that case.
 619         */
 620        if (phb->get_pe_state) {
 621                fstate = phb->get_pe_state(phb, pe_no);
 622        } else {
 623                rc = opal_pci_eeh_freeze_status(phb->opal_id,
 624                                                pe_no,
 625                                                &fstate,
 626                                                &pcierr,
 627                                                NULL);
 628                if (rc) {
 629                        pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
 630                                __func__, rc, phb->hose->global_number, pe_no);
 631                        return;
 632                }
 633        }
 634
 635        pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
 636                 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
 637
 638        /* Clear the frozen state if applicable */
 639        if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
 640            fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
 641            fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
 642                /*
 643                 * If PHB supports compound PE, freeze it for
 644                 * consistency.
 645                 */
 646                if (phb->freeze_pe)
 647                        phb->freeze_pe(phb, pe_no);
 648
 649                pnv_pci_handle_eeh_config(phb, pe_no);
 650        }
 651}
 652
 653int pnv_pci_cfg_read(struct pci_dn *pdn,
 654                     int where, int size, u32 *val)
 655{
 656        struct pnv_phb *phb = pdn->phb->private_data;
 657        u32 bdfn = (pdn->busno << 8) | pdn->devfn;
 658        s64 rc;
 659
 660        switch (size) {
 661        case 1: {
 662                u8 v8;
 663                rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
 664                *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
 665                break;
 666        }
 667        case 2: {
 668                __be16 v16;
 669                rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
 670                                                   &v16);
 671                *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
 672                break;
 673        }
 674        case 4: {
 675                __be32 v32;
 676                rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
 677                *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
 678                break;
 679        }
 680        default:
 681                return PCIBIOS_FUNC_NOT_SUPPORTED;
 682        }
 683
 684        pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
 685                 __func__, pdn->busno, pdn->devfn, where, size, *val);
 686        return PCIBIOS_SUCCESSFUL;
 687}
 688
 689int pnv_pci_cfg_write(struct pci_dn *pdn,
 690                      int where, int size, u32 val)
 691{
 692        struct pnv_phb *phb = pdn->phb->private_data;
 693        u32 bdfn = (pdn->busno << 8) | pdn->devfn;
 694
 695        pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
 696                 __func__, pdn->busno, pdn->devfn, where, size, val);
 697        switch (size) {
 698        case 1:
 699                opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
 700                break;
 701        case 2:
 702                opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
 703                break;
 704        case 4:
 705                opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
 706                break;
 707        default:
 708                return PCIBIOS_FUNC_NOT_SUPPORTED;
 709        }
 710
 711        return PCIBIOS_SUCCESSFUL;
 712}
 713
  714#ifdef CONFIG_EEH
 715static bool pnv_pci_cfg_check(struct pci_dn *pdn)
 716{
 717        struct eeh_dev *edev = NULL;
 718        struct pnv_phb *phb = pdn->phb->private_data;
 719
 720        /* EEH not enabled ? */
 721        if (!(phb->flags & PNV_PHB_FLAG_EEH))
 722                return true;
 723
 724        /* PE reset or device removed ? */
 725        edev = pdn->edev;
 726        if (edev) {
 727                if (edev->pe &&
 728                    (edev->pe->state & EEH_PE_CFG_BLOCKED))
 729                        return false;
 730
 731                if (edev->mode & EEH_DEV_REMOVED)
 732                        return false;
 733        }
 734
 735        return true;
 736}
 737#else
  738static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
 739{
 740        return true;
 741}
 742#endif /* CONFIG_EEH */
 743
 744static int pnv_pci_read_config(struct pci_bus *bus,
 745                               unsigned int devfn,
 746                               int where, int size, u32 *val)
 747{
 748        struct pci_dn *pdn;
 749        struct pnv_phb *phb;
 750        int ret;
 751
 752        *val = 0xFFFFFFFF;
 753        pdn = pci_get_pdn_by_devfn(bus, devfn);
 754        if (!pdn)
 755                return PCIBIOS_DEVICE_NOT_FOUND;
 756
 757        if (!pnv_pci_cfg_check(pdn))
 758                return PCIBIOS_DEVICE_NOT_FOUND;
 759
 760        ret = pnv_pci_cfg_read(pdn, where, size, val);
 761        phb = pdn->phb->private_data;
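        /*
         * With EEH enabled, treat an all-ones read as a potential failure
         * and let eeh_dev_check_failure() decide; otherwise fall back to
         * polling the PE freeze state through OPAL.
         */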
 762        if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
 763                if (*val == EEH_IO_ERROR_VALUE(size) &&
 764                    eeh_dev_check_failure(pdn->edev))
 765                        return PCIBIOS_DEVICE_NOT_FOUND;
 766        } else {
 767                pnv_pci_config_check_eeh(pdn);
 768        }
 769
 770        return ret;
 771}
 772
 773static int pnv_pci_write_config(struct pci_bus *bus,
 774                                unsigned int devfn,
 775                                int where, int size, u32 val)
 776{
 777        struct pci_dn *pdn;
 778        struct pnv_phb *phb;
 779        int ret;
 780
 781        pdn = pci_get_pdn_by_devfn(bus, devfn);
 782        if (!pdn)
 783                return PCIBIOS_DEVICE_NOT_FOUND;
 784
 785        if (!pnv_pci_cfg_check(pdn))
 786                return PCIBIOS_DEVICE_NOT_FOUND;
 787
 788        ret = pnv_pci_cfg_write(pdn, where, size, val);
 789        phb = pdn->phb->private_data;
 790        if (!(phb->flags & PNV_PHB_FLAG_EEH))
 791                pnv_pci_config_check_eeh(pdn);
 792
 793        return ret;
 794}
 795
 796struct pci_ops pnv_pci_ops = {
 797        .read  = pnv_pci_read_config,
 798        .write = pnv_pci_write_config,
 799};
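
/*
 * Illustrative only: pnv_pci_ops backs the generic config accessors, so a
 * sketch like the one below (with a hypothetical "pdev") ends up in
 * pnv_pci_read_config() above and inherits its EEH checking:
 *
 *	u16 vendor;
 *
 *	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *	if (vendor == 0xffff)
 *		// the read failed or EEH has frozen the PE; check device state
 */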
 800
 801struct iommu_table *pnv_pci_table_alloc(int nid)
 802{
 803        struct iommu_table *tbl;
 804
 805        tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
 806        if (!tbl)
 807                return NULL;
 808
 809        INIT_LIST_HEAD_RCU(&tbl->it_group_list);
 810        kref_init(&tbl->it_kref);
 811
 812        return tbl;
 813}
 814
 815struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
 816{
 817        struct pci_controller *hose = pci_bus_to_host(dev->bus);
 818
 819        return of_node_get(hose->dn);
 820}
 821EXPORT_SYMBOL(pnv_pci_get_phb_node);
 822
 823int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
 824{
 825        struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
 826        u64 tunnel_bar;
 827        __be64 val;
 828        int rc;
 829
 830        if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
 831                return -ENXIO;
 832        if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
 833                return -ENXIO;
 834
 835        mutex_lock(&tunnel_mutex);
 836        rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
 837        if (rc != OPAL_SUCCESS) {
 838                rc = -EIO;
 839                goto out;
 840        }
 841        tunnel_bar = be64_to_cpu(val);
 842        if (enable) {
 843                /*
  844                 * Only one device per PHB can use atomics.
  845                 * Our policy is first-come, first-served.
  846                 */
 847                if (tunnel_bar) {
 848                        if (tunnel_bar != addr)
 849                                rc = -EBUSY;
 850                        else
 851                                rc = 0; /* Setting same address twice is ok */
 852                        goto out;
 853                }
 854        } else {
 855                /*
  856                 * The device that owns atomics and wants to release
  857                 * them must pass the same address with enable == 0.
  858                 */
 859                if (tunnel_bar != addr) {
 860                        rc = -EPERM;
 861                        goto out;
 862                }
 863                addr = 0x0ULL;
 864        }
 865        rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
 866        rc = opal_error_code(rc);
 867out:
 868        mutex_unlock(&tunnel_mutex);
 869        return rc;
 870}
 871EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
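
/*
 * Illustrative only: a minimal sketch of how a device driver might claim
 * and later release the PBCQ tunnel BAR for atomics. "tunnel_addr" is an
 * assumption; the first caller per PHB wins, and the same address must be
 * passed back with enable == 0 to release the BAR.
 *
 *	if (pnv_pci_set_tunnel_bar(pdev, tunnel_addr, 1))
 *		goto no_atomics;	// another device already owns the BAR
 *	...
 *	pnv_pci_set_tunnel_bar(pdev, tunnel_addr, 0);	// release on teardown
 */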
 872
 873void pnv_pci_shutdown(void)
 874{
 875        struct pci_controller *hose;
 876
 877        list_for_each_entry(hose, &hose_list, list_node)
 878                if (hose->controller_ops.shutdown)
 879                        hose->controller_ops.shutdown(hose);
 880}
 881
 882/* Fixup wrong class code in p7ioc and p8 root complex */
 883static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
 884{
 885        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
 886}
 887DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
 888
 889void __init pnv_pci_init(void)
 890{
 891        struct device_node *np;
 892
 893        pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
 894
  895        /* If we don't have OPAL, e.g. in sim, just skip PCI probe */
 896        if (!firmware_has_feature(FW_FEATURE_OPAL))
 897                return;
 898
 899#ifdef CONFIG_PCIEPORTBUS
 900        /*
  901         * On PowerNV, PCIe devices are (currently) managed in cooperation
  902         * with firmware. This isn't *strictly* required, but there are enough
 903         * assumptions baked into both firmware and the platform code that
 904         * it's unwise to allow the portbus services to be used.
 905         *
 906         * We need to fix this eventually, but for now set this flag to disable
  907         * the portbus driver. The AER service isn't required since AER
 908         * events are handled via EEH. The pciehp hotplug driver can't work
 909         * without kernel changes (and portbus binding breaks pnv_php). The
 910         * other services also require some thinking about how we're going
 911         * to integrate them.
 912         */
 913        pcie_ports_disabled = true;
 914#endif
 915
 916        /* Look for IODA IO-Hubs. */
 917        for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
 918                pnv_pci_init_ioda_hub(np);
 919        }
 920
 921        /* Look for ioda2 built-in PHB3's */
 922        for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
 923                pnv_pci_init_ioda2_phb(np);
 924
 925        /* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
 926        for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
 927                pnv_pci_init_ioda2_phb(np);
 928
 929        /* Look for NPU PHBs */
 930        for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
 931                pnv_pci_init_npu_phb(np);
 932
 933        /*
 934         * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
 935         * the exception of TCE kill which requires an OPAL call.
 936         */
 937        for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
 938                pnv_pci_init_npu_phb(np);
 939
 940        /* Look for NPU2 OpenCAPI PHBs */
 941        for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
 942                pnv_pci_init_npu2_opencapi_phb(np);
 943
 944        /* Configure IOMMU DMA hooks */
 945        set_pci_dma_ops(&dma_iommu_ops);
 946}
 947
 948static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
 949                unsigned long action, void *data)
 950{
 951        struct device *dev = data;
 952
 953        switch (action) {
 954        case BUS_NOTIFY_DEL_DEVICE:
 955                iommu_del_device(dev);
 956                return 0;
 957        default:
 958                return 0;
 959        }
 960}
 961
 962static struct notifier_block pnv_tce_iommu_bus_nb = {
 963        .notifier_call = pnv_tce_iommu_bus_notifier,
 964};
 965
 966static int __init pnv_tce_iommu_bus_notifier_init(void)
 967{
 968        bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
 969        return 0;
 970}
 971machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);
 972