linux/arch/powerpc/platforms/powernv/pci.c
   1/*
   2 * Support PCI/PCIe on PowerNV platforms
   3 *
   4 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the License, or (at your option) any later version.
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/pci.h>
  14#include <linux/delay.h>
  15#include <linux/string.h>
  16#include <linux/init.h>
  17#include <linux/irq.h>
  18#include <linux/io.h>
  19#include <linux/msi.h>
  20#include <linux/iommu.h>
  21#include <linux/sched/mm.h>
  22
  23#include <asm/sections.h>
  24#include <asm/io.h>
  25#include <asm/prom.h>
  26#include <asm/pci-bridge.h>
  27#include <asm/machdep.h>
  28#include <asm/msi_bitmap.h>
  29#include <asm/ppc-pci.h>
  30#include <asm/pnv-pci.h>
  31#include <asm/opal.h>
  32#include <asm/iommu.h>
  33#include <asm/tce.h>
  34#include <asm/firmware.h>
  35#include <asm/eeh_event.h>
  36#include <asm/eeh.h>
  37
  38#include "powernv.h"
  39#include "pci.h"
  40
  41static DEFINE_MUTEX(p2p_mutex);
  42static DEFINE_MUTEX(tunnel_mutex);
  43
  44int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
  45{
  46        struct device_node *node = np;
  47        u32 bdfn;
  48        u64 phbid;
  49        int ret;
  50
  51        ret = of_property_read_u32(np, "reg", &bdfn);
  52        if (ret)
  53                return -ENXIO;
  54
  55        bdfn = ((bdfn & 0x00ffff00) >> 8);
  56        for (node = np; node; node = of_get_parent(node)) {
  57                if (!PCI_DN(node)) {
  58                        of_node_put(node);
  59                        break;
  60                }
  61
  62                if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
  63                    !of_device_is_compatible(node, "ibm,ioda3-phb") &&
  64                    !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
  65                        of_node_put(node);
  66                        continue;
  67                }
  68
  69                ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
  70                if (ret) {
  71                        of_node_put(node);
  72                        return -ENXIO;
  73                }
  74
  75                if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
  76                        *id = PCI_PHB_SLOT_ID(phbid);
  77                else
  78                        *id = PCI_SLOT_ID(phbid, bdfn);
  79                return 0;
  80        }
  81
  82        return -ENODEV;
  83}
  84EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
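/*
 * Editorial sketch, not part of the original file: one way a hotplug-style
 * caller could combine pnv_pci_get_slot_id() with the presence helper below
 * to decide whether a slot is populated. The function name is hypothetical
 * and OPAL_PCI_SLOT_PRESENT is assumed to be the "present" value of
 * enum OpalPciSlotPresence from <asm/opal-api.h>; pnv_php does something
 * along these lines.
 */
static int __maybe_unused example_slot_is_populated(struct device_node *dn)
{
        uint64_t id;
        uint8_t presence;
        int ret;

        ret = pnv_pci_get_slot_id(dn, &id);
        if (ret)
                return ret;

        ret = pnv_pci_get_presence_state(id, &presence);
        if (ret)
                return ret;

        return presence == OPAL_PCI_SLOT_PRESENT;
}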
  85
  86int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
  87{
  88        int64_t rc;
  89
  90        if (!opal_check_token(OPAL_GET_DEVICE_TREE))
  91                return -ENXIO;
  92
  93        rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
  94        if (rc < OPAL_SUCCESS)
  95                return -EIO;
  96
  97        return rc;
  98}
  99EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
 100
 101int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
 102{
 103        int64_t rc;
 104
 105        if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
 106                return -ENXIO;
 107
 108        rc = opal_pci_get_presence_state(id, (uint64_t)state);
 109        if (rc != OPAL_SUCCESS)
 110                return -EIO;
 111
 112        return 0;
 113}
 114EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
 115
 116int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
 117{
 118        int64_t rc;
 119
 120        if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
 121                return -ENXIO;
 122
 123        rc = opal_pci_get_power_state(id, (uint64_t)state);
 124        if (rc != OPAL_SUCCESS)
 125                return -EIO;
 126
 127        return 0;
 128}
 129EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
 130
 131int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
 132{
 133        struct opal_msg m;
 134        int token, ret;
 135        int64_t rc;
 136
 137        if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
 138                return -ENXIO;
 139
 140        token = opal_async_get_token_interruptible();
 141        if (unlikely(token < 0))
 142                return token;
 143
 144        rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
 145        if (rc == OPAL_SUCCESS) {
 146                ret = 0;
 147                goto exit;
 148        } else if (rc != OPAL_ASYNC_COMPLETION) {
 149                ret = -EIO;
 150                goto exit;
 151        }
 152
 153        ret = opal_async_wait_response(token, &m);
 154        if (ret < 0)
 155                goto exit;
 156
 157        if (msg) {
 158                ret = 1;
 159                memcpy(msg, &m, sizeof(m));
 160        }
 161
 162exit:
 163        opal_async_release_token(token);
 164        return ret;
 165}
 166EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
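/*
 * Editorial sketch, not part of the original file: a hypothetical caller
 * driving a slot power change. pnv_pci_set_power_state() returns 0 when the
 * request completed synchronously and 1 when the supplied opal_msg was filled
 * in by an asynchronous completion, in which case the OPAL return code inside
 * the message still has to be checked. opal_get_async_rc() and
 * OPAL_PCI_SLOT_POWER_ON are assumed from <asm/opal.h>/<asm/opal-api.h>.
 */
static int __maybe_unused example_power_on_slot(uint64_t id)
{
        struct opal_msg msg;
        int ret;

        ret = pnv_pci_set_power_state(id, OPAL_PCI_SLOT_POWER_ON, &msg);
        if (ret < 0)
                return ret;     /* token allocation or OPAL failure */

        if (ret == 1 && opal_get_async_rc(msg) != OPAL_SUCCESS)
                return -EIO;    /* async completion reported an error */

        return 0;
}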
 167
 168int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 169{
 170        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 171        struct pnv_phb *phb = hose->private_data;
 172        struct msi_desc *entry;
 173        struct msi_msg msg;
 174        int hwirq;
 175        unsigned int virq;
 176        int rc;
 177
 178        if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
 179                return -ENODEV;
 180
 181        if (pdev->no_64bit_msi && !phb->msi32_support)
 182                return -ENODEV;
 183
 184        for_each_pci_msi_entry(entry, pdev) {
 185                if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
 186                        pr_warn("%s: Supports only 64-bit MSIs\n",
 187                                pci_name(pdev));
 188                        return -ENXIO;
 189                }
 190                hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
 191                if (hwirq < 0) {
 192                        pr_warn("%s: Failed to find a free MSI\n",
 193                                pci_name(pdev));
 194                        return -ENOSPC;
 195                }
 196                virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
 197                if (!virq) {
 198                        pr_warn("%s: Failed to map MSI to linux irq\n",
 199                                pci_name(pdev));
 200                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
 201                        return -ENOMEM;
 202                }
 203                rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
 204                                    virq, entry->msi_attrib.is_64, &msg);
 205                if (rc) {
 206                        pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
 207                        irq_dispose_mapping(virq);
 208                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
 209                        return rc;
 210                }
 211                irq_set_msi_desc(virq, entry);
 212                pci_write_msi_msg(virq, &msg);
 213        }
 214        return 0;
 215}
 216
 217void pnv_teardown_msi_irqs(struct pci_dev *pdev)
 218{
 219        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 220        struct pnv_phb *phb = hose->private_data;
 221        struct msi_desc *entry;
 222        irq_hw_number_t hwirq;
 223
 224        if (WARN_ON(!phb))
 225                return;
 226
 227        for_each_pci_msi_entry(entry, pdev) {
 228                if (!entry->irq)
 229                        continue;
 230                hwirq = virq_to_hw(entry->irq);
 231                irq_set_msi_desc(entry->irq, NULL);
 232                irq_dispose_mapping(entry->irq);
 233                msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
 234        }
 235}
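/*
 * Editorial note, not part of the original file: drivers never call the two
 * MSI routines above directly; the generic powerpc MSI code reaches them
 * through per-controller hooks. A minimal sketch of that wiring, assuming the
 * setup_msi_irqs/teardown_msi_irqs members of struct pci_controller_ops that
 * pci-ioda.c populates for IODA PHBs (the initializer name is hypothetical):
 */
#ifdef CONFIG_PCI_MSI
static const struct pci_controller_ops example_msi_controller_ops __maybe_unused = {
        .setup_msi_irqs         = pnv_setup_msi_irqs,
        .teardown_msi_irqs      = pnv_teardown_msi_irqs,
};
#endif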
 236
 237/* Nicely print the contents of the PE State Tables (PEST). */
 238static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
 239{
 240        __be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
 241        bool dup = false;
 242        int i;
 243
 244        for (i = 0; i < pest_size; i++) {
 245                __be64 peA = be64_to_cpu(pestA[i]);
 246                __be64 peB = be64_to_cpu(pestB[i]);
 247
 248                if (peA != prevA || peB != prevB) {
 249                        if (dup) {
 250                                pr_info("PE[..%03x] A/B: as above\n", i-1);
 251                                dup = false;
 252                        }
 253                        prevA = peA;
 254                        prevB = peB;
 255                        if (peA & PNV_IODA_STOPPED_STATE ||
 256                            peB & PNV_IODA_STOPPED_STATE)
 257                                pr_info("PE[%03x] A/B: %016llx %016llx\n",
 258                                        i, peA, peB);
 259                } else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
 260                                    peB & PNV_IODA_STOPPED_STATE)) {
 261                        dup = true;
 262                }
 263        }
 264}
 265
 266static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
 267                                         struct OpalIoPhbErrorCommon *common)
 268{
 269        struct OpalIoP7IOCPhbErrorData *data;
 270
 271        data = (struct OpalIoP7IOCPhbErrorData *)common;
 272        pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
 273                hose->global_number, be32_to_cpu(common->version));
 274
 275        if (data->brdgCtl)
 276                pr_info("brdgCtl:     %08x\n",
 277                        be32_to_cpu(data->brdgCtl));
 278        if (data->portStatusReg || data->rootCmplxStatus ||
 279            data->busAgentStatus)
 280                pr_info("UtlSts:      %08x %08x %08x\n",
 281                        be32_to_cpu(data->portStatusReg),
 282                        be32_to_cpu(data->rootCmplxStatus),
 283                        be32_to_cpu(data->busAgentStatus));
 284        if (data->deviceStatus || data->slotStatus   ||
 285            data->linkStatus   || data->devCmdStatus ||
 286            data->devSecStatus)
 287                pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
 288                        be32_to_cpu(data->deviceStatus),
 289                        be32_to_cpu(data->slotStatus),
 290                        be32_to_cpu(data->linkStatus),
 291                        be32_to_cpu(data->devCmdStatus),
 292                        be32_to_cpu(data->devSecStatus));
 293        if (data->rootErrorStatus   || data->uncorrErrorStatus ||
 294            data->corrErrorStatus)
 295                pr_info("RootErrSts:  %08x %08x %08x\n",
 296                        be32_to_cpu(data->rootErrorStatus),
 297                        be32_to_cpu(data->uncorrErrorStatus),
 298                        be32_to_cpu(data->corrErrorStatus));
 299        if (data->tlpHdr1 || data->tlpHdr2 ||
 300            data->tlpHdr3 || data->tlpHdr4)
 301                pr_info("RootErrLog:  %08x %08x %08x %08x\n",
 302                        be32_to_cpu(data->tlpHdr1),
 303                        be32_to_cpu(data->tlpHdr2),
 304                        be32_to_cpu(data->tlpHdr3),
 305                        be32_to_cpu(data->tlpHdr4));
 306        if (data->sourceId || data->errorClass ||
 307            data->correlator)
 308                pr_info("RootErrLog1: %08x %016llx %016llx\n",
 309                        be32_to_cpu(data->sourceId),
 310                        be64_to_cpu(data->errorClass),
 311                        be64_to_cpu(data->correlator));
 312        if (data->p7iocPlssr || data->p7iocCsr)
 313                pr_info("PhbSts:      %016llx %016llx\n",
 314                        be64_to_cpu(data->p7iocPlssr),
 315                        be64_to_cpu(data->p7iocCsr));
 316        if (data->lemFir)
 317                pr_info("Lem:         %016llx %016llx %016llx\n",
 318                        be64_to_cpu(data->lemFir),
 319                        be64_to_cpu(data->lemErrorMask),
 320                        be64_to_cpu(data->lemWOF));
 321        if (data->phbErrorStatus)
 322                pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
 323                        be64_to_cpu(data->phbErrorStatus),
 324                        be64_to_cpu(data->phbFirstErrorStatus),
 325                        be64_to_cpu(data->phbErrorLog0),
 326                        be64_to_cpu(data->phbErrorLog1));
 327        if (data->mmioErrorStatus)
 328                pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
 329                        be64_to_cpu(data->mmioErrorStatus),
 330                        be64_to_cpu(data->mmioFirstErrorStatus),
 331                        be64_to_cpu(data->mmioErrorLog0),
 332                        be64_to_cpu(data->mmioErrorLog1));
 333        if (data->dma0ErrorStatus)
 334                pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
 335                        be64_to_cpu(data->dma0ErrorStatus),
 336                        be64_to_cpu(data->dma0FirstErrorStatus),
 337                        be64_to_cpu(data->dma0ErrorLog0),
 338                        be64_to_cpu(data->dma0ErrorLog1));
 339        if (data->dma1ErrorStatus)
 340                pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
 341                        be64_to_cpu(data->dma1ErrorStatus),
 342                        be64_to_cpu(data->dma1FirstErrorStatus),
 343                        be64_to_cpu(data->dma1ErrorLog0),
 344                        be64_to_cpu(data->dma1ErrorLog1));
 345
 346        pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
 347}
 348
 349static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
 350                                        struct OpalIoPhbErrorCommon *common)
 351{
 352        struct OpalIoPhb3ErrorData *data;
 353
  354        data = (struct OpalIoPhb3ErrorData *)common;
 355        pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
 356                hose->global_number, be32_to_cpu(common->version));
 357        if (data->brdgCtl)
 358                pr_info("brdgCtl:     %08x\n",
 359                        be32_to_cpu(data->brdgCtl));
 360        if (data->portStatusReg || data->rootCmplxStatus ||
 361            data->busAgentStatus)
 362                pr_info("UtlSts:      %08x %08x %08x\n",
 363                        be32_to_cpu(data->portStatusReg),
 364                        be32_to_cpu(data->rootCmplxStatus),
 365                        be32_to_cpu(data->busAgentStatus));
 366        if (data->deviceStatus || data->slotStatus   ||
 367            data->linkStatus   || data->devCmdStatus ||
 368            data->devSecStatus)
 369                pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
 370                        be32_to_cpu(data->deviceStatus),
 371                        be32_to_cpu(data->slotStatus),
 372                        be32_to_cpu(data->linkStatus),
 373                        be32_to_cpu(data->devCmdStatus),
 374                        be32_to_cpu(data->devSecStatus));
 375        if (data->rootErrorStatus || data->uncorrErrorStatus ||
 376            data->corrErrorStatus)
 377                pr_info("RootErrSts:  %08x %08x %08x\n",
 378                        be32_to_cpu(data->rootErrorStatus),
 379                        be32_to_cpu(data->uncorrErrorStatus),
 380                        be32_to_cpu(data->corrErrorStatus));
 381        if (data->tlpHdr1 || data->tlpHdr2 ||
 382            data->tlpHdr3 || data->tlpHdr4)
 383                pr_info("RootErrLog:  %08x %08x %08x %08x\n",
 384                        be32_to_cpu(data->tlpHdr1),
 385                        be32_to_cpu(data->tlpHdr2),
 386                        be32_to_cpu(data->tlpHdr3),
 387                        be32_to_cpu(data->tlpHdr4));
 388        if (data->sourceId || data->errorClass ||
 389            data->correlator)
 390                pr_info("RootErrLog1: %08x %016llx %016llx\n",
 391                        be32_to_cpu(data->sourceId),
 392                        be64_to_cpu(data->errorClass),
 393                        be64_to_cpu(data->correlator));
 394        if (data->nFir)
 395                pr_info("nFir:        %016llx %016llx %016llx\n",
 396                        be64_to_cpu(data->nFir),
 397                        be64_to_cpu(data->nFirMask),
 398                        be64_to_cpu(data->nFirWOF));
 399        if (data->phbPlssr || data->phbCsr)
 400                pr_info("PhbSts:      %016llx %016llx\n",
 401                        be64_to_cpu(data->phbPlssr),
 402                        be64_to_cpu(data->phbCsr));
 403        if (data->lemFir)
 404                pr_info("Lem:         %016llx %016llx %016llx\n",
 405                        be64_to_cpu(data->lemFir),
 406                        be64_to_cpu(data->lemErrorMask),
 407                        be64_to_cpu(data->lemWOF));
 408        if (data->phbErrorStatus)
 409                pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
 410                        be64_to_cpu(data->phbErrorStatus),
 411                        be64_to_cpu(data->phbFirstErrorStatus),
 412                        be64_to_cpu(data->phbErrorLog0),
 413                        be64_to_cpu(data->phbErrorLog1));
 414        if (data->mmioErrorStatus)
 415                pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
 416                        be64_to_cpu(data->mmioErrorStatus),
 417                        be64_to_cpu(data->mmioFirstErrorStatus),
 418                        be64_to_cpu(data->mmioErrorLog0),
 419                        be64_to_cpu(data->mmioErrorLog1));
 420        if (data->dma0ErrorStatus)
 421                pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
 422                        be64_to_cpu(data->dma0ErrorStatus),
 423                        be64_to_cpu(data->dma0FirstErrorStatus),
 424                        be64_to_cpu(data->dma0ErrorLog0),
 425                        be64_to_cpu(data->dma0ErrorLog1));
 426        if (data->dma1ErrorStatus)
 427                pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
 428                        be64_to_cpu(data->dma1ErrorStatus),
 429                        be64_to_cpu(data->dma1FirstErrorStatus),
 430                        be64_to_cpu(data->dma1ErrorLog0),
 431                        be64_to_cpu(data->dma1ErrorLog1));
 432
 433        pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
 434}
 435
 436static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
 437                                        struct OpalIoPhbErrorCommon *common)
 438{
 439        struct OpalIoPhb4ErrorData *data;
 440
  441        data = (struct OpalIoPhb4ErrorData *)common;
  442        pr_info("PHB4 PHB#%x Diag-data (Version: %d)\n",
 443                hose->global_number, be32_to_cpu(common->version));
 444        if (data->brdgCtl)
 445                pr_info("brdgCtl:    %08x\n",
 446                        be32_to_cpu(data->brdgCtl));
 447        if (data->deviceStatus || data->slotStatus   ||
 448            data->linkStatus   || data->devCmdStatus ||
 449            data->devSecStatus)
 450                pr_info("RootSts:    %08x %08x %08x %08x %08x\n",
 451                        be32_to_cpu(data->deviceStatus),
 452                        be32_to_cpu(data->slotStatus),
 453                        be32_to_cpu(data->linkStatus),
 454                        be32_to_cpu(data->devCmdStatus),
 455                        be32_to_cpu(data->devSecStatus));
 456        if (data->rootErrorStatus || data->uncorrErrorStatus ||
 457            data->corrErrorStatus)
 458                pr_info("RootErrSts: %08x %08x %08x\n",
 459                        be32_to_cpu(data->rootErrorStatus),
 460                        be32_to_cpu(data->uncorrErrorStatus),
 461                        be32_to_cpu(data->corrErrorStatus));
 462        if (data->tlpHdr1 || data->tlpHdr2 ||
 463            data->tlpHdr3 || data->tlpHdr4)
 464                pr_info("RootErrLog: %08x %08x %08x %08x\n",
 465                        be32_to_cpu(data->tlpHdr1),
 466                        be32_to_cpu(data->tlpHdr2),
 467                        be32_to_cpu(data->tlpHdr3),
 468                        be32_to_cpu(data->tlpHdr4));
 469        if (data->sourceId)
 470                pr_info("sourceId:   %08x\n", be32_to_cpu(data->sourceId));
 471        if (data->nFir)
 472                pr_info("nFir:       %016llx %016llx %016llx\n",
 473                        be64_to_cpu(data->nFir),
 474                        be64_to_cpu(data->nFirMask),
 475                        be64_to_cpu(data->nFirWOF));
 476        if (data->phbPlssr || data->phbCsr)
 477                pr_info("PhbSts:     %016llx %016llx\n",
 478                        be64_to_cpu(data->phbPlssr),
 479                        be64_to_cpu(data->phbCsr));
 480        if (data->lemFir)
 481                pr_info("Lem:        %016llx %016llx %016llx\n",
 482                        be64_to_cpu(data->lemFir),
 483                        be64_to_cpu(data->lemErrorMask),
 484                        be64_to_cpu(data->lemWOF));
 485        if (data->phbErrorStatus)
 486                pr_info("PhbErr:     %016llx %016llx %016llx %016llx\n",
 487                        be64_to_cpu(data->phbErrorStatus),
 488                        be64_to_cpu(data->phbFirstErrorStatus),
 489                        be64_to_cpu(data->phbErrorLog0),
 490                        be64_to_cpu(data->phbErrorLog1));
 491        if (data->phbTxeErrorStatus)
 492                pr_info("PhbTxeErr:  %016llx %016llx %016llx %016llx\n",
 493                        be64_to_cpu(data->phbTxeErrorStatus),
 494                        be64_to_cpu(data->phbTxeFirstErrorStatus),
 495                        be64_to_cpu(data->phbTxeErrorLog0),
 496                        be64_to_cpu(data->phbTxeErrorLog1));
 497        if (data->phbRxeArbErrorStatus)
 498                pr_info("RxeArbErr:  %016llx %016llx %016llx %016llx\n",
 499                        be64_to_cpu(data->phbRxeArbErrorStatus),
 500                        be64_to_cpu(data->phbRxeArbFirstErrorStatus),
 501                        be64_to_cpu(data->phbRxeArbErrorLog0),
 502                        be64_to_cpu(data->phbRxeArbErrorLog1));
 503        if (data->phbRxeMrgErrorStatus)
 504                pr_info("RxeMrgErr:  %016llx %016llx %016llx %016llx\n",
 505                        be64_to_cpu(data->phbRxeMrgErrorStatus),
 506                        be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
 507                        be64_to_cpu(data->phbRxeMrgErrorLog0),
 508                        be64_to_cpu(data->phbRxeMrgErrorLog1));
 509        if (data->phbRxeTceErrorStatus)
 510                pr_info("RxeTceErr:  %016llx %016llx %016llx %016llx\n",
 511                        be64_to_cpu(data->phbRxeTceErrorStatus),
 512                        be64_to_cpu(data->phbRxeTceFirstErrorStatus),
 513                        be64_to_cpu(data->phbRxeTceErrorLog0),
 514                        be64_to_cpu(data->phbRxeTceErrorLog1));
 515
 516        if (data->phbPblErrorStatus)
 517                pr_info("PblErr:     %016llx %016llx %016llx %016llx\n",
 518                        be64_to_cpu(data->phbPblErrorStatus),
 519                        be64_to_cpu(data->phbPblFirstErrorStatus),
 520                        be64_to_cpu(data->phbPblErrorLog0),
 521                        be64_to_cpu(data->phbPblErrorLog1));
 522        if (data->phbPcieDlpErrorStatus)
 523                pr_info("PcieDlp:    %016llx %016llx %016llx\n",
 524                        be64_to_cpu(data->phbPcieDlpErrorLog1),
 525                        be64_to_cpu(data->phbPcieDlpErrorLog2),
 526                        be64_to_cpu(data->phbPcieDlpErrorStatus));
 527        if (data->phbRegbErrorStatus)
 528                pr_info("RegbErr:    %016llx %016llx %016llx %016llx\n",
 529                        be64_to_cpu(data->phbRegbErrorStatus),
 530                        be64_to_cpu(data->phbRegbFirstErrorStatus),
 531                        be64_to_cpu(data->phbRegbErrorLog0),
 532                        be64_to_cpu(data->phbRegbErrorLog1));
 533
 534
 535        pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
 536}
 537
 538void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
 539                                unsigned char *log_buff)
 540{
 541        struct OpalIoPhbErrorCommon *common;
 542
 543        if (!hose || !log_buff)
 544                return;
 545
 546        common = (struct OpalIoPhbErrorCommon *)log_buff;
 547        switch (be32_to_cpu(common->ioType)) {
 548        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
 549                pnv_pci_dump_p7ioc_diag_data(hose, common);
 550                break;
 551        case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
 552                pnv_pci_dump_phb3_diag_data(hose, common);
 553                break;
 554        case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
 555                pnv_pci_dump_phb4_diag_data(hose, common);
 556                break;
 557        default:
 558                pr_warn("%s: Unrecognized ioType %d\n",
 559                        __func__, be32_to_cpu(common->ioType));
 560        }
 561}
 562
 563static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
 564{
 565        unsigned long flags, rc;
 566        int has_diag, ret = 0;
 567
 568        spin_lock_irqsave(&phb->lock, flags);
 569
 570        /* Fetch PHB diag-data */
 571        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
 572                                         phb->diag_data_size);
 573        has_diag = (rc == OPAL_SUCCESS);
 574
  575        /* If the PHB supports compound PEs, let it handle the unfreeze */
 576        if (phb->unfreeze_pe) {
 577                ret = phb->unfreeze_pe(phb,
 578                                       pe_no,
 579                                       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
 580        } else {
 581                rc = opal_pci_eeh_freeze_clear(phb->opal_id,
 582                                             pe_no,
 583                                             OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
 584                if (rc) {
 585                        pr_warn("%s: Failure %ld clearing frozen "
 586                                "PHB#%x-PE#%x\n",
 587                                __func__, rc, phb->hose->global_number,
 588                                pe_no);
 589                        ret = -EIO;
 590                }
 591        }
 592
 593        /*
 594         * For now, let's only display the diag buffer when we fail to clear
 595         * the EEH status. We'll do more sensible things later when we have
 596         * proper EEH support. We need to make sure we don't pollute ourselves
  597         * with the normal errors generated when probing empty slots.
 598         */
 599        if (has_diag && ret)
 600                pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
 601
 602        spin_unlock_irqrestore(&phb->lock, flags);
 603}
 604
 605static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
 606{
 607        struct pnv_phb *phb = pdn->phb->private_data;
 608        u8      fstate = 0;
 609        __be16  pcierr = 0;
 610        unsigned int pe_no;
 611        s64     rc;
 612
 613        /*
  614         * Get the PE#. During the PCI probe stage, we might not have
  615         * set that up yet, so all ER errors should be mapped to the
  616         * reserved PE.
 617         */
 618        pe_no = pdn->pe_number;
 619        if (pe_no == IODA_INVALID_PE) {
 620                pe_no = phb->ioda.reserved_pe_idx;
 621        }
 622
 623        /*
  624         * Fetch the frozen state. If the PHB supports compound PEs,
  625         * we need to handle that case.
 626         */
 627        if (phb->get_pe_state) {
 628                fstate = phb->get_pe_state(phb, pe_no);
 629        } else {
 630                rc = opal_pci_eeh_freeze_status(phb->opal_id,
 631                                                pe_no,
 632                                                &fstate,
 633                                                &pcierr,
 634                                                NULL);
 635                if (rc) {
 636                        pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
 637                                __func__, rc, phb->hose->global_number, pe_no);
 638                        return;
 639                }
 640        }
 641
 642        pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
 643                 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
 644
 645        /* Clear the frozen state if applicable */
 646        if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
 647            fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
 648            fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
 649                /*
 650                 * If PHB supports compound PE, freeze it for
 651                 * consistency.
 652                 */
 653                if (phb->freeze_pe)
 654                        phb->freeze_pe(phb, pe_no);
 655
 656                pnv_pci_handle_eeh_config(phb, pe_no);
 657        }
 658}
 659
 660int pnv_pci_cfg_read(struct pci_dn *pdn,
 661                     int where, int size, u32 *val)
 662{
 663        struct pnv_phb *phb = pdn->phb->private_data;
 664        u32 bdfn = (pdn->busno << 8) | pdn->devfn;
 665        s64 rc;
 666
 667        switch (size) {
 668        case 1: {
 669                u8 v8;
 670                rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
 671                *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
 672                break;
 673        }
 674        case 2: {
 675                __be16 v16;
 676                rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
 677                                                   &v16);
 678                *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
 679                break;
 680        }
 681        case 4: {
 682                __be32 v32;
 683                rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
 684                *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
 685                break;
 686        }
 687        default:
 688                return PCIBIOS_FUNC_NOT_SUPPORTED;
 689        }
 690
 691        pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
 692                 __func__, pdn->busno, pdn->devfn, where, size, *val);
 693        return PCIBIOS_SUCCESSFUL;
 694}
 695
 696int pnv_pci_cfg_write(struct pci_dn *pdn,
 697                      int where, int size, u32 val)
 698{
 699        struct pnv_phb *phb = pdn->phb->private_data;
 700        u32 bdfn = (pdn->busno << 8) | pdn->devfn;
 701
 702        pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
 703                 __func__, pdn->busno, pdn->devfn, where, size, val);
 704        switch (size) {
 705        case 1:
 706                opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
 707                break;
 708        case 2:
 709                opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
 710                break;
 711        case 4:
 712                opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
 713                break;
 714        default:
 715                return PCIBIOS_FUNC_NOT_SUPPORTED;
 716        }
 717
 718        return PCIBIOS_SUCCESSFUL;
 719}
 720
  721#ifdef CONFIG_EEH
 722static bool pnv_pci_cfg_check(struct pci_dn *pdn)
 723{
 724        struct eeh_dev *edev = NULL;
 725        struct pnv_phb *phb = pdn->phb->private_data;
 726
 727        /* EEH not enabled ? */
 728        if (!(phb->flags & PNV_PHB_FLAG_EEH))
 729                return true;
 730
 731        /* PE reset or device removed ? */
 732        edev = pdn->edev;
 733        if (edev) {
 734                if (edev->pe &&
 735                    (edev->pe->state & EEH_PE_CFG_BLOCKED))
 736                        return false;
 737
 738                if (edev->mode & EEH_DEV_REMOVED)
 739                        return false;
 740        }
 741
 742        return true;
 743}
 744#else
  745static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
 746{
 747        return true;
 748}
 749#endif /* CONFIG_EEH */
 750
 751static int pnv_pci_read_config(struct pci_bus *bus,
 752                               unsigned int devfn,
 753                               int where, int size, u32 *val)
 754{
 755        struct pci_dn *pdn;
 756        struct pnv_phb *phb;
 757        int ret;
 758
 759        *val = 0xFFFFFFFF;
 760        pdn = pci_get_pdn_by_devfn(bus, devfn);
 761        if (!pdn)
 762                return PCIBIOS_DEVICE_NOT_FOUND;
 763
 764        if (!pnv_pci_cfg_check(pdn))
 765                return PCIBIOS_DEVICE_NOT_FOUND;
 766
 767        ret = pnv_pci_cfg_read(pdn, where, size, val);
 768        phb = pdn->phb->private_data;
 769        if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
 770                if (*val == EEH_IO_ERROR_VALUE(size) &&
 771                    eeh_dev_check_failure(pdn->edev))
 772                        return PCIBIOS_DEVICE_NOT_FOUND;
 773        } else {
 774                pnv_pci_config_check_eeh(pdn);
 775        }
 776
 777        return ret;
 778}
 779
 780static int pnv_pci_write_config(struct pci_bus *bus,
 781                                unsigned int devfn,
 782                                int where, int size, u32 val)
 783{
 784        struct pci_dn *pdn;
 785        struct pnv_phb *phb;
 786        int ret;
 787
 788        pdn = pci_get_pdn_by_devfn(bus, devfn);
 789        if (!pdn)
 790                return PCIBIOS_DEVICE_NOT_FOUND;
 791
 792        if (!pnv_pci_cfg_check(pdn))
 793                return PCIBIOS_DEVICE_NOT_FOUND;
 794
 795        ret = pnv_pci_cfg_write(pdn, where, size, val);
 796        phb = pdn->phb->private_data;
 797        if (!(phb->flags & PNV_PHB_FLAG_EEH))
 798                pnv_pci_config_check_eeh(pdn);
 799
 800        return ret;
 801}
 802
 803struct pci_ops pnv_pci_ops = {
 804        .read  = pnv_pci_read_config,
 805        .write = pnv_pci_write_config,
 806};
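/*
 * Editorial sketch, not part of the original file: config space accesses
 * issued through the generic PCI accessors are dispatched to the ops above
 * via bus->ops, so the EEH checks in pnv_pci_read_config() run on every
 * access. The helper name is hypothetical.
 */
static u16 __maybe_unused example_read_vendor_id(struct pci_bus *bus,
                                                 unsigned int devfn)
{
        u32 id = 0xffffffff;

        /* Ends up in pnv_pci_read_config() on PowerNV PHBs */
        pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &id);

        return id & 0xffff;
}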
 807
 808struct iommu_table *pnv_pci_table_alloc(int nid)
 809{
 810        struct iommu_table *tbl;
 811
 812        tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
 813        if (!tbl)
 814                return NULL;
 815
 816        INIT_LIST_HEAD_RCU(&tbl->it_group_list);
 817        kref_init(&tbl->it_kref);
 818
 819        return tbl;
 820}
 821
 822void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
 823{
 824        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 825        struct pnv_phb *phb = hose->private_data;
 826#ifdef CONFIG_PCI_IOV
 827        struct pnv_ioda_pe *pe;
 828
 829        /* Fix the VF pdn PE number */
 830        if (pdev->is_virtfn) {
 831                list_for_each_entry(pe, &phb->ioda.pe_list, list) {
 832                        if (pe->rid == ((pdev->bus->number << 8) |
 833                            (pdev->devfn & 0xff))) {
 834                                pe->pdev = pdev;
 835                                break;
 836                        }
 837                }
 838        }
 839#endif /* CONFIG_PCI_IOV */
 840
 841        if (phb && phb->dma_dev_setup)
 842                phb->dma_dev_setup(phb, pdev);
 843}
 844
 845void pnv_pci_dma_bus_setup(struct pci_bus *bus)
 846{
 847        struct pci_controller *hose = bus->sysdata;
 848        struct pnv_phb *phb = hose->private_data;
 849        struct pnv_ioda_pe *pe;
 850
 851        list_for_each_entry(pe, &phb->ioda.pe_list, list) {
 852                if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
 853                        continue;
 854
 855                if (!pe->pbus)
 856                        continue;
 857
 858                if (bus->number == ((pe->rid >> 8) & 0xFF)) {
 859                        pe->pbus = bus;
 860                        break;
 861                }
 862        }
 863}
 864
 865int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
 866{
 867        struct pci_controller *hose;
 868        struct pnv_phb *phb_init, *phb_target;
 869        struct pnv_ioda_pe *pe_init;
 870        int rc;
 871
 872        if (!opal_check_token(OPAL_PCI_SET_P2P))
 873                return -ENXIO;
 874
 875        hose = pci_bus_to_host(initiator->bus);
 876        phb_init = hose->private_data;
 877
 878        hose = pci_bus_to_host(target->bus);
 879        phb_target = hose->private_data;
 880
 881        pe_init = pnv_ioda_get_pe(initiator);
 882        if (!pe_init)
 883                return -ENODEV;
 884
 885        /*
  886         * Configuring the initiator's PHB requires adjusting its
  887         * TVE#1 setting. Since the same device can be an initiator
  888         * several times for different target devices, we need to keep
  889         * a reference count to know when we can restore the default
  890         * bypass setting on its TVE#1 when disabling. OPAL does not
  891         * track PE states, so we add a reference count on the PE
  892         * in Linux.
 893         *
 894         * For the target, the configuration is per PHB, so we keep a
 895         * target reference count on the PHB.
 896         */
 897        mutex_lock(&p2p_mutex);
 898
 899        if (desc & OPAL_PCI_P2P_ENABLE) {
 900                /* always go to opal to validate the configuration */
 901                rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
 902                                      desc, pe_init->pe_number);
 903
 904                if (rc != OPAL_SUCCESS) {
 905                        rc = -EIO;
 906                        goto out;
 907                }
 908
 909                pe_init->p2p_initiator_count++;
 910                phb_target->p2p_target_count++;
 911        } else {
 912                if (!pe_init->p2p_initiator_count ||
 913                        !phb_target->p2p_target_count) {
 914                        rc = -EINVAL;
 915                        goto out;
 916                }
 917
 918                if (--pe_init->p2p_initiator_count == 0)
 919                        pnv_pci_ioda2_set_bypass(pe_init, true);
 920
 921                if (--phb_target->p2p_target_count == 0) {
 922                        rc = opal_pci_set_p2p(phb_init->opal_id,
 923                                              phb_target->opal_id, desc,
 924                                              pe_init->pe_number);
 925                        if (rc != OPAL_SUCCESS) {
 926                                rc = -EIO;
 927                                goto out;
 928                        }
 929                }
 930        }
 931        rc = 0;
 932out:
 933        mutex_unlock(&p2p_mutex);
 934        return rc;
 935}
 936EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);
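/*
 * Editorial sketch, not part of the original file: enabling and later tearing
 * down a peer-to-peer mapping with the helper above. Clearing
 * OPAL_PCI_P2P_ENABLE from the descriptor selects the disable path, which
 * drops the reference counts taken on enable. OPAL_PCI_P2P_STORE is assumed
 * from <asm/opal-api.h> and the function name is hypothetical.
 */
static int __maybe_unused example_p2p_store(struct pci_dev *initiator,
                                            struct pci_dev *target, bool enable)
{
        u64 desc = OPAL_PCI_P2P_STORE;

        if (enable)
                desc |= OPAL_PCI_P2P_ENABLE;

        return pnv_pci_set_p2p(initiator, target, desc);
}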
 937
 938struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
 939{
 940        struct pci_controller *hose = pci_bus_to_host(dev->bus);
 941
 942        return of_node_get(hose->dn);
 943}
 944EXPORT_SYMBOL(pnv_pci_get_phb_node);
 945
 946int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind)
 947{
 948        struct device_node *np;
 949        const __be32 *prop;
 950        struct pnv_ioda_pe *pe;
 951        uint16_t window_id;
 952        int rc;
 953
 954        if (!radix_enabled())
 955                return -ENXIO;
 956
 957        if (!(np = pnv_pci_get_phb_node(dev)))
 958                return -ENXIO;
 959
 960        prop = of_get_property(np, "ibm,phb-indications", NULL);
 961        of_node_put(np);
 962
 963        if (!prop || !prop[1])
 964                return -ENXIO;
 965
 966        *asnind = (u64)be32_to_cpu(prop[1]);
 967        pe = pnv_ioda_get_pe(dev);
 968        if (!pe)
 969                return -ENODEV;
 970
 971        /* Increase real window size to accept as_notify messages. */
  972        window_id = (pe->pe_number << 1) + 1;
 973        rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number,
 974                                             window_id, pe->tce_bypass_base,
 975                                             (uint64_t)1 << 48);
 976        return opal_error_code(rc);
 977}
 978EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel);
 979
 980int pnv_pci_disable_tunnel(struct pci_dev *dev)
 981{
 982        struct pnv_ioda_pe *pe;
 983
 984        pe = pnv_ioda_get_pe(dev);
 985        if (!pe)
 986                return -ENODEV;
 987
 988        /* Restore default real window size. */
 989        pnv_pci_ioda2_set_bypass(pe, true);
 990        return 0;
 991}
 992EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel);
 993
 994int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
 995{
 996        __be64 val;
 997        struct pci_controller *hose;
 998        struct pnv_phb *phb;
 999        u64 tunnel_bar;
1000        int rc;
1001
1002        if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
1003                return -ENXIO;
1004        if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
1005                return -ENXIO;
1006
1007        hose = pci_bus_to_host(dev->bus);
1008        phb = hose->private_data;
1009
1010        mutex_lock(&tunnel_mutex);
1011        rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
1012        if (rc != OPAL_SUCCESS) {
1013                rc = -EIO;
1014                goto out;
1015        }
1016        tunnel_bar = be64_to_cpu(val);
1017        if (enable) {
1018                /*
 1019                 * Only one device per PHB can use atomics.
 1020                 * Our policy is first-come, first-served.
 1021                 */
1022                if (tunnel_bar) {
1023                        if (tunnel_bar != addr)
1024                                rc = -EBUSY;
1025                        else
1026                                rc = 0; /* Setting same address twice is ok */
1027                        goto out;
1028                }
1029        } else {
1030                /*
 1031                 * The device that owns atomics and wants to release
 1032                 * them must pass the same address with enable == 0.
 1033                 */
1034                if (tunnel_bar != addr) {
1035                        rc = -EPERM;
1036                        goto out;
1037                }
1038                addr = 0x0ULL;
1039        }
1040        rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
1041        rc = opal_error_code(rc);
1042out:
1043        mutex_unlock(&tunnel_mutex);
1044        return rc;
1045}
1046EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
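/*
 * Editorial sketch, not part of the original file: the tunnel BAR is a single
 * per-PHB resource, so a driver claims it with enable=1 and must later release
 * it by passing the same address with enable=0. The helper name is
 * hypothetical and the address comes from the caller's device.
 */
static int __maybe_unused example_tunnel_bar(struct pci_dev *dev, u64 addr,
                                             bool claim)
{
        /* First come, first served: returns -EBUSY if another device owns it */
        return pnv_pci_set_tunnel_bar(dev, addr, claim ? 1 : 0);
}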
1047
1048#ifdef CONFIG_PPC64     /* for thread.tidr */
1049int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid,
1050                               u32 *tid)
1051{
1052        struct mm_struct *mm = NULL;
1053
1054        if (task == NULL)
1055                return -EINVAL;
1056
1057        mm = get_task_mm(task);
1058        if (mm == NULL)
1059                return -EINVAL;
1060
1061        *pid = mm->context.id;
1062        mmput(mm);
1063
1064        *tid = task->thread.tidr;
1065        *lpid = mfspr(SPRN_LPID);
1066        return 0;
1067}
1068EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info);
1069#endif
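/*
 * Editorial sketch, not part of the original file: a driver using as_notify
 * support would typically widen the real window with pnv_pci_enable_tunnel()
 * and then fetch the LPID/PID/TID triple for the current task to program into
 * its hardware context. The function name is hypothetical and error handling
 * is abbreviated.
 */
static int __maybe_unused example_setup_as_notify(struct pci_dev *dev)
{
        u64 asnind;
        u32 lpid, pid, tid;
        int rc;

        rc = pnv_pci_enable_tunnel(dev, &asnind);
        if (rc)
                return rc;

        return pnv_pci_get_as_notify_info(current, &lpid, &pid, &tid);
}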
1070
1071void pnv_pci_shutdown(void)
1072{
1073        struct pci_controller *hose;
1074
1075        list_for_each_entry(hose, &hose_list, list_node)
1076                if (hose->controller_ops.shutdown)
1077                        hose->controller_ops.shutdown(hose);
1078}
1079
1080/* Fixup wrong class code in p7ioc and p8 root complex */
1081static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
1082{
1083        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
1084}
1085DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
1086
1087void __init pnv_pci_init(void)
1088{
1089        struct device_node *np;
1090
1091        pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
1092
 1093        /* If we don't have OPAL, e.g. in sim, just skip the PCI probe */
1094        if (!firmware_has_feature(FW_FEATURE_OPAL))
1095                return;
1096
1097#ifdef CONFIG_PCIEPORTBUS
1098        /*
 1099         * On PowerNV, PCIe devices are (currently) managed in cooperation
 1100         * with firmware. This isn't *strictly* required, but there are enough
 1101         * assumptions baked into both firmware and the platform code that
 1102         * it's unwise to allow the portbus services to be used.
 1103         *
 1104         * We need to fix this eventually, but for now set this flag to disable
 1105         * the portbus driver. The AER service isn't required since AER
 1106         * events are handled via EEH. The pciehp hotplug driver can't work
1107         * without kernel changes (and portbus binding breaks pnv_php). The
1108         * other services also require some thinking about how we're going
1109         * to integrate them.
1110         */
1111        pcie_ports_disabled = true;
1112#endif
1113
1114        /* Look for IODA IO-Hubs. */
1115        for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
1116                pnv_pci_init_ioda_hub(np);
1117        }
1118
 1119        /* Look for ioda2 built-in PHB3s */
1120        for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
1121                pnv_pci_init_ioda2_phb(np);
1122
 1123        /* Look for ioda3 built-in PHB4s; we treat them as IODA2 */
1124        for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
1125                pnv_pci_init_ioda2_phb(np);
1126
1127        /* Look for NPU PHBs */
1128        for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
1129                pnv_pci_init_npu_phb(np);
1130
1131        /*
1132         * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
1133         * the exception of TCE kill which requires an OPAL call.
1134         */
1135        for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
1136                pnv_pci_init_npu_phb(np);
1137
1138        /* Look for NPU2 OpenCAPI PHBs */
1139        for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
1140                pnv_pci_init_npu2_opencapi_phb(np);
1141
1142        /* Configure IOMMU DMA hooks */
1143        set_pci_dma_ops(&dma_iommu_ops);
1144}
1145
1146static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
1147                unsigned long action, void *data)
1148{
1149        struct device *dev = data;
1150        struct pci_dev *pdev;
1151        struct pci_dn *pdn;
1152        struct pnv_ioda_pe *pe;
1153        struct pci_controller *hose;
1154        struct pnv_phb *phb;
1155
1156        switch (action) {
1157        case BUS_NOTIFY_ADD_DEVICE:
1158                pdev = to_pci_dev(dev);
1159                pdn = pci_get_pdn(pdev);
1160                hose = pci_bus_to_host(pdev->bus);
1161                phb = hose->private_data;
1162
1163                WARN_ON_ONCE(!phb);
1164                if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb)
1165                        return 0;
1166
1167                pe = &phb->ioda.pe_array[pdn->pe_number];
1168                if (!pe->table_group.group)
1169                        return 0;
1170                iommu_add_device(&pe->table_group, dev);
1171                return 0;
1172        case BUS_NOTIFY_DEL_DEVICE:
1173                iommu_del_device(dev);
1174                return 0;
1175        default:
1176                return 0;
1177        }
1178}
1179
1180static struct notifier_block pnv_tce_iommu_bus_nb = {
1181        .notifier_call = pnv_tce_iommu_bus_notifier,
1182};
1183
1184static int __init pnv_tce_iommu_bus_notifier_init(void)
1185{
1186        bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
1187        return 0;
1188}
1189machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);
1190