linux/drivers/ntb/hw/amd/ntb_hw_amd.c
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
 *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
 *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of AMD Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD PCIe NTB Linux driver
 *
 * Contact Information:
 * Xiangliang Yu <Xiangliang.Yu@amd.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_amd.h"

#define NTB_NAME	"ntb_hw_amd"
#define NTB_DESC	"AMD(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"1.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("AMD Inc.");

static const struct file_operations amd_ntb_debugfs_info;
static struct dentry *debugfs_dir;

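/*
 * Translate a memory window index into its backing PCI BAR. The
 * device-specific mw_idx is the BAR of the first window; each further
 * window doubles it (e.g. BARs 1, 2 and 4 on device 0x145b), matching
 * the XLAT/LMT register layout used in amd_ntb_mw_set_trans().
 */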
static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;

	return ndev->dev_data->mw_idx << idx;
}

static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	return ntb_ndev(ntb)->mw_count;
}

static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
				resource_size_t *addr_align,
				resource_size_t *size_align,
				resource_size_t *size_max)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (addr_align)
		*addr_align = SZ_4K;

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = pci_resource_len(ndev->ntb.pdev, bar);

	return 0;
}

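/*
 * Program an inbound translation: the DMA address goes into the
 * peer-visible XLAT register and the window size into the LMT
 * register, each read back to verify the hardware latched it.
 * BAR1 takes a 32-bit limit; BARs 2/3 and 4/5 take 64-bit ones.
 */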
static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				dma_addr_t addr, resource_size_t size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg = 0;
	resource_size_t mw_size;
	void __iomem *mmio, *peer_mmio;
	u64 base_addr, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	mw_size = pci_resource_len(ntb->pdev, bar);

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	peer_mmio = ndev->peer_mmio;

	base_addr = pci_resource_start(ntb->pdev, bar);

	if (bar != 1) {
		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		write64(limit, peer_mmio + limit_reg);
		reg_val = read64(peer_mmio + limit_reg);
		if (reg_val != limit) {
			write64(base_addr, mmio + limit_reg);
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	} else {
		xlat_reg = AMD_BAR1XLAT_OFFSET;
		limit_reg = AMD_BAR1LMT_OFFSET;

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		writel(limit, peer_mmio + limit_reg);
		reg_val = readl(peer_mmio + limit_reg);
		if (reg_val != limit) {
			writel(base_addr, mmio + limit_reg);
			writel(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev = NULL;
	struct pci_dev *pci_swds = NULL;
	struct pci_dev *pci_swus = NULL;
	u32 stat;
	int rc;

	if (ndev->ntb.topo == NTB_TOPO_SEC) {
		/* Locate the pointer to Downstream Switch for this device */
		pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
		if (pci_swds) {
			/*
			 * Locate the pointer to Upstream Switch for
			 * the Downstream Switch.
			 */
			pci_swus = pci_upstream_bridge(pci_swds);
			if (pci_swus) {
				rc = pcie_capability_read_dword(pci_swus,
								PCI_EXP_LNKCTL,
								&stat);
				if (rc)
					return 0;
			} else {
				return 0;
			}
		} else {
			return 0;
		}
	} else if (ndev->ntb.topo == NTB_TOPO_PRI) {
		/*
		 * For NTB primary, we simply read the Link Control and
		 * Status registers of the NTB device itself.
		 */
		pdev = ndev->ntb.pdev;
		rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
		if (rc)
			return 0;
	} else {
		/* Catch all for everything else */
		return 0;
	}

	ndev->lnk_sta = stat;

	return 1;
}

static int amd_link_is_up(struct amd_ntb_dev *ndev)
{
	int ret;

	/*
	 * We consider the link to be up under two conditions:
	 *
	 *   - When a link-up event is received. This is indicated by
	 *     AMD_LINK_UP_EVENT set in peer_sta.
	 *   - When the drivers on both sides of the link have been loaded.
	 *     This is indicated by bit 1 being set in the peer
	 *     SIDEINFO register.
	 *
	 * This function should return 1 when the latter of the above
	 * two conditions is true.
	 *
	 * Now consider the sequence of events - Link-Up event occurs,
	 * then the peer side driver loads. In this case, we would have
	 * received LINK_UP event and bit 1 of peer SIDEINFO is also
	 * set. What happens now if the link goes down? Bit 1 of
	 * peer SIDEINFO remains set, but LINK_DOWN bit is set in
	 * peer_sta. So we should return 0 from this function. Not only
	 * that, we also clear bit 1 of peer SIDEINFO, since the peer
	 * side driver did not even get a chance to clear it before
	 * the link went down. This can be the case of surprise link
	 * removal.
	 *
	 * LINK_UP event will always occur before the peer side driver
	 * gets loaded the very first time. So there can be a case when
	 * the LINK_UP event has occurred, but the peer side driver hasn't
	 * yet loaded. We return 0 in that case.
	 *
	 * There is also a special case when the primary side driver is
	 * unloaded and then loaded again. Since there is no change in
	 * the status of NTB secondary in this case, there is no Link-Up
	 * or Link-Down notification received. We recognize this condition
	 * with peer_sta being set to 0.
	 *
	 * If bit 1 of the peer SIDEINFO register is not set, then we
	 * simply return 0 irrespective of the link up or down status
	 * set in peer_sta.
	 */
	ret = amd_poll_link(ndev);
	if (ret) {
		/*
		 * We need to check the below only for NTB primary. For NTB
		 * secondary, simply checking the result of PSIDE_INFO
		 * register will suffice.
		 */
		if (ndev->ntb.topo == NTB_TOPO_PRI) {
			if ((ndev->peer_sta & AMD_LINK_UP_EVENT) ||
			    (ndev->peer_sta == 0))
				return ret;
			else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) {
				/* Clear peer sideinfo register */
				amd_clear_side_info_reg(ndev, true);

				return 0;
			}
		} else { /* NTB_TOPO_SEC */
			return ret;
		}
	}

	return 0;
}

static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
			      enum ntb_speed *speed,
			      enum ntb_width *width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int ret = 0;

	if (amd_link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);

		dev_dbg(&ntb->pdev->dev, "link is up.\n");

		ret = 1;
	} else {
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;

		dev_dbg(&ntb->pdev->dev, "link is down.\n");
	}

	return ret;
}

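/*
 * On this hardware, "enabling" the link only unmasks the SMU event
 * interrupts; the secondary side apparently cannot initiate link
 * state changes, hence the -EINVAL for NTB_TOPO_SEC below.
 */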
static int amd_ntb_link_enable(struct ntb_dev *ntb,
			       enum ntb_speed max_speed,
			       enum ntb_width max_width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	/* Enable event interrupt */
	ndev->int_mask &= ~AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");

	return 0;
}

static int amd_ntb_link_disable(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	/* Disable event interrupt */
	ndev->int_mask |= AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(&ntb->pdev->dev, "Disabling Link.\n");

	return 0;
}

static int amd_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	/* The same as for inbound MWs */
	return ntb_ndev(ntb)->mw_count;
}

static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *base, resource_size_t *size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar);

	return 0;
}

static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int amd_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_count;
}

static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector >= ndev->db_count)
		return 0;

	return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector);
}

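/*
 * The doorbell registers on this device are 16 bits wide, hence the
 * readw()/writew() accessors and the u16 casts in the helpers below.
 */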
static u64 amd_ntb_db_read(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	return (u64)readw(mmio + AMD_DBSTAT_OFFSET);
}

static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBSTAT_OFFSET);

	return 0;
}

static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask |= db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}

static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask &= ~db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}

static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET);

	return 0;
}

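/*
 * Scratchpad space is split between the two sides: self_spad and
 * peer_spad hold the byte offsets of this side's and the peer's
 * halves within the SPAD register block (set up in amd_init_ntb()).
 */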
static int amd_ntb_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}

static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	offset = ndev->self_spad + (idx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}

static int amd_ntb_spad_write(struct ntb_dev *ntb,
			      int idx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->self_spad + (idx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}

static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (sidx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}

static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
				   int sidx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (sidx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}

static const struct ntb_dev_ops amd_ntb_ops = {
	.mw_count		= amd_ntb_mw_count,
	.mw_get_align		= amd_ntb_mw_get_align,
	.mw_set_trans		= amd_ntb_mw_set_trans,
	.peer_mw_count		= amd_ntb_peer_mw_count,
	.peer_mw_get_addr	= amd_ntb_peer_mw_get_addr,
	.link_is_up		= amd_ntb_link_is_up,
	.link_enable		= amd_ntb_link_enable,
	.link_disable		= amd_ntb_link_disable,
	.db_valid_mask		= amd_ntb_db_valid_mask,
	.db_vector_count	= amd_ntb_db_vector_count,
	.db_vector_mask		= amd_ntb_db_vector_mask,
	.db_read		= amd_ntb_db_read,
	.db_clear		= amd_ntb_db_clear,
	.db_set_mask		= amd_ntb_db_set_mask,
	.db_clear_mask		= amd_ntb_db_clear_mask,
	.peer_db_set		= amd_ntb_peer_db_set,
	.spad_count		= amd_ntb_spad_count,
	.spad_read		= amd_ntb_spad_read,
	.spad_write		= amd_ntb_spad_write,
	.peer_spad_read		= amd_ntb_peer_spad_read,
	.peer_spad_write	= amd_ntb_peer_spad_write,
};

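/*
 * Acknowledge an SMU event by setting its bit in the SMUACK register;
 * the SMU firmware presumably clears it once the ack is processed.
 */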
static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
{
	void __iomem *mmio = ndev->self_mmio;
	int reg;

	reg = readl(mmio + AMD_SMUACK_OFFSET);
	reg |= bit;
	writel(reg, mmio + AMD_SMUACK_OFFSET);
}

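/*
 * Handle SMU event interrupts: record the peer's power/link state in
 * peer_sta, ack the SMU, and raise ntb_link_event() so that clients
 * re-evaluate the link. For events after which the state may change
 * again without a further interrupt, a heartbeat poll is scheduled.
 */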
static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
	void __iomem *mmio = ndev->self_mmio;
	struct device *dev = &ndev->ntb.pdev->dev;
	u32 status;

	status = readl(mmio + AMD_INTSTAT_OFFSET);
	if (!(status & AMD_EVENT_INTMASK))
		return;

	dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);

	status &= AMD_EVENT_INTMASK;
	switch (status) {
	case AMD_PEER_FLUSH_EVENT:
		ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
		dev_info(dev, "Flush is done.\n");
		break;
	case AMD_PEER_RESET_EVENT:
	case AMD_LINK_DOWN_EVENT:
		ndev->peer_sta |= status;
		if (status == AMD_LINK_DOWN_EVENT)
			ndev->peer_sta &= ~AMD_LINK_UP_EVENT;

		amd_ack_smu(ndev, status);

		/* link down first */
		ntb_link_event(&ndev->ntb);
		/* polling peer status */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	case AMD_PEER_D3_EVENT:
	case AMD_PEER_PMETO_EVENT:
	case AMD_LINK_UP_EVENT:
		ndev->peer_sta |= status;
		if (status == AMD_LINK_UP_EVENT)
			ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
		else if (status == AMD_PEER_D3_EVENT)
			ndev->peer_sta &= ~AMD_PEER_D0_EVENT;

		amd_ack_smu(ndev, status);

		/* notify the link status change */
		ntb_link_event(&ndev->ntb);

		break;
	case AMD_PEER_D0_EVENT:
		mmio = ndev->peer_mmio;
		status = readl(mmio + AMD_PMESTAT_OFFSET);
		/* check if this is a WAKEUP event */
		if (status & 0x1)
			dev_info(dev, "Wakeup is done.\n");

		ndev->peer_sta |= AMD_PEER_D0_EVENT;
		ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);

		/* start a timer to poll link status */
		schedule_delayed_work(&ndev->hb_timer,
				      AMD_LINK_HB_TIMEOUT);
		break;
	default:
		dev_info(dev, "event status = 0x%x.\n", status);
		break;
	}

	/* Clear the interrupt status */
	writel(status, mmio + AMD_INTSTAT_OFFSET);
}

static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec)
{
	struct device *dev = &ndev->ntb.pdev->dev;
	u64 status;

	status = amd_ntb_db_read(&ndev->ntb);

	dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec);

	/*
	 * Since we reserved the highest order bit of the DB register for
	 * signaling the peer of a special event, that is the only status
	 * bit we need to be concerned with here.
	 */
	if (status & BIT(ndev->db_last_bit)) {
		ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit));
		/* send link down event notification */
		ntb_link_event(&ndev->ntb);

		/*
		 * If we are here, that means the peer has signalled a special
		 * event which notifies that the peer driver has been
		 * unloaded for some reason. Since there is a chance that the
		 * peer will load its driver again sometime, we schedule the
		 * link polling routine.
		 */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
	}
}

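/*
 * Common interrupt handler. With a full MSI-X allocation, vectors
 * past the doorbell range carry SMU events; with a single vector
 * (MSI or INTx) both sources share it, so events are checked there
 * unconditionally.
 */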
static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
{
	dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec);

	if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
		amd_handle_event(ndev, vec);

	if (vec < AMD_DB_CNT) {
		amd_handle_db_event(ndev, vec);
		ntb_db_event(&ndev->ntb, vec);
	}

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct amd_ntb_vec *nvec = dev;

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct amd_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}

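/*
 * Set up interrupts with graceful fallback: try an MSI-X range first
 * (the NOTE below suggests the hardware cannot use a partial MSI-X
 * allocation), then a single MSI, then legacy INTx.
 */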
static int ndev_init_isr(struct amd_ntb_dev *ndev,
			 int msix_min, int msix_max)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;

	node = dev_to_node(&pdev->dev);

	ndev->db_mask = ndev->db_valid_mask;

	/* Try to set up msix irq */
	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	/*
	 * NOTE: Disable MSI-X if the vector count is less than 16
	 * because of a hardware limitation.
	 */
	if (msix_count < msix_min) {
		pci_disable_msix(pdev);
		goto err_msix_enable;
	}

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using msix interrupts\n");
	ndev->db_count = msix_min;
	ndev->msix_vec_count = msix_max;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */
	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	void __iomem *mmio = ndev->self_mmio;
	int i;

	pdev = ndev->ntb.pdev;

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET);

	if (ndev->msix) {
		i = ndev->msix_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
		else
			pci_intx(pdev, 0);
	}
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct amd_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!amd_link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "MSIX Vector Count -\t%u\n", ndev->msix_vec_count);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);

	u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t\t%#06x\n", u.v32);

	u.v32 = readl(mmio + AMD_DBSTAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t\t%#06x\n", u.v32);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT1 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT1 -\t\t\t%#06x\n", u.v32);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(pci_name(ndev->ntb.pdev),
					   debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &amd_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &amd_ntb_ops;
	ndev->int_mask = AMD_EVENT_INTMASK;
	spin_lock_init(&ndev->db_mask_lock);
}

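/*
 * Sample the peer's SIDEINFO ready bit and refresh the cached PCIe
 * link status; returns nonzero once the peer driver has marked
 * itself ready.
 */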
static int amd_poll_link(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->peer_mmio;
	u32 reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	reg &= AMD_SIDE_READY;

	dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);

	ndev->cntl_sta = reg;

	amd_ntb_get_link_status(ndev);

	return ndev->cntl_sta;
}

static void amd_link_hb(struct work_struct *work)
{
	struct amd_ntb_dev *ndev = hb_ndev(work);

	if (amd_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (!amd_link_is_up(ndev))
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
}

static int amd_init_isr(struct amd_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
}

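/*
 * The SIDEINFO ready bit advertises that a side's driver is loaded;
 * amd_link_is_up() on the other end keys off it. The 'peer' argument
 * selects whose copy of the register is modified.
 */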
static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
	void __iomem *mmio = NULL;
	unsigned int reg;

	if (peer)
		mmio = ndev->peer_mmio;
	else
		mmio = ndev->self_mmio;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
	}
}

static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
	void __iomem *mmio = NULL;
	unsigned int reg;

	if (peer)
		mmio = ndev->peer_mmio;
	else
		mmio = ndev->self_mmio;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
		/* read back to flush the posted write */
		readl(mmio + AMD_SIDEINFO_OFFSET);
	}
}

static void amd_init_side_info(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	amd_set_side_info_reg(ndev, false);

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
}

static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	amd_clear_side_info_reg(ndev, false);

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
}

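/*
 * Per-topology setup: the scratchpad block is split in half, with
 * primary and secondary taking opposite halves (offsets 0 and 0x20),
 * and a delayed work item polls the peer's state. Only the PRI/SEC
 * pair is possible on this hardware; B2B is rejected.
 */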
static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;

	ndev->mw_count = ndev->dev_data->mw_count;
	ndev->spad_count = AMD_SPADS_CNT;
	ndev->db_count = AMD_DB_CNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
	case NTB_TOPO_SEC:
		ndev->spad_count >>= 1;
		if (ndev->ntb.topo == NTB_TOPO_PRI) {
			ndev->self_spad = 0;
			ndev->peer_spad = 0x20;
		} else {
			ndev->self_spad = 0x20;
			ndev->peer_spad = 0;
		}

		INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb);
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	default:
		dev_err(&ndev->ntb.pdev->dev,
			"AMD NTB does not support B2B mode.\n");
		return -EINVAL;
	}

	/* Mask event interrupts */
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}

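/*
 * The topology is fixed in hardware: the side field in SIDEINFO
 * reports whether this port is the primary or the secondary side.
 */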
static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 info;

	info = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (info & AMD_SIDE_MASK)
		return NTB_TOPO_SEC;
	else
		return NTB_TOPO_PRI;
}

static int amd_init_dev(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = ndev->ntb.pdev;

	ndev->ntb.topo = amd_get_topo(ndev);
	dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
		ntb_topo_string(ndev->ntb.topo));

	rc = amd_init_ntb(ndev);
	if (rc)
		return rc;

	rc = amd_init_isr(ndev);
	if (rc) {
		dev_err(&pdev->dev, "failed to init isr.\n");
		return rc;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
	/*
	 * We reserve the highest order bit of the DB register, which will
	 * be used to notify the peer when the driver on this side is being
	 * unloaded.
	 */
	ndev->db_last_bit =
			find_last_bit((unsigned long *)&ndev->db_valid_mask,
				      hweight64(ndev->db_valid_mask));
	writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET);
	/*
	 * Since now there is one less bit to account for, the DB count
	 * and DB mask should be adjusted accordingly.
	 */
	ndev->db_count -= 1;
	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* Enable Link-Up and Link-Down event interrupts */
	ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT);
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}

static void amd_deinit_dev(struct amd_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);

	ndev_deinit_isr(ndev);
}

static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

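/*
 * Probe path: allocate on the device's NUMA node, map BAR0 (which
 * carries both the self and peer register sets), initialise the
 * device, advertise readiness via SIDEINFO and register with the
 * NTB core.
 */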
static int amd_ntb_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct amd_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
	if (!ndev) {
		rc = -ENOMEM;
		goto err_ndev;
	}

	ndev->dev_data = (struct ntb_dev_data *)id->driver_data;

	ndev_init_struct(ndev, pdev);

	rc = amd_ntb_init_pci(ndev, pdev);
	if (rc)
		goto err_init_pci;

	rc = amd_init_dev(ndev);
	if (rc)
		goto err_init_dev;

	/* write side info */
	amd_init_side_info(ndev);

	amd_poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
err_init_dev:
	amd_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void amd_ntb_pci_remove(struct pci_dev *pdev)
{
	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

	/*
	 * Clear the READY bit in SIDEINFO register before sending DB event
	 * to the peer. This will make sure that when the peer handles the
	 * DB event, it correctly reads this bit as being 0.
	 */
	amd_deinit_side_info(ndev);
	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
	amd_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static void amd_ntb_pci_shutdown(struct pci_dev *pdev)
{
	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

	/* Send link down notification */
	ntb_link_event(&ndev->ntb);

	amd_deinit_side_info(ndev);
	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
	amd_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct file_operations amd_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

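/*
 * Match data for the supported devices: mw_count is the number of
 * memory windows and mw_idx the BAR backing the first one; see
 * ndev_mw_to_bar() for how the remaining windows map onto BARs.
 */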
static const struct ntb_dev_data dev_data[] = {
	{ /* for device 145b */
		.mw_count = 3,
		.mw_idx = 1,
	},
	{ /* for device 148b */
		.mw_count = 2,
		.mw_idx = 2,
	},
};

static const struct pci_device_id amd_ntb_pci_tbl[] = {
	{ PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
	{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
	{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);

static struct pci_driver amd_ntb_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= amd_ntb_pci_tbl,
	.probe		= amd_ntb_pci_probe,
	.remove		= amd_ntb_pci_remove,
	.shutdown	= amd_ntb_pci_shutdown,
};

static int __init amd_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&amd_ntb_pci_driver);
}
module_init(amd_ntb_pci_driver_init);

static void __exit amd_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&amd_ntb_pci_driver);
	debugfs_remove_recursive(debugfs_dir);
}
module_exit(amd_ntb_pci_driver_exit);