linux/drivers/rapidio/devices/tsi721.c
   1/*
   2 * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge
   3 *
   4 * Copyright 2011 Integrated Device Technology, Inc.
   5 * Alexandre Bounine <alexandre.bounine@idt.com>
   6 * Chul Kim <chul.kim@idt.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify it
   9 * under the terms of the GNU General Public License as published by the Free
  10 * Software Foundation; either version 2 of the License, or (at your option)
  11 * any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful, but WITHOUT
  14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  15 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  16 * more details.
  17 *
  18 * You should have received a copy of the GNU General Public License along with
  19 * this program; if not, write to the Free Software Foundation, Inc., 59
  20 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  21 */
  22
  23#include <linux/io.h>
  24#include <linux/errno.h>
  25#include <linux/init.h>
  26#include <linux/ioport.h>
  27#include <linux/kernel.h>
  28#include <linux/module.h>
  29#include <linux/pci.h>
  30#include <linux/rio.h>
  31#include <linux/rio_drv.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/interrupt.h>
  34#include <linux/kfifo.h>
  35#include <linux/delay.h>
  36
  37#include "tsi721.h"
  38
  39#ifdef DEBUG
  40u32 tsi_dbg_level;
  41module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
  42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
  43#endif
  44
  45static int pcie_mrrs = -1;
  46module_param(pcie_mrrs, int, S_IRUGO);
  47MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)");
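/*
 * Note: MRRS values 0..5 select a PCIe Maximum_Read_Request_Size of
 * 128 << value bytes (128 B ... 4 KB); a negative value (the default)
 * leaves the setting programmed by platform firmware unchanged.
 */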
  48
  49static u8 mbox_sel = 0x0f;
  50module_param(mbox_sel, byte, S_IRUGO);
  51MODULE_PARM_DESC(mbox_sel,
  52                 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
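/*
 * Note: each bit in mbox_sel enables the corresponding RIO mailbox (0..3)
 * for use by this driver; a cleared bit makes the driver decline requests
 * to open that mailbox.
 */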
  53
  54static DEFINE_SPINLOCK(tsi721_maint_lock);
  55
  56static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
  57static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
  58
  59/**
  60 * tsi721_lcread - read from local SREP config space
  61 * @mport: RapidIO master port info
   62 * @index: ID of RapidIO interface
  63 * @offset: Offset into configuration space
  64 * @len: Length (in bytes) of the maintenance transaction
  65 * @data: Value to be read into
  66 *
  67 * Generates a local SREP space read. Returns %0 on
  68 * success or %-EINVAL on failure.
  69 */
  70static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
  71                         int len, u32 *data)
  72{
  73        struct tsi721_device *priv = mport->priv;
  74
  75        if (len != sizeof(u32))
  76                return -EINVAL; /* only 32-bit access is supported */
  77
  78        *data = ioread32(priv->regs + offset);
  79
  80        return 0;
  81}
  82
  83/**
  84 * tsi721_lcwrite - write into local SREP config space
  85 * @mport: RapidIO master port info
   86 * @index: ID of RapidIO interface
  87 * @offset: Offset into configuration space
  88 * @len: Length (in bytes) of the maintenance transaction
  89 * @data: Value to be written
  90 *
  91 * Generates a local write into SREP configuration space. Returns %0 on
  92 * success or %-EINVAL on failure.
  93 */
  94static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
  95                          int len, u32 data)
  96{
  97        struct tsi721_device *priv = mport->priv;
  98
  99        if (len != sizeof(u32))
 100                return -EINVAL; /* only 32-bit access is supported */
 101
 102        iowrite32(data, priv->regs + offset);
 103
 104        return 0;
 105}
 106
 107/**
 108 * tsi721_maint_dma - Helper function to generate RapidIO maintenance
 109 *                    transactions using designated Tsi721 DMA channel.
 110 * @priv: pointer to tsi721 private data
  111 * @sys_size: RapidIO transport system size
 112 * @destid: Destination ID of transaction
 113 * @hopcount: Number of hops to target device
 114 * @offset: Offset into configuration space
 115 * @len: Length (in bytes) of the maintenance transaction
 116 * @data: Location to be read from or write into
 117 * @do_wr: Operation flag (1 == MAINT_WR)
 118 *
 119 * Generates a RapidIO maintenance transaction (Read or Write).
  120 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 121 */
 122static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
 123                        u16 destid, u8 hopcount, u32 offset, int len,
 124                        u32 *data, int do_wr)
 125{
 126        void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
 127        struct tsi721_dma_desc *bd_ptr;
 128        u32 rd_count, swr_ptr, ch_stat;
 129        unsigned long flags;
 130        int i, err = 0;
 131        u32 op = do_wr ? MAINT_WR : MAINT_RD;
 132
 133        if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
 134                return -EINVAL;
 135
 136        spin_lock_irqsave(&tsi721_maint_lock, flags);
 137
 138        bd_ptr = priv->mdma.bd_base;
 139
 140        rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
 141
 142        /* Initialize DMA descriptor */
 143        bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
 144        bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
 145        bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
 146        bd_ptr[0].raddr_hi = 0;
 147        if (do_wr)
 148                bd_ptr[0].data[0] = cpu_to_be32p(data);
 149        else
 150                bd_ptr[0].data[0] = 0xffffffff;
 151
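        /*
         * Make sure the descriptor above is visible in memory before the
         * write count update below hands it to the BDMA engine.
         */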
 152        mb();
 153
 154        /* Start DMA operation */
 155        iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
 156        ioread32(regs + TSI721_DMAC_DWRCNT);
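        /*
         * Advancing the write count by two covers the data descriptor plus
         * the trailing link descriptor of the two-entry maintenance ring;
         * the read-back flushes the posted MMIO write.
         */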
 157        i = 0;
 158
 159        /* Wait until DMA transfer is finished */
 160        while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
 161                                                        & TSI721_DMAC_STS_RUN) {
 162                udelay(1);
 163                if (++i >= 5000000) {
 164                        tsi_debug(MAINT, &priv->pdev->dev,
 165                                "DMA[%d] read timeout ch_status=%x",
 166                                priv->mdma.ch_id, ch_stat);
 167                        if (!do_wr)
 168                                *data = 0xffffffff;
 169                        err = -EIO;
 170                        goto err_out;
 171                }
 172        }
 173
 174        if (ch_stat & TSI721_DMAC_STS_ABORT) {
 175                /* If DMA operation aborted due to error,
 176                 * reinitialize DMA channel
 177                 */
 178                tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x",
 179                          ch_stat);
 180                tsi_debug(MAINT, &priv->pdev->dev,
 181                          "OP=%d : destid=%x hc=%x off=%x",
 182                          do_wr ? MAINT_WR : MAINT_RD,
 183                          destid, hopcount, offset);
 184                iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
 185                iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
 186                udelay(10);
 187                iowrite32(0, regs + TSI721_DMAC_DWRCNT);
 188                udelay(1);
 189                if (!do_wr)
 190                        *data = 0xffffffff;
 191                err = -EIO;
 192                goto err_out;
 193        }
 194
 195        if (!do_wr)
 196                *data = be32_to_cpu(bd_ptr[0].data[0]);
 197
 198        /*
 199         * Update descriptor status FIFO RD pointer.
  200         * NOTE: Skipping the check and clear of FIFO entries because we
  201         * are waiting for the transfer to complete.
 202         */
 203        swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
 204        iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
 205
 206err_out:
 207        spin_unlock_irqrestore(&tsi721_maint_lock, flags);
 208
 209        return err;
 210}
 211
 212/**
 213 * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
 214 *                    using Tsi721 BDMA engine.
 215 * @mport: RapidIO master port control structure
  216 * @index: ID of RapidIO interface
 217 * @destid: Destination ID of transaction
 218 * @hopcount: Number of hops to target device
 219 * @offset: Offset into configuration space
 220 * @len: Length (in bytes) of the maintenance transaction
  221 * @data: Location to be read into
  222 *
  223 * Generates a RapidIO maintenance read transaction.
  224 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 225 */
 226static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
 227                        u8 hopcount, u32 offset, int len, u32 *data)
 228{
 229        struct tsi721_device *priv = mport->priv;
 230
 231        return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
 232                                offset, len, data, 0);
 233}
 234
 235/**
 236 * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
 237 *                     using Tsi721 BDMA engine
 238 * @mport: RapidIO master port control structure
  239 * @index: ID of RapidIO interface
 240 * @destid: Destination ID of transaction
 241 * @hopcount: Number of hops to target device
 242 * @offset: Offset into configuration space
 243 * @len: Length (in bytes) of the maintenance transaction
  244 * @data: Value to be written
 245 *
 246 * Generates a RapidIO maintenance write transaction.
  247 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 248 */
 249static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
 250                         u8 hopcount, u32 offset, int len, u32 data)
 251{
 252        struct tsi721_device *priv = mport->priv;
 253        u32 temp = data;
 254
 255        return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
 256                                offset, len, &temp, 1);
 257}
 258
 259/**
 260 * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
 261 * @priv:  tsi721 device private structure
 262 *
 263 * Handles inbound port-write interrupts. Copies PW message from an internal
 264 * buffer into PW message FIFO and schedules deferred routine to process
 265 * queued messages.
 266 */
 267static int
 268tsi721_pw_handler(struct tsi721_device *priv)
 269{
 270        u32 pw_stat;
 271        u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];
 272
 273
 274        pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);
 275
 276        if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
 277                pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
 278                pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
 279                pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
 280                pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));
 281
 282                /* Queue PW message (if there is room in FIFO),
 283                 * otherwise discard it.
 284                 */
 285                spin_lock(&priv->pw_fifo_lock);
 286                if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
 287                        kfifo_in(&priv->pw_fifo, pw_buf,
 288                                                TSI721_RIO_PW_MSG_SIZE);
 289                else
 290                        priv->pw_discard_count++;
 291                spin_unlock(&priv->pw_fifo_lock);
 292        }
 293
 294        /* Clear pending PW interrupts */
 295        iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
 296                  priv->regs + TSI721_RIO_PW_RX_STAT);
 297
 298        schedule_work(&priv->pw_work);
 299
 300        return 0;
 301}
 302
 303static void tsi721_pw_dpc(struct work_struct *work)
 304{
 305        struct tsi721_device *priv = container_of(work, struct tsi721_device,
 306                                                    pw_work);
 307        union rio_pw_msg pwmsg;
 308
 309        /*
 310         * Process port-write messages
 311         */
 312        while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg,
 313                         TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
 314                /* Pass the port-write message to RIO core for processing */
 315                rio_inb_pwrite_handler(&priv->mport, &pwmsg);
 316        }
 317}
 318
 319/**
  320 * tsi721_pw_enable - enable/disable port-write interface
 321 * @mport: Master port implementing the port write unit
 322 * @enable:    1=enable; 0=disable port-write message handling
 323 */
 324static int tsi721_pw_enable(struct rio_mport *mport, int enable)
 325{
 326        struct tsi721_device *priv = mport->priv;
 327        u32 rval;
 328
 329        rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);
 330
 331        if (enable)
 332                rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
 333        else
 334                rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;
 335
 336        /* Clear pending PW interrupts */
 337        iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
 338                  priv->regs + TSI721_RIO_PW_RX_STAT);
 339        /* Update enable bits */
 340        iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);
 341
 342        return 0;
 343}
 344
 345/**
 346 * tsi721_dsend - Send a RapidIO doorbell
 347 * @mport: RapidIO master port info
 348 * @index: ID of RapidIO interface
 349 * @destid: Destination ID of target device
 350 * @data: 16-bit info field of RapidIO doorbell
 351 *
 352 * Sends a RapidIO doorbell message. Always returns %0.
 353 */
 354static int tsi721_dsend(struct rio_mport *mport, int index,
 355                        u16 destid, u16 data)
 356{
 357        struct tsi721_device *priv = mport->priv;
 358        u32 offset;
 359
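        /*
         * The outbound doorbell offset within BAR1 encodes the transport
         * type (bit 18) and destination ID (bits 17:2); writing the 16-bit
         * info field at this offset makes the bridge emit the doorbell.
         */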
 360        offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
 361                 (destid << 2);
 362
 363        tsi_debug(DBELL, &priv->pdev->dev,
 364                  "Send Doorbell 0x%04x to destID 0x%x", data, destid);
 365        iowrite16be(data, priv->odb_base + offset);
 366
 367        return 0;
 368}
 369
 370/**
 371 * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
 372 * @priv: tsi721 device-specific data structure
 373 *
 374 * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
  375 * buffer into DB message FIFO and schedules deferred routine to process
 376 * queued DBs.
 377 */
 378static int
 379tsi721_dbell_handler(struct tsi721_device *priv)
 380{
 381        u32 regval;
 382
 383        /* Disable IDB interrupts */
 384        regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
 385        regval &= ~TSI721_SR_CHINT_IDBQRCV;
 386        iowrite32(regval,
 387                priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
 388
 389        schedule_work(&priv->idb_work);
 390
 391        return 0;
 392}
 393
 394static void tsi721_db_dpc(struct work_struct *work)
 395{
 396        struct tsi721_device *priv = container_of(work, struct tsi721_device,
 397                                                    idb_work);
 398        struct rio_mport *mport;
 399        struct rio_dbell *dbell;
 400        int found = 0;
 401        u32 wr_ptr, rd_ptr;
 402        u64 *idb_entry;
 403        u32 regval;
 404        union {
 405                u64 msg;
 406                u8  bytes[8];
 407        } idb;
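        /*
         * Only the first 64 bits of each IDB queue entry are used here;
         * the DBELL_SID/DBELL_TID/DBELL_INF macros extract the source ID,
         * target ID and info fields from those bytes.
         */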
 408
 409        /*
 410         * Process queued inbound doorbells
 411         */
 412        mport = &priv->mport;
 413
 414        wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
 415        rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
 416
 417        while (wr_ptr != rd_ptr) {
 418                idb_entry = (u64 *)(priv->idb_base +
 419                                        (TSI721_IDB_ENTRY_SIZE * rd_ptr));
 420                rd_ptr++;
 421                rd_ptr %= IDB_QSIZE;
 422                idb.msg = *idb_entry;
 423                *idb_entry = 0;
 424
 425                /* Process one doorbell */
 426                list_for_each_entry(dbell, &mport->dbells, node) {
 427                        if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
 428                            (dbell->res->end >= DBELL_INF(idb.bytes))) {
 429                                found = 1;
 430                                break;
 431                        }
 432                }
 433
 434                if (found) {
 435                        dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
 436                                    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
 437                } else {
 438                        tsi_debug(DBELL, &priv->pdev->dev,
 439                                  "spurious IDB sid %2.2x tid %2.2x info %4.4x",
 440                                  DBELL_SID(idb.bytes), DBELL_TID(idb.bytes),
 441                                  DBELL_INF(idb.bytes));
 442                }
 443
 444                wr_ptr = ioread32(priv->regs +
 445                                  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
 446        }
 447
 448        iowrite32(rd_ptr & (IDB_QSIZE - 1),
 449                priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
 450
 451        /* Re-enable IDB interrupts */
 452        regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
 453        regval |= TSI721_SR_CHINT_IDBQRCV;
 454        iowrite32(regval,
 455                priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
 456
 457        wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
 458        if (wr_ptr != rd_ptr)
 459                schedule_work(&priv->idb_work);
 460}
 461
 462/**
 463 * tsi721_irqhandler - Tsi721 interrupt handler
 464 * @irq: Linux interrupt number
 465 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 466 *
 467 * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
 468 * interrupt events and calls an event-specific handler(s).
 469 */
 470static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
 471{
 472        struct tsi721_device *priv = (struct tsi721_device *)ptr;
 473        u32 dev_int;
 474        u32 dev_ch_int;
 475        u32 intval;
 476        u32 ch_inte;
 477
 478        /* For MSI mode disable all device-level interrupts */
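        /*
         * They are re-enabled at the end of this handler so that events
         * arriving while it runs still raise a new MSI message.
         */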
 479        if (priv->flags & TSI721_USING_MSI)
 480                iowrite32(0, priv->regs + TSI721_DEV_INTE);
 481
 482        dev_int = ioread32(priv->regs + TSI721_DEV_INT);
 483        if (!dev_int)
 484                return IRQ_NONE;
 485
 486        dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);
 487
 488        if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
 489                /* Service SR2PC Channel interrupts */
 490                if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
 491                        /* Service Inbound Doorbell interrupt */
 492                        intval = ioread32(priv->regs +
 493                                                TSI721_SR_CHINT(IDB_QUEUE));
 494                        if (intval & TSI721_SR_CHINT_IDBQRCV)
 495                                tsi721_dbell_handler(priv);
 496                        else
 497                                tsi_info(&priv->pdev->dev,
 498                                        "Unsupported SR_CH_INT %x", intval);
 499
 500                        /* Clear interrupts */
 501                        iowrite32(intval,
 502                                priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
 503                        ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
 504                }
 505        }
 506
 507        if (dev_int & TSI721_DEV_INT_SMSG_CH) {
 508                int ch;
 509
 510                /*
 511                 * Service channel interrupts from Messaging Engine
 512                 */
 513
 514                if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
  515                        /* Disable signaled IB MSG Channel interrupts */
 516                        ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
 517                        ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
 518                        iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
 519
 520                        /*
 521                         * Process Inbound Message interrupt for each MBOX
 522                         */
 523                        for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
 524                                if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
 525                                        continue;
 526                                tsi721_imsg_handler(priv, ch);
 527                        }
 528                }
 529
 530                if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
 531                        /* Disable signaled OB MSG Channel interrupts */
 532                        ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
 533                        ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
 534                        iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
 535
 536                        /*
 537                         * Process Outbound Message interrupts for each MBOX
 538                         */
 539
 540                        for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
 541                                if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
 542                                        continue;
 543                                tsi721_omsg_handler(priv, ch);
 544                        }
 545                }
 546        }
 547
 548        if (dev_int & TSI721_DEV_INT_SRIO) {
 549                /* Service SRIO MAC interrupts */
 550                intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
 551                if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
 552                        tsi721_pw_handler(priv);
 553        }
 554
 555#ifdef CONFIG_RAPIDIO_DMA_ENGINE
 556        if (dev_int & TSI721_DEV_INT_BDMA_CH) {
 557                int ch;
 558
 559                if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
 560                        tsi_debug(DMA, &priv->pdev->dev,
 561                                  "IRQ from DMA channel 0x%08x", dev_ch_int);
 562
 563                        for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
 564                                if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
 565                                        continue;
 566                                tsi721_bdma_handler(&priv->bdma[ch]);
 567                        }
 568                }
 569        }
 570#endif
 571
 572        /* For MSI mode re-enable device-level interrupts */
 573        if (priv->flags & TSI721_USING_MSI) {
 574                dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
 575                        TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
 576                iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
 577        }
 578
 579        return IRQ_HANDLED;
 580}
 581
 582static void tsi721_interrupts_init(struct tsi721_device *priv)
 583{
 584        u32 intr;
 585
 586        /* Enable IDB interrupts */
 587        iowrite32(TSI721_SR_CHINT_ALL,
 588                priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
 589        iowrite32(TSI721_SR_CHINT_IDBQRCV,
 590                priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
 591
 592        /* Enable SRIO MAC interrupts */
 593        iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
 594                priv->regs + TSI721_RIO_EM_DEV_INT_EN);
 595
 596        /* Enable interrupts from channels in use */
 597#ifdef CONFIG_RAPIDIO_DMA_ENGINE
 598        intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
 599                (TSI721_INT_BDMA_CHAN_M &
 600                 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
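        /*
         * The maintenance BDMA channel is excluded above because
         * tsi721_maint_dma() polls for completion instead of relying on
         * its interrupt.
         */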
 601#else
 602        intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
 603#endif
 604        iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
 605
 606        if (priv->flags & TSI721_USING_MSIX)
 607                intr = TSI721_DEV_INT_SRIO;
 608        else
 609                intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
 610                        TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
 611
 612        iowrite32(intr, priv->regs + TSI721_DEV_INTE);
 613        ioread32(priv->regs + TSI721_DEV_INTE);
 614}
 615
 616#ifdef CONFIG_PCI_MSI
 617/**
 618 * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
 619 * @irq: Linux interrupt number
 620 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 621 *
 622 * Handles outbound messaging interrupts signaled using MSI-X.
 623 */
 624static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
 625{
 626        struct tsi721_device *priv = (struct tsi721_device *)ptr;
 627        int mbox;
 628
 629        mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
 630        tsi721_omsg_handler(priv, mbox);
 631        return IRQ_HANDLED;
 632}
 633
 634/**
 635 * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
 636 * @irq: Linux interrupt number
 637 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 638 *
 639 * Handles inbound messaging interrupts signaled using MSI-X.
 640 */
 641static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
 642{
 643        struct tsi721_device *priv = (struct tsi721_device *)ptr;
 644        int mbox;
 645
 646        mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
 647        tsi721_imsg_handler(priv, mbox + 4);
 648        return IRQ_HANDLED;
 649}
 650
 651/**
 652 * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
 653 * @irq: Linux interrupt number
 654 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 655 *
 656 * Handles Tsi721 interrupts from SRIO MAC.
 657 */
 658static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
 659{
 660        struct tsi721_device *priv = (struct tsi721_device *)ptr;
 661        u32 srio_int;
 662
 663        /* Service SRIO MAC interrupts */
 664        srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
 665        if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
 666                tsi721_pw_handler(priv);
 667
 668        return IRQ_HANDLED;
 669}
 670
 671/**
 672 * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
 673 * @irq: Linux interrupt number
 674 * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
 675 *
 676 * Handles Tsi721 interrupts from SR2PC Channel.
  677 * NOTE: At this moment it services only the SR2PC channel associated
  678 * with inbound doorbells.
 679 */
 680static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
 681{
 682        struct tsi721_device *priv = (struct tsi721_device *)ptr;
 683        u32 sr_ch_int;
 684
 685        /* Service Inbound DB interrupt from SR2PC channel */
 686        sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
 687        if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
 688                tsi721_dbell_handler(priv);
 689
 690        /* Clear interrupts */
 691        iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
 692        /* Read back to ensure that interrupt was cleared */
 693        sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
 694
 695        return IRQ_HANDLED;
 696}
 697
 698/**
 699 * tsi721_request_msix - register interrupt service for MSI-X mode.
 700 * @priv: tsi721 device-specific data structure
 701 *
 702 * Registers MSI-X interrupt service routines for interrupts that are active
 703 * immediately after mport initialization. Messaging interrupt service routines
 704 * should be registered during corresponding open requests.
 705 */
 706static int tsi721_request_msix(struct tsi721_device *priv)
 707{
 708        int err = 0;
 709
 710        err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
 711                        tsi721_sr2pc_ch_msix, 0,
 712                        priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv);
 713        if (err)
 714                return err;
 715
 716        err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
 717                        tsi721_srio_msix, 0,
 718                        priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv);
 719        if (err) {
 720                free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
 721                return err;
 722        }
 723
 724        return 0;
 725}
 726
 727/**
 728 * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
 729 * @priv: pointer to tsi721 private data
 730 *
 731 * Configures MSI-X support for Tsi721. Supports only an exact number
 732 * of requested vectors.
 733 */
 734static int tsi721_enable_msix(struct tsi721_device *priv)
 735{
 736        struct msix_entry entries[TSI721_VECT_MAX];
 737        int err;
 738        int i;
 739
 740        entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
 741        entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;
 742
 743        /*
 744         * Initialize MSI-X entries for Messaging Engine:
 745         * this driver supports four RIO mailboxes (inbound and outbound)
  746 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
  747 * an offset of +4 is added to the IB MBOX number.
 748         */
 749        for (i = 0; i < RIO_MAX_MBOX; i++) {
 750                entries[TSI721_VECT_IMB0_RCV + i].entry =
 751                                        TSI721_MSIX_IMSG_DQ_RCV(i + 4);
 752                entries[TSI721_VECT_IMB0_INT + i].entry =
 753                                        TSI721_MSIX_IMSG_INT(i + 4);
 754                entries[TSI721_VECT_OMB0_DONE + i].entry =
 755                                        TSI721_MSIX_OMSG_DONE(i);
 756                entries[TSI721_VECT_OMB0_INT + i].entry =
 757                                        TSI721_MSIX_OMSG_INT(i);
 758        }
 759
 760#ifdef CONFIG_RAPIDIO_DMA_ENGINE
 761        /*
 762         * Initialize MSI-X entries for Block DMA Engine:
  763         * this driver supports TSI721_DMA_CHNUM DMA channels
 764         * (one is reserved for SRIO maintenance transactions)
 765         */
 766        for (i = 0; i < TSI721_DMA_CHNUM; i++) {
 767                entries[TSI721_VECT_DMA0_DONE + i].entry =
 768                                        TSI721_MSIX_DMACH_DONE(i);
 769                entries[TSI721_VECT_DMA0_INT + i].entry =
 770                                        TSI721_MSIX_DMACH_INT(i);
 771        }
 772#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
 773
 774        err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries));
 775        if (err) {
 776                tsi_err(&priv->pdev->dev,
 777                        "Failed to enable MSI-X (err=%d)", err);
 778                return err;
 779        }
 780
 781        /*
 782         * Copy MSI-X vector information into tsi721 private structure
 783         */
 784        priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
 785        snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
 786                 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
 787        priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
 788        snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
 789                 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));
 790
 791        for (i = 0; i < RIO_MAX_MBOX; i++) {
 792                priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
 793                                entries[TSI721_VECT_IMB0_RCV + i].vector;
 794                snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
 795                         IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
 796                         i, pci_name(priv->pdev));
 797
 798                priv->msix[TSI721_VECT_IMB0_INT + i].vector =
 799                                entries[TSI721_VECT_IMB0_INT + i].vector;
 800                snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
 801                         IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
 802                         i, pci_name(priv->pdev));
 803
 804                priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
 805                                entries[TSI721_VECT_OMB0_DONE + i].vector;
 806                snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
 807                         IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
 808                         i, pci_name(priv->pdev));
 809
 810                priv->msix[TSI721_VECT_OMB0_INT + i].vector =
 811                                entries[TSI721_VECT_OMB0_INT + i].vector;
 812                snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
 813                         IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
 814                         i, pci_name(priv->pdev));
 815        }
 816
 817#ifdef CONFIG_RAPIDIO_DMA_ENGINE
 818        for (i = 0; i < TSI721_DMA_CHNUM; i++) {
 819                priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
 820                                entries[TSI721_VECT_DMA0_DONE + i].vector;
 821                snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
 822                         IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
 823                         i, pci_name(priv->pdev));
 824
 825                priv->msix[TSI721_VECT_DMA0_INT + i].vector =
 826                                entries[TSI721_VECT_DMA0_INT + i].vector;
 827                snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
 828                         IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
 829                         i, pci_name(priv->pdev));
 830        }
 831#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
 832
 833        return 0;
 834}
 835#endif /* CONFIG_PCI_MSI */
 836
 837static int tsi721_request_irq(struct tsi721_device *priv)
 838{
 839        int err;
 840
 841#ifdef CONFIG_PCI_MSI
 842        if (priv->flags & TSI721_USING_MSIX)
 843                err = tsi721_request_msix(priv);
 844        else
 845#endif
 846                err = request_irq(priv->pdev->irq, tsi721_irqhandler,
 847                          (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
 848                          DRV_NAME, (void *)priv);
 849
 850        if (err)
 851                tsi_err(&priv->pdev->dev,
 852                        "Unable to allocate interrupt, err=%d", err);
 853
 854        return err;
 855}
 856
 857static void tsi721_free_irq(struct tsi721_device *priv)
 858{
 859#ifdef CONFIG_PCI_MSI
 860        if (priv->flags & TSI721_USING_MSIX) {
 861                free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
 862                free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv);
 863        } else
 864#endif
 865        free_irq(priv->pdev->irq, (void *)priv);
 866}
 867
 868static int
 869tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar,
 870                 u32 size, int *win_id)
 871{
 872        u64 win_base;
 873        u64 bar_base;
 874        u64 bar_end;
 875        u32 align;
 876        struct tsi721_ob_win *win;
 877        struct tsi721_ob_win *new_win = NULL;
 878        int new_win_idx = -1;
 879        int i = 0;
 880
 881        bar_base = pbar->base;
 882        bar_end =  bar_base + pbar->size;
 883        win_base = bar_base;
 884        align = size/TSI721_PC2SR_ZONES;
 885
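        /*
         * Scan the outbound windows for a free slot and for overlap with
         * the candidate [win_base, win_base + size) range; on overlap the
         * base is advanced past the conflicting window and the scan
         * restarts. Whether the final base still fits the BAR is checked
         * below.
         */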
 886        while (i < TSI721_IBWIN_NUM) {
 887                for (i = 0; i < TSI721_IBWIN_NUM; i++) {
 888                        if (!priv->ob_win[i].active) {
 889                                if (new_win == NULL) {
 890                                        new_win = &priv->ob_win[i];
 891                                        new_win_idx = i;
 892                                }
 893                                continue;
 894                        }
 895
 896                        /*
  897                         * If this window belongs to the current BAR, check
  898                         * it for overlap
 899                         */
 900                        win = &priv->ob_win[i];
 901
 902                        if (win->base >= bar_base && win->base < bar_end) {
 903                                if (win_base < (win->base + win->size) &&
 904                                                (win_base + size) > win->base) {
 905                                        /* Overlap detected */
 906                                        win_base = win->base + win->size;
 907                                        win_base = ALIGN(win_base, align);
 908                                        break;
 909                                }
 910                        }
 911                }
 912        }
 913
 914        if (win_base + size > bar_end)
 915                return -ENOMEM;
 916
 917        if (!new_win) {
 918                tsi_err(&priv->pdev->dev, "OBW count tracking failed");
 919                return -EIO;
 920        }
 921
 922        new_win->active = true;
 923        new_win->base = win_base;
 924        new_win->size = size;
 925        new_win->pbar = pbar;
 926        priv->obwin_cnt--;
 927        pbar->free -= size;
 928        *win_id = new_win_idx;
 929        return 0;
 930}
 931
 932static int tsi721_map_outb_win(struct rio_mport *mport, u16 destid, u64 rstart,
 933                        u32 size, u32 flags, dma_addr_t *laddr)
 934{
 935        struct tsi721_device *priv = mport->priv;
 936        int i;
 937        struct tsi721_obw_bar *pbar;
 938        struct tsi721_ob_win *ob_win;
 939        int obw = -1;
 940        u32 rval;
 941        u64 rio_addr;
 942        u32 zsize;
 943        int ret = -ENOMEM;
 944
 945        tsi_debug(OBW, &priv->pdev->dev,
 946                  "did=%d ra=0x%llx sz=0x%x", destid, rstart, size);
 947
 948        if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1)))
 949                return -EINVAL;
 950
 951        if (priv->obwin_cnt == 0)
 952                return -EBUSY;
 953
 954        for (i = 0; i < 2; i++) {
 955                if (priv->p2r_bar[i].free >= size) {
 956                        pbar = &priv->p2r_bar[i];
 957                        ret = tsi721_obw_alloc(priv, pbar, size, &obw);
 958                        if (!ret)
 959                                break;
 960                }
 961        }
 962
 963        if (ret)
 964                return ret;
 965
 966        WARN_ON(obw == -1);
 967        ob_win = &priv->ob_win[obw];
 968        ob_win->destid = destid;
 969        ob_win->rstart = rstart;
 970        tsi_debug(OBW, &priv->pdev->dev,
 971                  "allocated OBW%d @%llx", obw, ob_win->base);
 972
 973        /*
 974         * Configure Outbound Window
 975         */
 976
 977        zsize = size/TSI721_PC2SR_ZONES;
 978        rio_addr = rstart;
 979
 980        /*
 981         * Program Address Translation Zones:
  982         *  This implementation uses all 8 zones associated with the window.
 983         */
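        /*
         * Each zone is programmed by loading the LUT data registers and
         * then writing ZONE_SEL with the GO bit, window index and zone
         * index; the GO bit is polled before every update to ensure the
         * previous LUT write has completed.
         */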
 984        for (i = 0; i < TSI721_PC2SR_ZONES; i++) {
 985
 986                while (ioread32(priv->regs + TSI721_ZONE_SEL) &
 987                        TSI721_ZONE_SEL_GO) {
 988                        udelay(1);
 989                }
 990
 991                rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) |
 992                        TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR;
 993                iowrite32(rval, priv->regs + TSI721_LUT_DATA0);
 994                rval = (u32)(rio_addr >> 32);
 995                iowrite32(rval, priv->regs + TSI721_LUT_DATA1);
 996                rval = destid;
 997                iowrite32(rval, priv->regs + TSI721_LUT_DATA2);
 998
 999                rval = TSI721_ZONE_SEL_GO | (obw << 3) | i;
1000                iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
1001
1002                rio_addr += zsize;
1003        }
1004
1005        iowrite32(TSI721_OBWIN_SIZE(size) << 8,
1006                  priv->regs + TSI721_OBWINSZ(obw));
1007        iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw));
1008        iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN,
1009                  priv->regs + TSI721_OBWINLB(obw));
1010
1011        *laddr = ob_win->base;
1012        return 0;
1013}
1014
1015static void tsi721_unmap_outb_win(struct rio_mport *mport,
1016                                  u16 destid, u64 rstart)
1017{
1018        struct tsi721_device *priv = mport->priv;
1019        struct tsi721_ob_win *ob_win;
1020        int i;
1021
1022        tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart);
1023
1024        for (i = 0; i < TSI721_OBWIN_NUM; i++) {
1025                ob_win = &priv->ob_win[i];
1026
1027                if (ob_win->active &&
1028                    ob_win->destid == destid && ob_win->rstart == rstart) {
1029                        tsi_debug(OBW, &priv->pdev->dev,
1030                                  "free OBW%d @%llx", i, ob_win->base);
1031                        ob_win->active = false;
1032                        iowrite32(0, priv->regs + TSI721_OBWINLB(i));
1033                        ob_win->pbar->free += ob_win->size;
1034                        priv->obwin_cnt++;
1035                        break;
1036                }
1037        }
1038}
1039
1040/**
1041 * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
1042 * translation regions.
1043 * @priv: pointer to tsi721 private data
1044 *
1045 * Disables SREP translation regions.
1046 */
1047static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
1048{
1049        int i, z;
1050        u32 rval;
1051
1052        /* Disable all PC2SR translation windows */
1053        for (i = 0; i < TSI721_OBWIN_NUM; i++)
1054                iowrite32(0, priv->regs + TSI721_OBWINLB(i));
1055
1056        /* Initialize zone lookup tables to avoid ECC errors on reads */
1057        iowrite32(0, priv->regs + TSI721_LUT_DATA0);
1058        iowrite32(0, priv->regs + TSI721_LUT_DATA1);
1059        iowrite32(0, priv->regs + TSI721_LUT_DATA2);
1060
1061        for (i = 0; i < TSI721_OBWIN_NUM; i++) {
1062                for (z = 0; z < TSI721_PC2SR_ZONES; z++) {
1063                        while (ioread32(priv->regs + TSI721_ZONE_SEL) &
1064                                TSI721_ZONE_SEL_GO) {
1065                                udelay(1);
1066                        }
1067                        rval = TSI721_ZONE_SEL_GO | (i << 3) | z;
1068                        iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
1069                }
1070        }
1071
1072        if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) {
1073                priv->obwin_cnt = 0;
1074                return;
1075        }
1076
1077        priv->p2r_bar[0].free = priv->p2r_bar[0].size;
1078        priv->p2r_bar[1].free = priv->p2r_bar[1].size;
1079
1080        for (i = 0; i < TSI721_OBWIN_NUM; i++)
1081                priv->ob_win[i].active = false;
1082
1083        priv->obwin_cnt = TSI721_OBWIN_NUM;
1084}
1085
1086/**
1087 * tsi721_rio_map_inb_mem -- Mapping inbound memory region.
1088 * @mport: RapidIO master port
1089 * @lstart: Local memory space start address.
1090 * @rstart: RapidIO space start address.
1091 * @size: The mapping region size.
1092 * @flags: Flags for mapping. 0 for using default flags.
1093 *
1094 * Return: 0 -- Success.
1095 *
1096 * This function will create the inbound mapping
1097 * from rstart to lstart.
1098 */
1099static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
1100                u64 rstart, u64 size, u32 flags)
1101{
1102        struct tsi721_device *priv = mport->priv;
1103        int i, avail = -1;
1104        u32 regval;
1105        struct tsi721_ib_win *ib_win;
1106        bool direct = (lstart == rstart);
1107        u64 ibw_size;
1108        dma_addr_t loc_start;
1109        u64 ibw_start;
1110        struct tsi721_ib_win_mapping *map = NULL;
1111        int ret = -EBUSY;
1112
1113        /* Max IBW size supported by HW is 16GB */
1114        if (size > 0x400000000UL)
1115                return -EINVAL;
1116
1117        if (direct) {
1118                /* Calculate minimal acceptable window size and base address */
1119
1120                ibw_size = roundup_pow_of_two(size);
1121                ibw_start = lstart & ~(ibw_size - 1);
1122
1123                tsi_debug(IBW, &priv->pdev->dev,
1124                        "Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx",
1125                        rstart, &lstart, size, ibw_start);
1126
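                /*
                 * Grow the naturally aligned power-of-two window until it
                 * fully covers the requested [lstart, lstart + size) range.
                 */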
1127                while ((lstart + size) > (ibw_start + ibw_size)) {
1128                        ibw_size *= 2;
1129                        ibw_start = lstart & ~(ibw_size - 1);
1130                        /* Check for crossing IBW max size 16GB */
1131                        if (ibw_size > 0x400000000UL)
1132                                return -EBUSY;
1133                }
1134
1135                loc_start = ibw_start;
1136
1137                map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC);
1138                if (map == NULL)
1139                        return -ENOMEM;
1140
1141        } else {
1142                tsi_debug(IBW, &priv->pdev->dev,
1143                        "Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
1144                        rstart, &lstart, size);
1145
1146                if (!is_power_of_2(size) || size < 0x1000 ||
1147                    ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
1148                        return -EINVAL;
1149                if (priv->ibwin_cnt == 0)
1150                        return -EBUSY;
1151                ibw_start = rstart;
1152                ibw_size = size;
1153                loc_start = lstart;
1154        }
1155
1156        /*
 1157         * Scan for overlap with active regions and mark the first available
1158         * IB window at the same time.
1159         */
1160        for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1161                ib_win = &priv->ib_win[i];
1162
1163                if (!ib_win->active) {
1164                        if (avail == -1) {
1165                                avail = i;
1166                                ret = 0;
1167                        }
1168                } else if (ibw_start < (ib_win->rstart + ib_win->size) &&
1169                           (ibw_start + ibw_size) > ib_win->rstart) {
1170                        /* Return error if address translation involved */
1171                        if (!direct || ib_win->xlat) {
1172                                ret = -EFAULT;
1173                                break;
1174                        }
1175
1176                        /*
 1177                         * Direct mappings are usually larger than the
 1178                         * originally requested fragments - check if this
 1179                         * new request fits into the existing window.
1180                         */
1181                        if (rstart >= ib_win->rstart &&
1182                            (rstart + size) <= (ib_win->rstart +
1183                                                        ib_win->size)) {
1184                                /* We are in - no further mapping required */
1185                                map->lstart = lstart;
1186                                list_add_tail(&map->node, &ib_win->mappings);
1187                                return 0;
1188                        }
1189
1190                        ret = -EFAULT;
1191                        break;
1192                }
1193        }
1194
1195        if (ret)
1196                goto out;
1197        i = avail;
1198
1199        /* Sanity check: available IB window must be disabled at this point */
1200        regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
1201        if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) {
1202                ret = -EIO;
1203                goto out;
1204        }
1205
1206        ib_win = &priv->ib_win[i];
1207        ib_win->active = true;
1208        ib_win->rstart = ibw_start;
1209        ib_win->lstart = loc_start;
1210        ib_win->size = ibw_size;
1211        ib_win->xlat = (lstart != rstart);
1212        INIT_LIST_HEAD(&ib_win->mappings);
1213
1214        /*
 1215         * When using direct IBW mapping with an IBW larger than requested,
 1216         * multiple local memory blocks can be mapped through the same IBW.
 1217         * To handle this situation we maintain a list of "clients" for such IBWs.
1218         */
1219        if (direct) {
1220                map->lstart = lstart;
1221                list_add_tail(&map->node, &ib_win->mappings);
1222        }
1223
1224        iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8,
1225                        priv->regs + TSI721_IBWIN_SZ(i));
1226
1227        iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i));
1228        iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD),
1229                  priv->regs + TSI721_IBWIN_TLA(i));
1230
1231        iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i));
1232        iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
1233                priv->regs + TSI721_IBWIN_LB(i));
1234
1235        priv->ibwin_cnt--;
1236
1237        tsi_debug(IBW, &priv->pdev->dev,
1238                "Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
1239                i, ibw_start, &loc_start, ibw_size);
1240
1241        return 0;
1242out:
1243        kfree(map);
1244        return ret;
1245}
1246
1247/**
1248 * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region.
1249 * @mport: RapidIO master port
1250 * @lstart: Local memory space start address.
1251 */
1252static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
1253                                dma_addr_t lstart)
1254{
1255        struct tsi721_device *priv = mport->priv;
1256        struct tsi721_ib_win *ib_win;
1257        int i;
1258
1259        tsi_debug(IBW, &priv->pdev->dev,
1260                "Unmap IBW mapped to PCIe_%pad", &lstart);
1261
1262        /* Search for matching active inbound translation window */
1263        for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1264                ib_win = &priv->ib_win[i];
1265
 1266                /* Address-translating IBWs must be an exact match */
1267                if (!ib_win->active ||
1268                    (ib_win->xlat && lstart != ib_win->lstart))
1269                        continue;
1270
1271                if (lstart >= ib_win->lstart &&
1272                    lstart < (ib_win->lstart + ib_win->size)) {
1273
1274                        if (!ib_win->xlat) {
1275                                struct tsi721_ib_win_mapping *map;
1276                                int found = 0;
1277
1278                                list_for_each_entry(map,
1279                                                    &ib_win->mappings, node) {
1280                                        if (map->lstart == lstart) {
1281                                                list_del(&map->node);
1282                                                kfree(map);
1283                                                found = 1;
1284                                                break;
1285                                        }
1286                                }
1287
1288                                if (!found)
1289                                        continue;
1290
1291                                if (!list_empty(&ib_win->mappings))
1292                                        break;
1293                        }
1294
1295                        tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i);
1296                        iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1297                        ib_win->active = false;
1298                        priv->ibwin_cnt++;
1299                        break;
1300                }
1301        }
1302
1303        if (i == TSI721_IBWIN_NUM)
1304                tsi_debug(IBW, &priv->pdev->dev,
1305                        "IB window mapped to %pad not found", &lstart);
1306}
1307
1308/**
1309 * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
1310 * translation regions.
1311 * @priv: pointer to tsi721 private data
1312 *
1313 * Disables inbound windows.
1314 */
1315static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
1316{
1317        int i;
1318
1319        /* Disable all SR2PC inbound windows */
1320        for (i = 0; i < TSI721_IBWIN_NUM; i++)
1321                iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1322        priv->ibwin_cnt = TSI721_IBWIN_NUM;
1323}
1324
1325/*
1326 * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe)
1327 * translation regions.
1328 * @priv: pointer to tsi721 device private data
1329 */
1330static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv)
1331{
1332        struct tsi721_ib_win *ib_win;
1333        int i;
1334
1335        /* Disable all active SR2PC inbound windows */
1336        for (i = 0; i < TSI721_IBWIN_NUM; i++) {
1337                ib_win = &priv->ib_win[i];
1338                if (ib_win->active) {
1339                        iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
1340                        ib_win->active = false;
1341                }
1342        }
1343}
1344
1345/**
1346 * tsi721_port_write_init - Inbound port write interface init
1347 * @priv: pointer to tsi721 private data
1348 *
1349 * Initializes inbound port write handler.
1350 * Returns %0 on success or %-ENOMEM on failure.
1351 */
1352static int tsi721_port_write_init(struct tsi721_device *priv)
1353{
1354        priv->pw_discard_count = 0;
1355        INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
1356        spin_lock_init(&priv->pw_fifo_lock);
1357        if (kfifo_alloc(&priv->pw_fifo,
1358                        TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
1359                tsi_err(&priv->pdev->dev, "PW FIFO allocation failed");
1360                return -ENOMEM;
1361        }
1362
1363        /* Use reliable port-write capture mode */
1364        iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
1365        return 0;
1366}
1367
1368static void tsi721_port_write_free(struct tsi721_device *priv)
1369{
1370        kfifo_free(&priv->pw_fifo);
1371}
1372
1373static int tsi721_doorbell_init(struct tsi721_device *priv)
1374{
1375        /* Outbound Doorbells do not require any setup.
1376         * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
1377         * That BAR1 was mapped during the probe routine.
1378         */
1379
1380        /* Initialize Inbound Doorbell processing DPC and queue */
1381        priv->db_discard_count = 0;
1382        INIT_WORK(&priv->idb_work, tsi721_db_dpc);
1383
1384        /* Allocate buffer for inbound doorbells queue */
1385        priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
1386                                            IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
1387                                            &priv->idb_dma, GFP_KERNEL);
1388        if (!priv->idb_base)
1389                return -ENOMEM;
1390
1391        tsi_debug(DBELL, &priv->pdev->dev,
1392                  "Allocated IDB buffer @ %p (phys = %pad)",
1393                  priv->idb_base, &priv->idb_dma);
1394
1395        iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
1396                priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
1397        iowrite32(((u64)priv->idb_dma >> 32),
1398                priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
1399        iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
1400                priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
1401        /* Enable accepting all inbound doorbells */
1402        iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));
1403
1404        iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));
1405
1406        iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
1407
1408        return 0;
1409}
1410
1411static void tsi721_doorbell_free(struct tsi721_device *priv)
1412{
1413        if (priv->idb_base == NULL)
1414                return;
1415
1416        /* Free buffer allocated for inbound doorbell queue */
1417        dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
1418                          priv->idb_base, priv->idb_dma);
1419        priv->idb_base = NULL;
1420}
1421
1422/**
1423 * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
1424 * @priv: pointer to tsi721 private data
1425 *
1426 * Initialize BDMA channel allocated for RapidIO maintenance read/write
1427 * request generation.
1428 * Returns %0 on success or %-ENOMEM on failure.
1429 */
1430static int tsi721_bdma_maint_init(struct tsi721_device *priv)
1431{
1432        struct tsi721_dma_desc *bd_ptr;
1433        u64             *sts_ptr;
1434        dma_addr_t      bd_phys, sts_phys;
1435        int             sts_size;
1436        int             bd_num = 2;
1437        void __iomem    *regs;
1438
1439        tsi_debug(MAINT, &priv->pdev->dev,
1440                  "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT);
1441
1442        /*
1443         * Initialize DMA channel for maintenance requests
1444         */
1445
1446        priv->mdma.ch_id = TSI721_DMACH_MAINT;
1447        regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
1448
1449        /* Allocate space for DMA descriptors */
1450        bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
1451                                    bd_num * sizeof(struct tsi721_dma_desc),
1452                                    &bd_phys, GFP_KERNEL);
1453        if (!bd_ptr)
1454                return -ENOMEM;
1455
1456        priv->mdma.bd_num = bd_num;
1457        priv->mdma.bd_phys = bd_phys;
1458        priv->mdma.bd_base = bd_ptr;
1459
1460        tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)",
1461                  bd_ptr, &bd_phys);
1462
1463        /* Allocate space for descriptor status FIFO */
1464        sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
1465                                        bd_num : TSI721_DMA_MINSTSSZ;
1466        sts_size = roundup_pow_of_two(sts_size);
1467        sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
1468                                     sts_size * sizeof(struct tsi721_dma_sts),
1469                                     &sts_phys, GFP_KERNEL);
1470        if (!sts_ptr) {
1471                /* Free space allocated for DMA descriptors */
1472                dma_free_coherent(&priv->pdev->dev,
1473                                  bd_num * sizeof(struct tsi721_dma_desc),
1474                                  bd_ptr, bd_phys);
1475                priv->mdma.bd_base = NULL;
1476                return -ENOMEM;
1477        }
1478
1479        priv->mdma.sts_phys = sts_phys;
1480        priv->mdma.sts_base = sts_ptr;
1481        priv->mdma.sts_size = sts_size;
1482
1483        tsi_debug(MAINT, &priv->pdev->dev,
1484                "desc status FIFO @ %p (phys = %pad) size=0x%x",
1485                sts_ptr, &sts_phys, sts_size);
1486
1487        /* Initialize DMA descriptors ring */
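            /* The last entry is a type-3 link descriptor that points back to
             * the ring base address, closing the descriptor ring.
             */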
1488        bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
1489        bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
1490                                                 TSI721_DMAC_DPTRL_MASK);
1491        bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
1492
1493        /* Setup DMA descriptor pointers */
1494        iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
1495        iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
1496                regs + TSI721_DMAC_DPTRL);
1497
1498        /* Setup descriptor status FIFO */
1499        iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
1500        iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
1501                regs + TSI721_DMAC_DSBL);
1502        iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
1503                regs + TSI721_DMAC_DSSZ);
1504
1505        /* Clear interrupt bits */
1506        iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
1507
1508        ioread32(regs + TSI721_DMAC_INT);
1509
1510        /* Toggle DMA channel initialization */
1511        iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
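            /* Read back flushes the posted write before the brief settling delay */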
1512        ioread32(regs + TSI721_DMAC_CTL);
1513        udelay(10);
1514
1515        return 0;
1516}
1517
1518static int tsi721_bdma_maint_free(struct tsi721_device *priv)
1519{
1520        u32 ch_stat;
1521        struct tsi721_bdma_maint *mdma = &priv->mdma;
1522        void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
1523
1524        if (mdma->bd_base == NULL)
1525                return 0;
1526
1527        /* Check if DMA channel still running */
1528        ch_stat = ioread32(regs + TSI721_DMAC_STS);
1529        if (ch_stat & TSI721_DMAC_STS_RUN)
1530                return -EFAULT;
1531
1532        /* Put DMA channel into init state */
1533        iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
1534
1535        /* Free space allocated for DMA descriptors */
1536        dma_free_coherent(&priv->pdev->dev,
1537                mdma->bd_num * sizeof(struct tsi721_dma_desc),
1538                mdma->bd_base, mdma->bd_phys);
1539        mdma->bd_base = NULL;
1540
1541        /* Free space allocated for status FIFO */
1542        dma_free_coherent(&priv->pdev->dev,
1543                mdma->sts_size * sizeof(struct tsi721_dma_sts),
1544                mdma->sts_base, mdma->sts_phys);
1545        mdma->sts_base = NULL;
1546        return 0;
1547}
1548
1549/* Enable Inbound Messaging Interrupts */
1550static void
1551tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
1552                                  u32 inte_mask)
1553{
1554        u32 rval;
1555
1556        if (!inte_mask)
1557                return;
1558
1559        /* Clear pending Inbound Messaging interrupts */
1560        iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1561
1562        /* Enable Inbound Messaging interrupts */
1563        rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1564        iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));
1565
1566        if (priv->flags & TSI721_USING_MSIX)
1567                return; /* Finished if we are in MSI-X mode */
1568
1569        /*
1570         * For MSI and INTA interrupt signalling we need to enable the next levels
1571         */
1572
1573        /* Enable Device Channel Interrupt */
1574        rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1575        iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
1576                  priv->regs + TSI721_DEV_CHAN_INTE);
1577}
1578
1579/* Disable Inbound Messaging Interrupts */
1580static void
1581tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
1582                                   u32 inte_mask)
1583{
1584        u32 rval;
1585
1586        if (!inte_mask)
1587                return;
1588
1589        /* Clear pending Inbound Messaging interrupts */
1590        iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1591
1592        /* Disable Inbound Messaging interrupts */
1593        rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1594        rval &= ~inte_mask;
1595        iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));
1596
1597        if (priv->flags & TSI721_USING_MSIX)
1598                return; /* Finished if we are in MSI-X mode */
1599
1600        /*
1601         * For MSI and INTA interrupt signalling we need to disable the next levels
1602         */
1603
1604        /* Disable Device Channel Interrupt */
1605        rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1606        rval &= ~TSI721_INT_IMSG_CHAN(ch);
1607        iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1608}
1609
1610/* Enable Outbound Messaging interrupts */
1611static void
1612tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
1613                                  u32 inte_mask)
1614{
1615        u32 rval;
1616
1617        if (!inte_mask)
1618                return;
1619
1620        /* Clear pending Outbound Messaging interrupts */
1621        iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1622
1623        /* Enable Outbound Messaging channel interrupts */
1624        rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1625        iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));
1626
1627        if (priv->flags & TSI721_USING_MSIX)
1628                return; /* Finished if we are in MSI-X mode */
1629
1630        /*
1631         * For MSI and INTA interrupt signalling we need to enable the next levels
1632         */
1633
1634        /* Enable Device Channel Interrupt */
1635        rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1636        iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
1637                  priv->regs + TSI721_DEV_CHAN_INTE);
1638}
1639
1640/* Disable Outbound Messaging interrupts */
1641static void
1642tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
1643                                   u32 inte_mask)
1644{
1645        u32 rval;
1646
1647        if (!inte_mask)
1648                return;
1649
1650        /* Clear pending Outbound Messaging interrupts */
1651        iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1652
1653        /* Disable Outbound Messaging interrupts */
1654        rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1655        rval &= ~inte_mask;
1656        iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));
1657
1658        if (priv->flags & TSI721_USING_MSIX)
1659                return; /* Finished if we are in MSI-X mode */
1660
1661        /*
1662         * For MSI and INTA interrupt signalling we need to disable the next levels
1663         */
1664
1665        /* Disable Device Channel Interrupt */
1666        rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1667        rval &= ~TSI721_INT_OMSG_CHAN(ch);
1668        iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1669}
1670
1671/**
1672 * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
1673 * @mport: Master port with outbound message queue
1674 * @rdev: Target of outbound message
1675 * @mbox: Outbound mailbox
1676 * @buffer: Message to add to outbound queue
1677 * @len: Length of message
1678 */
1679static int
1680tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
1681                        void *buffer, size_t len)
1682{
1683        struct tsi721_device *priv = mport->priv;
1684        struct tsi721_omsg_desc *desc;
1685        u32 tx_slot;
1686        unsigned long flags;
1687
1688        if (!priv->omsg_init[mbox] ||
1689            len > TSI721_MSG_MAX_SIZE || len < 8)
1690                return -EINVAL;
1691
1692        spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags);
1693
1694        tx_slot = priv->omsg_ring[mbox].tx_slot;
1695
1696        /* Copy message into transfer buffer */
1697        memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
1698
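            /* Round the length up to the next 8-byte boundary; the descriptor
             * length field below holds whole double-words (len & 0xff8).
             */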
1699        if (len & 0x7)
1700                len += 8;
1701
1702        /* Build descriptor associated with buffer */
1703        desc = priv->omsg_ring[mbox].omd_base;
1704        desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
1705#ifdef TSI721_OMSG_DESC_INT
1706        /* Request IOF_DONE interrupt generation for every fourth frame in the queue */
1707        if (tx_slot % 4 == 0)
1708                desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
1709#endif
1710        desc[tx_slot].msg_info =
1711                cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
1712                            (0xe << 12) | (len & 0xff8));
1713        desc[tx_slot].bufptr_lo =
1714                cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
1715                            0xffffffff);
1716        desc[tx_slot].bufptr_hi =
1717                cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
1718
1719        priv->omsg_ring[mbox].wr_count++;
1720
1721        /* Go to next descriptor */
1722        if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
1723                priv->omsg_ring[mbox].tx_slot = 0;
1724                /* Step over the link descriptor at the end of the ring */
1725                priv->omsg_ring[mbox].wr_count++;
1726        }
1727
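            /* Make sure all descriptor updates are visible in memory before the
             * hardware write counter is advanced below.
             */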
1728        mb();
1729
1730        /* Set new write count value */
1731        iowrite32(priv->omsg_ring[mbox].wr_count,
1732                priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1733        ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1734
1735        spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags);
1736
1737        return 0;
1738}
1739
1740/**
1741 * tsi721_omsg_handler - Outbound Message Interrupt Handler
1742 * @priv: pointer to tsi721 private data
1743 * @ch:   number of OB MSG channel to service
1744 *
1745 * Services channel interrupts from outbound messaging engine.
1746 */
1747static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
1748{
1749        u32 omsg_int;
1750        struct rio_mport *mport = &priv->mport;
1751        void *dev_id = NULL;
1752        u32 tx_slot = 0xffffffff;
1753        int do_callback = 0;
1754
1755        spin_lock(&priv->omsg_ring[ch].lock);
1756
1757        omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
1758
1759        if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
1760                tsi_info(&priv->pdev->dev,
1761                        "OB MBOX%d: Status FIFO is full", ch);
1762
1763        if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
1764                u32 srd_ptr;
1765                u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
1766                int i, j;
1767
1768                /*
1769                 * Find last successfully processed descriptor
1770                 */
1771
1772                /* Check and clear descriptor status FIFO entries */
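                    /* Each status FIFO line holds eight 64-bit descriptor pointers;
                     * non-zero entries mark completed descriptors.
                     */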
1773                srd_ptr = priv->omsg_ring[ch].sts_rdptr;
1774                sts_ptr = priv->omsg_ring[ch].sts_base;
1775                j = srd_ptr * 8;
1776                while (sts_ptr[j]) {
1777                        for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
1778                                prev_ptr = last_ptr;
1779                                last_ptr = le64_to_cpu(sts_ptr[j]);
1780                                sts_ptr[j] = 0;
1781                        }
1782
1783                        ++srd_ptr;
1784                        srd_ptr %= priv->omsg_ring[ch].sts_size;
1785                        j = srd_ptr * 8;
1786                }
1787
1788                if (last_ptr == 0)
1789                        goto no_sts_update;
1790
1791                priv->omsg_ring[ch].sts_rdptr = srd_ptr;
1792                iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
1793
1794                if (!mport->outb_msg[ch].mcback)
1795                        goto no_sts_update;
1796
1797                /* Inform upper layer about transfer completion */
1798
1799                tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
1800                                                sizeof(struct tsi721_omsg_desc);
1801
1802                /*
1803                 * Check if this is a Link Descriptor (LD).
1804                 * If yes, ignore LD and use descriptor processed
1805                 * before LD.
1806                 */
1807                if (tx_slot == priv->omsg_ring[ch].size) {
1808                        if (prev_ptr)
1809                                tx_slot = (prev_ptr -
1810                                        (u64)priv->omsg_ring[ch].omd_phys)/
1811                                                sizeof(struct tsi721_omsg_desc);
1812                        else
1813                                goto no_sts_update;
1814                }
1815
1816                if (tx_slot >= priv->omsg_ring[ch].size)
1817                        tsi_debug(OMSG, &priv->pdev->dev,
1818                                  "OB_MSG tx_slot=%x >= size=%x",
1819                                  tx_slot, priv->omsg_ring[ch].size);
1820                WARN_ON(tx_slot >= priv->omsg_ring[ch].size);
1821
1822                /* Move slot index to the next message to be sent */
1823                ++tx_slot;
1824                if (tx_slot == priv->omsg_ring[ch].size)
1825                        tx_slot = 0;
1826
1827                dev_id = priv->omsg_ring[ch].dev_id;
1828                do_callback = 1;
1829        }
1830
1831no_sts_update:
1832
1833        if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
1834                /*
1835                 * Outbound message operation aborted due to error,
1836                 * reinitialize OB MSG channel
1837                 */
1838
1839                tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x",
1840                          ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
1841
1842                iowrite32(TSI721_OBDMAC_INT_ERROR,
1843                                priv->regs + TSI721_OBDMAC_INT(ch));
1844                iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
1845                                priv->regs + TSI721_OBDMAC_CTL(ch));
1846                ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
1847
1848                /* Inform upper level to clear all pending tx slots */
1849                dev_id = priv->omsg_ring[ch].dev_id;
1850                tx_slot = priv->omsg_ring[ch].tx_slot;
1851                do_callback = 1;
1852
1853                /* Synch tx_slot tracking */
1854                iowrite32(priv->omsg_ring[ch].tx_slot,
1855                        priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1856                ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1857                priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
1858                priv->omsg_ring[ch].sts_rdptr = 0;
1859        }
1860
1861        /* Clear channel interrupts */
1862        iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));
1863
1864        if (!(priv->flags & TSI721_USING_MSIX)) {
1865                u32 ch_inte;
1866
1867                /* Re-enable channel interrupts */
1868                ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1869                ch_inte |= TSI721_INT_OMSG_CHAN(ch);
1870                iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
1871        }
1872
1873        spin_unlock(&priv->omsg_ring[ch].lock);
1874
1875        if (mport->outb_msg[ch].mcback && do_callback)
1876                mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot);
1877}
1878
1879/**
1880 * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
1881 * @mport: Master port implementing Outbound Messaging Engine
1882 * @dev_id: Device specific pointer to pass on event
1883 * @mbox: Mailbox to open
1884 * @entries: Number of entries in the outbound mailbox ring
1885 */
1886static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1887                                 int mbox, int entries)
1888{
1889        struct tsi721_device *priv = mport->priv;
1890        struct tsi721_omsg_desc *bd_ptr;
1891        int i, rc = 0;
1892
1893        if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
1894            (entries > (TSI721_OMSGD_RING_SIZE)) ||
1895            (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
1896                rc = -EINVAL;
1897                goto out;
1898        }
1899
1900        if ((mbox_sel & (1 << mbox)) == 0) {
1901                rc = -ENODEV;
1902                goto out;
1903        }
1904
1905        priv->omsg_ring[mbox].dev_id = dev_id;
1906        priv->omsg_ring[mbox].size = entries;
1907        priv->omsg_ring[mbox].sts_rdptr = 0;
1908        spin_lock_init(&priv->omsg_ring[mbox].lock);
1909
1910        /* Allocate outbound message buffers, one for each of the
1911           requested descriptor ring entries */
1912        for (i = 0; i < entries; i++) {
1913                priv->omsg_ring[mbox].omq_base[i] =
1914                        dma_alloc_coherent(
1915                                &priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
1916                                &priv->omsg_ring[mbox].omq_phys[i],
1917                                GFP_KERNEL);
1918                if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
1919                        tsi_debug(OMSG, &priv->pdev->dev,
1920                                  "ENOMEM for OB_MSG_%d data buffer", mbox);
1921                        rc = -ENOMEM;
1922                        goto out_buf;
1923                }
1924        }
1925
1926        /* Outbound message descriptor allocation */
1927        priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
1928                                &priv->pdev->dev,
1929                                (entries + 1) * sizeof(struct tsi721_omsg_desc),
1930                                &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
1931        if (priv->omsg_ring[mbox].omd_base == NULL) {
1932                tsi_debug(OMSG, &priv->pdev->dev,
1933                        "ENOMEM for OB_MSG_%d descriptor memory", mbox);
1934                rc = -ENOMEM;
1935                goto out_buf;
1936        }
1937
1938        priv->omsg_ring[mbox].tx_slot = 0;
1939
1940        /* Outbound message descriptor status FIFO allocation */
1941        priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1942        priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
1943                                                            priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
1944                                                            &priv->omsg_ring[mbox].sts_phys,
1945                                                            GFP_KERNEL);
1946        if (priv->omsg_ring[mbox].sts_base == NULL) {
1947                tsi_debug(OMSG, &priv->pdev->dev,
1948                        "ENOMEM for OB_MSG_%d status FIFO", mbox);
1949                rc = -ENOMEM;
1950                goto out_desc;
1951        }
1952
1953        /*
1954         * Configure Outbound Messaging Engine
1955         */
1956
1957        /* Setup Outbound Message descriptor pointer */
1958        iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
1959                        priv->regs + TSI721_OBDMAC_DPTRH(mbox));
1960        iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
1961                                        TSI721_OBDMAC_DPTRL_MASK),
1962                        priv->regs + TSI721_OBDMAC_DPTRL(mbox));
1963
1964        /* Setup Outbound Message descriptor status FIFO */
1965        iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
1966                        priv->regs + TSI721_OBDMAC_DSBH(mbox));
1967        iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
1968                                        TSI721_OBDMAC_DSBL_MASK),
1969                        priv->regs + TSI721_OBDMAC_DSBL(mbox));
1970        iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
1971                priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));
1972
1973        /* Enable interrupts */
1974
1975#ifdef CONFIG_PCI_MSI
1976        if (priv->flags & TSI721_USING_MSIX) {
1977                int idx = TSI721_VECT_OMB0_DONE + mbox;
1978
1979                /* Request interrupt service if we are in MSI-X mode */
1980                rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
1981                                 priv->msix[idx].irq_name, (void *)priv);
1982
1983                if (rc) {
1984                        tsi_debug(OMSG, &priv->pdev->dev,
1985                                "Unable to get MSI-X IRQ for OBOX%d-DONE",
1986                                mbox);
1987                        goto out_stat;
1988                }
1989
1990                idx = TSI721_VECT_OMB0_INT + mbox;
1991                rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
1992                                 priv->msix[idx].irq_name, (void *)priv);
1993
1994                if (rc) {
1995                        tsi_debug(OMSG, &priv->pdev->dev,
1996                                "Unable to get MSI-X IRQ for MBOX%d-INT", mbox);
1997                        idx = TSI721_VECT_OMB0_DONE + mbox;
1998                        free_irq(priv->msix[idx].vector, (void *)priv);
1999                        goto out_stat;
2000                }
2001        }
2002#endif /* CONFIG_PCI_MSI */
2003
2004        tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);
2005
2006        /* Initialize Outbound Message descriptors ring */
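            /* Entry [entries] is a type-5 link descriptor that wraps the ring
             * back to its base address.
             */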
2007        bd_ptr = priv->omsg_ring[mbox].omd_base;
2008        bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
2009        bd_ptr[entries].msg_info = 0;
2010        bd_ptr[entries].next_lo =
2011                cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
2012                TSI721_OBDMAC_DPTRL_MASK);
2013        bd_ptr[entries].next_hi =
2014                cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
2015        priv->omsg_ring[mbox].wr_count = 0;
2016        mb();
2017
2018        /* Initialize Outbound Message engine */
2019        iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
2020                  priv->regs + TSI721_OBDMAC_CTL(mbox));
2021        ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
2022        udelay(10);
2023
2024        priv->omsg_init[mbox] = 1;
2025
2026        return 0;
2027
2028#ifdef CONFIG_PCI_MSI
2029out_stat:
2030        dma_free_coherent(&priv->pdev->dev,
2031                priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
2032                priv->omsg_ring[mbox].sts_base,
2033                priv->omsg_ring[mbox].sts_phys);
2034
2035        priv->omsg_ring[mbox].sts_base = NULL;
2036#endif /* CONFIG_PCI_MSI */
2037
2038out_desc:
2039        dma_free_coherent(&priv->pdev->dev,
2040                (entries + 1) * sizeof(struct tsi721_omsg_desc),
2041                priv->omsg_ring[mbox].omd_base,
2042                priv->omsg_ring[mbox].omd_phys);
2043
2044        priv->omsg_ring[mbox].omd_base = NULL;
2045
2046out_buf:
2047        for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
2048                if (priv->omsg_ring[mbox].omq_base[i]) {
2049                        dma_free_coherent(&priv->pdev->dev,
2050                                TSI721_MSG_BUFFER_SIZE,
2051                                priv->omsg_ring[mbox].omq_base[i],
2052                                priv->omsg_ring[mbox].omq_phys[i]);
2053
2054                        priv->omsg_ring[mbox].omq_base[i] = NULL;
2055                }
2056        }
2057
2058out:
2059        return rc;
2060}
2061
2062/**
2063 * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
2064 * @mport: Master port implementing the outbound message unit
2065 * @mbox: Mailbox to close
2066 */
2067static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
2068{
2069        struct tsi721_device *priv = mport->priv;
2070        u32 i;
2071
2072        if (!priv->omsg_init[mbox])
2073                return;
2074        priv->omsg_init[mbox] = 0;
2075
2076        /* Disable Interrupts */
2077
2078        tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);
2079
2080#ifdef CONFIG_PCI_MSI
2081        if (priv->flags & TSI721_USING_MSIX) {
2082                free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
2083                         (void *)priv);
2084                free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
2085                         (void *)priv);
2086        }
2087#endif /* CONFIG_PCI_MSI */
2088
2089        /* Free OMSG Descriptor Status FIFO */
2090        dma_free_coherent(&priv->pdev->dev,
2091                priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
2092                priv->omsg_ring[mbox].sts_base,
2093                priv->omsg_ring[mbox].sts_phys);
2094
2095        priv->omsg_ring[mbox].sts_base = NULL;
2096
2097        /* Free OMSG descriptors */
2098        dma_free_coherent(&priv->pdev->dev,
2099                (priv->omsg_ring[mbox].size + 1) *
2100                        sizeof(struct tsi721_omsg_desc),
2101                priv->omsg_ring[mbox].omd_base,
2102                priv->omsg_ring[mbox].omd_phys);
2103
2104        priv->omsg_ring[mbox].omd_base = NULL;
2105
2106        /* Free message buffers */
2107        for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
2108                if (priv->omsg_ring[mbox].omq_base[i]) {
2109                        dma_free_coherent(&priv->pdev->dev,
2110                                TSI721_MSG_BUFFER_SIZE,
2111                                priv->omsg_ring[mbox].omq_base[i],
2112                                priv->omsg_ring[mbox].omq_phys[i]);
2113
2114                        priv->omsg_ring[mbox].omq_base[i] = NULL;
2115                }
2116        }
2117}
2118
2119/**
2120 * tsi721_imsg_handler - Inbound Message Interrupt Handler
2121 * @priv: pointer to tsi721 private data
2122 * @ch: inbound message channel number to service
2123 *
2124 * Services channel interrupts from inbound messaging engine.
2125 */
2126static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
2127{
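            /* Inbound messaging channels 4..7 service mailboxes 0..3 in this driver */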
2128        u32 mbox = ch - 4;
2129        u32 imsg_int;
2130        struct rio_mport *mport = &priv->mport;
2131
2132        spin_lock(&priv->imsg_ring[mbox].lock);
2133
2134        imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
2135
2136        if (imsg_int & TSI721_IBDMAC_INT_SRTO)
2137                tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox);
2138
2139        if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
2140                tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox);
2141
2142        if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
2143                tsi_info(&priv->pdev->dev, "IB MBOX%d IB free queue low", mbox);
2144
2145        /* Clear IB channel interrupts */
2146        iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
2147
2148        /* If an IB message has been received, notify the upper layer */
2149        if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
2150                mport->inb_msg[mbox].mcback)
2151                mport->inb_msg[mbox].mcback(mport,
2152                                priv->imsg_ring[mbox].dev_id, mbox, -1);
2153
2154        if (!(priv->flags & TSI721_USING_MSIX)) {
2155                u32 ch_inte;
2156
2157                /* Re-enable channel interrupts */
2158                ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
2159                ch_inte |= TSI721_INT_IMSG_CHAN(ch);
2160                iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
2161        }
2162
2163        spin_unlock(&priv->imsg_ring[mbox].lock);
2164}
2165
2166/**
2167 * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
2168 * @mport: Master port implementing the Inbound Messaging Engine
2169 * @dev_id: Device specific pointer to pass on event
2170 * @mbox: Mailbox to open
2171 * @entries: Number of entries in the inbound mailbox ring
2172 */
2173static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
2174                                int mbox, int entries)
2175{
2176        struct tsi721_device *priv = mport->priv;
2177        int ch = mbox + 4;
2178        int i;
2179        u64 *free_ptr;
2180        int rc = 0;
2181
2182        if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
2183            (entries > TSI721_IMSGD_RING_SIZE) ||
2184            (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
2185                rc = -EINVAL;
2186                goto out;
2187        }
2188
2189        if ((mbox_sel & (1 << mbox)) == 0) {
2190                rc = -ENODEV;
2191                goto out;
2192        }
2193
2194        /* Initialize IB Messaging Ring */
2195        priv->imsg_ring[mbox].dev_id = dev_id;
2196        priv->imsg_ring[mbox].size = entries;
2197        priv->imsg_ring[mbox].rx_slot = 0;
2198        priv->imsg_ring[mbox].desc_rdptr = 0;
2199        priv->imsg_ring[mbox].fq_wrptr = 0;
2200        for (i = 0; i < priv->imsg_ring[mbox].size; i++)
2201                priv->imsg_ring[mbox].imq_base[i] = NULL;
2202        spin_lock_init(&priv->imsg_ring[mbox].lock);
2203
2204        /* Allocate buffers for incoming messages */
2205        priv->imsg_ring[mbox].buf_base =
2206                dma_alloc_coherent(&priv->pdev->dev,
2207                                   entries * TSI721_MSG_BUFFER_SIZE,
2208                                   &priv->imsg_ring[mbox].buf_phys,
2209                                   GFP_KERNEL);
2210
2211        if (priv->imsg_ring[mbox].buf_base == NULL) {
2212                tsi_err(&priv->pdev->dev,
2213                        "Failed to allocate buffers for IB MBOX%d", mbox);
2214                rc = -ENOMEM;
2215                goto out;
2216        }
2217
2218        /* Allocate memory for circular free list */
2219        priv->imsg_ring[mbox].imfq_base =
2220                dma_alloc_coherent(&priv->pdev->dev,
2221                                   entries * 8,
2222                                   &priv->imsg_ring[mbox].imfq_phys,
2223                                   GFP_KERNEL);
2224
2225        if (priv->imsg_ring[mbox].imfq_base == NULL) {
2226                tsi_err(&priv->pdev->dev,
2227                        "Failed to allocate free queue for IB MBOX%d", mbox);
2228                rc = -ENOMEM;
2229                goto out_buf;
2230        }
2231
2232        /* Allocate memory for Inbound message descriptors */
2233        priv->imsg_ring[mbox].imd_base =
2234                dma_alloc_coherent(&priv->pdev->dev,
2235                                   entries * sizeof(struct tsi721_imsg_desc),
2236                                   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);
2237
2238        if (priv->imsg_ring[mbox].imd_base == NULL) {
2239                tsi_err(&priv->pdev->dev,
2240                        "Failed to allocate descriptor memory for IB MBOX%d",
2241                        mbox);
2242                rc = -ENOMEM;
2243                goto out_dma;
2244        }
2245
2246        /* Fill free buffer pointer list */
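            /* Each free-list entry points into buf_base; buffers are spaced
             * 0x1000 bytes apart.
             */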
2247        free_ptr = priv->imsg_ring[mbox].imfq_base;
2248        for (i = 0; i < entries; i++)
2249                free_ptr[i] = cpu_to_le64(
2250                                (u64)(priv->imsg_ring[mbox].buf_phys) +
2251                                i * 0x1000);
2252
2253        mb();
2254
2255        /*
2256         * To map inbound SRIO messages into the appropriate queues, the Inbound
2257         * Device ID register in the messaging engine must be set. This is done
2258         * once, when the first inbound mailbox is requested.
2259         */
2260        if (!(priv->flags & TSI721_IMSGID_SET)) {
2261                iowrite32((u32)priv->mport.host_deviceid,
2262                        priv->regs + TSI721_IB_DEVID);
2263                priv->flags |= TSI721_IMSGID_SET;
2264        }
2265
2266        /*
2267         * Configure Inbound Messaging channel (ch = mbox + 4)
2268         */
2269
2270        /* Setup Inbound Message free queue */
2271        iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
2272                priv->regs + TSI721_IBDMAC_FQBH(ch));
2273        iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
2274                        TSI721_IBDMAC_FQBL_MASK),
2275                priv->regs+TSI721_IBDMAC_FQBL(ch));
2276        iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
2277                priv->regs + TSI721_IBDMAC_FQSZ(ch));
2278
2279        /* Setup Inbound Message descriptor queue */
2280        iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
2281                priv->regs + TSI721_IBDMAC_DQBH(ch));
2282        iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
2283                   (u32)TSI721_IBDMAC_DQBL_MASK),
2284                priv->regs+TSI721_IBDMAC_DQBL(ch));
2285        iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
2286                priv->regs + TSI721_IBDMAC_DQSZ(ch));
2287
2288        /* Enable interrupts */
2289
2290#ifdef CONFIG_PCI_MSI
2291        if (priv->flags & TSI721_USING_MSIX) {
2292                int idx = TSI721_VECT_IMB0_RCV + mbox;
2293
2294                /* Request interrupt service if we are in MSI-X mode */
2295                rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
2296                                 priv->msix[idx].irq_name, (void *)priv);
2297
2298                if (rc) {
2299                        tsi_debug(IMSG, &priv->pdev->dev,
2300                                "Unable to get MSI-X IRQ for IBOX%d-DONE",
2301                                mbox);
2302                        goto out_desc;
2303                }
2304
2305                idx = TSI721_VECT_IMB0_INT + mbox;
2306                rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
2307                                 priv->msix[idx].irq_name, (void *)priv);
2308
2309                if (rc) {
2310                        tsi_debug(IMSG, &priv->pdev->dev,
2311                                "Unable to get MSI-X IRQ for IBOX%d-INT", mbox);
2312                        free_irq(
2313                                priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
2314                                (void *)priv);
2315                        goto out_desc;
2316                }
2317        }
2318#endif /* CONFIG_PCI_MSI */
2319
2320        tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);
2321
2322        /* Initialize Inbound Message Engine */
2323        iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
2324        ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
2325        udelay(10);
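            /* Make all free-list buffers available to the hardware: the free
             * queue write pointer starts at the last slot.
             */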
2326        priv->imsg_ring[mbox].fq_wrptr = entries - 1;
2327        iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));
2328
2329        priv->imsg_init[mbox] = 1;
2330        return 0;
2331
2332#ifdef CONFIG_PCI_MSI
2333out_desc:
2334        dma_free_coherent(&priv->pdev->dev,
2335                priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
2336                priv->imsg_ring[mbox].imd_base,
2337                priv->imsg_ring[mbox].imd_phys);
2338
2339        priv->imsg_ring[mbox].imd_base = NULL;
2340#endif /* CONFIG_PCI_MSI */
2341
2342out_dma:
2343        dma_free_coherent(&priv->pdev->dev,
2344                priv->imsg_ring[mbox].size * 8,
2345                priv->imsg_ring[mbox].imfq_base,
2346                priv->imsg_ring[mbox].imfq_phys);
2347
2348        priv->imsg_ring[mbox].imfq_base = NULL;
2349
2350out_buf:
2351        dma_free_coherent(&priv->pdev->dev,
2352                priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
2353                priv->imsg_ring[mbox].buf_base,
2354                priv->imsg_ring[mbox].buf_phys);
2355
2356        priv->imsg_ring[mbox].buf_base = NULL;
2357
2358out:
2359        return rc;
2360}
2361
2362/**
2363 * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
2364 * @mport: Master port implementing the Inbound Messaging Engine
2365 * @mbox: Mailbox to close
2366 */
2367static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
2368{
2369        struct tsi721_device *priv = mport->priv;
2370        u32 rx_slot;
2371        int ch = mbox + 4;
2372
2373        if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
2374                return;
2375        priv->imsg_init[mbox] = 0;
2376
2377        /* Disable Inbound Messaging Engine */
2378
2379        /* Disable Interrupts */
2380        tsi721_imsg_interrupt_disable(priv, ch, TSI721_OBDMAC_INT_MASK);
2381
2382#ifdef CONFIG_PCI_MSI
2383        if (priv->flags & TSI721_USING_MSIX) {
2384                free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
2385                                (void *)priv);
2386                free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
2387                                (void *)priv);
2388        }
2389#endif /* CONFIG_PCI_MSI */
2390
2391        /* Clear Inbound Buffer Queue */
2392        for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
2393                priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
2394
2395        /* Free memory allocated for message buffers */
2396        dma_free_coherent(&priv->pdev->dev,
2397                priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
2398                priv->imsg_ring[mbox].buf_base,
2399                priv->imsg_ring[mbox].buf_phys);
2400
2401        priv->imsg_ring[mbox].buf_base = NULL;
2402
2403        /* Free memory allocated for the free pointer list */
2404        dma_free_coherent(&priv->pdev->dev,
2405                priv->imsg_ring[mbox].size * 8,
2406                priv->imsg_ring[mbox].imfq_base,
2407                priv->imsg_ring[mbox].imfq_phys);
2408
2409        priv->imsg_ring[mbox].imfq_base = NULL;
2410
2411        /* Free memory allocated for RX descriptors */
2412        dma_free_coherent(&priv->pdev->dev,
2413                priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
2414                priv->imsg_ring[mbox].imd_base,
2415                priv->imsg_ring[mbox].imd_phys);
2416
2417        priv->imsg_ring[mbox].imd_base = NULL;
2418}
2419
2420/**
2421 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
2422 * @mport: Master port implementing the Inbound Messaging Engine
2423 * @mbox: Inbound mailbox number
2424 * @buf: Buffer to add to inbound queue
2425 */
2426static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
2427{
2428        struct tsi721_device *priv = mport->priv;
2429        u32 rx_slot;
2430        int rc = 0;
2431
2432        rx_slot = priv->imsg_ring[mbox].rx_slot;
2433        if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
2434                tsi_err(&priv->pdev->dev,
2435                        "Error adding inbound buffer %d, buffer exists",
2436                        rx_slot);
2437                rc = -EINVAL;
2438                goto out;
2439        }
2440
2441        priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
2442
2443        if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
2444                priv->imsg_ring[mbox].rx_slot = 0;
2445
2446out:
2447        return rc;
2448}
2449
2450/**
2451 * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
2452 * @mport: Master port implementing the Inbound Messaging Engine
2453 * @mbox: Inbound mailbox number
2454 *
2455 * Returns pointer to the message on success or NULL on failure.
2456 */
2457static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
2458{
2459        struct tsi721_device *priv = mport->priv;
2460        struct tsi721_imsg_desc *desc;
2461        u32 rx_slot;
2462        void *rx_virt = NULL;
2463        u64 rx_phys;
2464        void *buf = NULL;
2465        u64 *free_ptr;
2466        int ch = mbox + 4;
2467        int msg_size;
2468
2469        if (!priv->imsg_init[mbox])
2470                return NULL;
2471
2472        desc = priv->imsg_ring[mbox].imd_base;
2473        desc += priv->imsg_ring[mbox].desc_rdptr;
2474
2475        if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
2476                goto out;
2477
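            /* Find a receive slot that has a user buffer attached */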
2478        rx_slot = priv->imsg_ring[mbox].rx_slot;
2479        while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
2480                if (++rx_slot == priv->imsg_ring[mbox].size)
2481                        rx_slot = 0;
2482        }
2483
2484        rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
2485                        le32_to_cpu(desc->bufptr_lo);
2486
2487        rx_virt = priv->imsg_ring[mbox].buf_base +
2488                  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);
2489
2490        buf = priv->imsg_ring[mbox].imq_base[rx_slot];
2491        msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
2492        if (msg_size == 0)
2493                msg_size = RIO_MAX_MSG_SIZE;
2494
2495        memcpy(buf, rx_virt, msg_size);
2496        priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
2497
2498        desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
2499        if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
2500                priv->imsg_ring[mbox].desc_rdptr = 0;
2501
2502        iowrite32(priv->imsg_ring[mbox].desc_rdptr,
2503                priv->regs + TSI721_IBDMAC_DQRP(ch));
2504
2505        /* Return free buffer into the pointer list */
2506        free_ptr = priv->imsg_ring[mbox].imfq_base;
2507        free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);
2508
2509        if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
2510                priv->imsg_ring[mbox].fq_wrptr = 0;
2511
2512        iowrite32(priv->imsg_ring[mbox].fq_wrptr,
2513                priv->regs + TSI721_IBDMAC_FQWP(ch));
2514out:
2515        return buf;
2516}
2517
2518/**
2519 * tsi721_messages_init - Initialization of Messaging Engine
2520 * @priv: pointer to tsi721 private data
2521 *
2522 * Configures Tsi721 messaging engine.
2523 */
2524static int tsi721_messages_init(struct tsi721_device *priv)
2525{
2526        int     ch;
2527
2528        iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
2529        iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
2530        iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);
2531
2532        /* Set SRIO Message Request/Response Timeout */
2533        iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);
2534
2535        /* Initialize Inbound Messaging Engine Registers */
2536        for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
2537                /* Clear interrupt bits */
2538                iowrite32(TSI721_IBDMAC_INT_MASK,
2539                        priv->regs + TSI721_IBDMAC_INT(ch));
2540                /* Clear Status */
2541                iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));
2542
2543                iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
2544                                priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
2545                iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
2546                                priv->regs + TSI721_SMSG_ECC_NCOR(ch));
2547        }
2548
2549        return 0;
2550}
2551
2552/**
2553 * tsi721_query_mport - Query Tsi721 master port attributes
2554 * @mport: Master port to be queried
2555 * @attr: Attributes to be returned to the caller
2556 *
2557 * Reports SRIO link state, speed and width plus DMA capabilities. Returns %0.
2558 */
2559static int tsi721_query_mport(struct rio_mport *mport,
2560                              struct rio_mport_attr *attr)
2561{
2562        struct tsi721_device *priv = mport->priv;
2563        u32 rval;
2564
2565        rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0));
2566        if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
2567                rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0));
2568                attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
2569                rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0));
2570                attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
2571        } else
2572                attr->link_speed = RIO_LINK_DOWN;
2573
2574#ifdef CONFIG_RAPIDIO_DMA_ENGINE
2575        attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG;
2576        attr->dma_max_sge = 0;
2577        attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT;
2578        attr->dma_align = 0;
2579#else
2580        attr->flags = 0;
2581#endif
2582        return 0;
2583}
2584
2585/**
2586 * tsi721_disable_ints - disables all device interrupts
2587 * @priv: pointer to tsi721 private data
2588 */
2589static void tsi721_disable_ints(struct tsi721_device *priv)
2590{
2591        int ch;
2592
2593        /* Disable all device level interrupts */
2594        iowrite32(0, priv->regs + TSI721_DEV_INTE);
2595
2596        /* Disable all Device Channel interrupts */
2597        iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);
2598
2599        /* Disable all Inbound Msg Channel interrupts */
2600        for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
2601                iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));
2602
2603        /* Disable all Outbound Msg Channel interrupts */
2604        for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
2605                iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));
2606
2607        /* Disable all general messaging interrupts */
2608        iowrite32(0, priv->regs + TSI721_SMSG_INTE);
2609
2610        /* Disable all BDMA Channel interrupts */
2611        for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
2612                iowrite32(0,
2613                        priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
2614
2615        /* Disable all general BDMA interrupts */
2616        iowrite32(0, priv->regs + TSI721_BDMA_INTE);
2617
2618        /* Disable all SRIO Channel interrupts */
2619        for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
2620                iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));
2621
2622        /* Disable all general SR2PC interrupts */
2623        iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);
2624
2625        /* Disable all PC2SR interrupts */
2626        iowrite32(0, priv->regs + TSI721_PC2SR_INTE);
2627
2628        /* Disable all I2C interrupts */
2629        iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);
2630
2631        /* Disable SRIO MAC interrupts */
2632        iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
2633        iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
2634}
2635
2636static struct rio_ops tsi721_rio_ops = {
2637        .lcread                 = tsi721_lcread,
2638        .lcwrite                = tsi721_lcwrite,
2639        .cread                  = tsi721_cread_dma,
2640        .cwrite                 = tsi721_cwrite_dma,
2641        .dsend                  = tsi721_dsend,
2642        .open_inb_mbox          = tsi721_open_inb_mbox,
2643        .close_inb_mbox         = tsi721_close_inb_mbox,
2644        .open_outb_mbox         = tsi721_open_outb_mbox,
2645        .close_outb_mbox        = tsi721_close_outb_mbox,
2646        .add_outb_message       = tsi721_add_outb_message,
2647        .add_inb_buffer         = tsi721_add_inb_buffer,
2648        .get_inb_message        = tsi721_get_inb_message,
2649        .map_inb                = tsi721_rio_map_inb_mem,
2650        .unmap_inb              = tsi721_rio_unmap_inb_mem,
2651        .pwenable               = tsi721_pw_enable,
2652        .query_mport            = tsi721_query_mport,
2653        .map_outb               = tsi721_map_outb_win,
2654        .unmap_outb             = tsi721_unmap_outb_win,
2655};
2656
2657static void tsi721_mport_release(struct device *dev)
2658{
2659        struct rio_mport *mport = to_rio_mport(dev);
2660
2661        tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id);
2662}
2663
2664/**
2665 * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
2666 * @priv: pointer to tsi721 private data
2667 *
2668 * Configures Tsi721 as RapidIO master port.
2669 */
2670static int tsi721_setup_mport(struct tsi721_device *priv)
2671{
2672        struct pci_dev *pdev = priv->pdev;
2673        int err = 0;
2674        struct rio_mport *mport = &priv->mport;
2675
2676        err = rio_mport_initialize(mport);
2677        if (err)
2678                return err;
2679
2680        mport->ops = &tsi721_rio_ops;
2681        mport->index = 0;
2682        mport->sys_size = 0; /* small system */
2683        mport->priv = (void *)priv;
2684        mport->phys_efptr = 0x100;
2685        mport->phys_rmap = 1;
2686        mport->dev.parent = &pdev->dev;
2687        mport->dev.release = tsi721_mport_release;
2688
2689        INIT_LIST_HEAD(&mport->dbells);
2690
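            /* Advertise the full 16-bit doorbell range and four mailboxes in
             * each direction.
             */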
2691        rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
2692        rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
2693        rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
2694        snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)",
2695                 dev_driver_string(&pdev->dev), dev_name(&pdev->dev));
2696
2697        /* Hook up interrupt handler */
2698
2699#ifdef CONFIG_PCI_MSI
2700        if (!tsi721_enable_msix(priv))
2701                priv->flags |= TSI721_USING_MSIX;
2702        else if (!pci_enable_msi(pdev))
2703                priv->flags |= TSI721_USING_MSI;
2704        else
2705                tsi_debug(MPORT, &pdev->dev,
2706                         "MSI/MSI-X is not available. Using legacy INTx.");
2707#endif /* CONFIG_PCI_MSI */
2708
2709        err = tsi721_request_irq(priv);
2710
2711        if (err) {
2712                tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)",
2713                        pdev->irq, err);
2714                return err;
2715        }
2716
2717#ifdef CONFIG_RAPIDIO_DMA_ENGINE
2718        err = tsi721_register_dma(priv);
2719        if (err)
2720                goto err_exit;
2721#endif
2722        /* Enable SRIO link */
2723        iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
2724                  TSI721_DEVCTL_SRBOOT_CMPL,
2725                  priv->regs + TSI721_DEVCTL);
2726
2727        if (mport->host_deviceid >= 0)
2728                iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
2729                          RIO_PORT_GEN_DISCOVERED,
2730                          priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2731        else
2732                iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2733
2734        err = rio_register_mport(mport);
2735        if (err) {
2736                tsi721_unregister_dma(priv);
2737                goto err_exit;
2738        }
2739
2740        return 0;
2741
2742err_exit:
2743        tsi721_free_irq(priv);
2744        return err;
2745}
2746
2747static int tsi721_probe(struct pci_dev *pdev,
2748                                  const struct pci_device_id *id)
2749{
2750        struct tsi721_device *priv;
2751        int err;
2752
2753        priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
2754        if (!priv) {
2755                err = -ENOMEM;
2756                goto err_exit;
2757        }
2758
2759        err = pci_enable_device(pdev);
2760        if (err) {
2761                tsi_err(&pdev->dev, "Failed to enable PCI device");
2762                goto err_clean;
2763        }
2764
2765        priv->pdev = pdev;
2766
2767#ifdef DEBUG
2768        {
2769                int i;
2770
2771                for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2772                        tsi_debug(INIT, &pdev->dev, "res%d %pR",
2773                                  i, &pdev->resource[i]);
2774                }
2775        }
2776#endif
2777        /*
2778         * Verify BAR configuration
2779         */
2780
2781        /* BAR_0 (registers) must be 512KB+ in 32-bit address space */
2782        if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
2783            pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
2784            pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
2785                tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0");
2786                err = -ENODEV;
2787                goto err_disable_pdev;
2788        }
2789
2790        /* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
2791        if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
2792            pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
2793            pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
2794                tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1");
2795                err = -ENODEV;
2796                goto err_disable_pdev;
2797        }
2798
2799        /*
2800         * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address
2801         * space.
2802         * NOTE: BAR_2 and BAR_4 are not used by this version of the driver.
2803         * It may be a good idea to keep them disabled using HW configuration
2804         * to save PCI memory space.
2805         */
2806
2807        priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0;
2808
2809        if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) {
2810                if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH)
2811                        tsi_debug(INIT, &pdev->dev,
2812                                 "Prefetchable OBW BAR2 will not be used");
2813                else {
2814                        priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2);
2815                        priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2);
2816                }
2817        }
2818
2819        if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) {
2820                if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH)
2821                        tsi_debug(INIT, &pdev->dev,
2822                                 "Prefetchable OBW BAR4 will not be used");
2823                else {
2824                        priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4);
2825                        priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4);
2826                }
2827        }
2828
2829        err = pci_request_regions(pdev, DRV_NAME);
2830        if (err) {
2831                tsi_err(&pdev->dev, "Unable to obtain PCI resources");
2832                goto err_disable_pdev;
2833        }
2834
2835        pci_set_master(pdev);
2836
2837        priv->regs = pci_ioremap_bar(pdev, BAR_0);
2838        if (!priv->regs) {
2839                tsi_err(&pdev->dev, "Unable to map device register space");
2840                err = -ENOMEM;
2841                goto err_free_res;
2842        }
2843
2844        priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
2845        if (!priv->odb_base) {
2846                tsi_err(&pdev->dev, "Unable to map outbound doorbell space");
2847                err = -ENOMEM;
2848                goto err_unmap_bars;
2849        }
2850
2851        /* Configure DMA attributes. */
2852        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2853                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2854                if (err) {
2855                        tsi_err(&pdev->dev, "Unable to set DMA mask");
2856                        goto err_unmap_bars;
2857                }
2858
2859                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2860                        tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
2861        } else {
2862                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2863                if (err)
2864                        tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
2865        }
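            /*
             * DMA setup prefers a full 64-bit streaming mask and falls back
             * to 32-bit addressing if the platform rejects it; a failure to
             * set the matching coherent (consistent) mask is logged but is
             * not treated as fatal.
             */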
2866
2867        BUG_ON(!pci_is_pcie(pdev));
2868
2869        /* Clear "no snoop" and "relaxed ordering" bits. */
2870        pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
2871                PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
2872
2873        /* Override PCIe Maximum Read Request Size setting if requested */
2874        if (pcie_mrrs >= 0) {
2875                if (pcie_mrrs <= 5)
2876                        pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
2877                                        PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12);
2878                else
2879                        tsi_info(&pdev->dev,
2880                                 "Invalid MRRS override value %d", pcie_mrrs);
2881        }
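            /*
             * PCI_EXP_DEVCTL_READRQ occupies bits 14:12 of the Device
             * Control register, so the values 0..5 accepted above select a
             * maximum read request size of 128, 256, 512, 1024, 2048 or
             * 4096 bytes respectively.
             */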
2882
2883        /* Set PCIe completion timeout to 1-10ms */
2884        pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
2885                                           PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);
2886
2887        /*
2888         * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
2889         */
2890        pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
2891        pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
2892                                                TSI721_MSIXTBL_OFFSET);
2893        pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
2894                                                TSI721_MSIXPBA_OFFSET);
2895        pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
2896        /* End of FIXUP */
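            /*
             * The writes above update the MSI-X Table and PBA offset
             * registers to the locations used by this driver
             * (TSI721_MSIXTBL_OFFSET / TSI721_MSIXPBA_OFFSET);
             * TSI721_PCIECFG_EPCTL is set before and cleared after the
             * update, presumably to unlock the otherwise read-only
             * capability fields.
             */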
2897
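            /*
             * Bring-up sequence: mask all device interrupts, reset the
             * PC2SR/SR2PC address mapping tables, then initialize the
             * maintenance BDMA, doorbell, port-write and messaging blocks
             * before registering the mport.  Failures unwind through the
             * err_* labels below.
             */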
2898        tsi721_disable_ints(priv);
2899
2900        tsi721_init_pc2sr_mapping(priv);
2901        tsi721_init_sr2pc_mapping(priv);
2902
2903        if (tsi721_bdma_maint_init(priv)) {
2904                tsi_err(&pdev->dev, "BDMA initialization failed");
2905                err = -ENOMEM;
2906                goto err_unmap_bars;
2907        }
2908
2909        err = tsi721_doorbell_init(priv);
2910        if (err)
2911                goto err_free_bdma;
2912
2913        tsi721_port_write_init(priv);
2914
2915        err = tsi721_messages_init(priv);
2916        if (err)
2917                goto err_free_consistent;
2918
2919        err = tsi721_setup_mport(priv);
2920        if (err)
2921                goto err_free_consistent;
2922
2923        pci_set_drvdata(pdev, priv);
2924        tsi721_interrupts_init(priv);
2925
2926        return 0;
2927
2928err_free_consistent:
2929        tsi721_port_write_free(priv);
2930        tsi721_doorbell_free(priv);
2931err_free_bdma:
2932        tsi721_bdma_maint_free(priv);
2933err_unmap_bars:
2934        if (priv->regs)
2935                iounmap(priv->regs);
2936        if (priv->odb_base)
2937                iounmap(priv->odb_base);
2938err_free_res:
2939        pci_release_regions(pdev);
2940        pci_clear_master(pdev);
2941err_disable_pdev:
2942        pci_disable_device(pdev);
2943err_clean:
2944        kfree(priv);
2945err_exit:
2946        return err;
2947}
2948
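    /**
     * tsi721_remove - PCI device removal callback
     * @pdev: PCI device being removed
     *
     * Releases resources in roughly the reverse order of tsi721_probe():
     * interrupts are masked and freed, the mport and DMA engine are
     * unregistered, the per-block resources and BAR mappings are released,
     * and finally the MSI/MSI-X vectors and the PCI device itself are
     * disabled.
     */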
2949static void tsi721_remove(struct pci_dev *pdev)
2950{
2951        struct tsi721_device *priv = pci_get_drvdata(pdev);
2952
2953        tsi_debug(EXIT, &pdev->dev, "enter");
2954
2955        tsi721_disable_ints(priv);
2956        tsi721_free_irq(priv);
2957        flush_scheduled_work();
2958        rio_unregister_mport(&priv->mport);
2959
2960        tsi721_unregister_dma(priv);
2961        tsi721_bdma_maint_free(priv);
2962        tsi721_doorbell_free(priv);
2963        tsi721_port_write_free(priv);
2964        tsi721_close_sr2pc_mapping(priv);
2965
2966        if (priv->regs)
2967                iounmap(priv->regs);
2968        if (priv->odb_base)
2969                iounmap(priv->odb_base);
2970#ifdef CONFIG_PCI_MSI
2971        if (priv->flags & TSI721_USING_MSIX)
2972                pci_disable_msix(priv->pdev);
2973        else if (priv->flags & TSI721_USING_MSI)
2974                pci_disable_msi(priv->pdev);
2975#endif
2976        pci_release_regions(pdev);
2977        pci_clear_master(pdev);
2978        pci_disable_device(pdev);
2979        pci_set_drvdata(pdev, NULL);
2980        kfree(priv);
2981        tsi_debug(EXIT, &pdev->dev, "exit");
2982}
2983
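    /**
     * tsi721_shutdown - PCI device shutdown callback
     * @pdev: PCI device being shut down
     *
     * Masks device interrupts, stops all BDMA channels and drops PCI bus
     * mastering so that no DMA is in flight when the system reboots.
     */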
2984static void tsi721_shutdown(struct pci_dev *pdev)
2985{
2986        struct tsi721_device *priv = pci_get_drvdata(pdev);
2987
2988        tsi_debug(EXIT, &pdev->dev, "enter");
2989
2990        tsi721_disable_ints(priv);
2991        tsi721_dma_stop_all(priv);
2992        pci_clear_master(pdev);
2993        pci_disable_device(pdev);
2994}
2995
2996static const struct pci_device_id tsi721_pci_tbl[] = {
2997        { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
2998        { 0, }  /* terminate list */
2999};
3000
3001MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);
3002
3003static struct pci_driver tsi721_driver = {
3004        .name           = "tsi721",
3005        .id_table       = tsi721_pci_tbl,
3006        .probe          = tsi721_probe,
3007        .remove         = tsi721_remove,
3008        .shutdown       = tsi721_shutdown,
3009};
3010
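    /*
     * module_pci_driver() expands into the module init/exit boilerplate
     * that registers and unregisters tsi721_driver with the PCI core.
     */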
3011module_pci_driver(tsi721_driver);
3012
3013MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver");
3014MODULE_AUTHOR("Integrated Device Technology, Inc.");
3015MODULE_LICENSE("GPL");
3016