linux/drivers/scsi/csiostor/csio_isr.c
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

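/*
 * csio_nondata_isr() - Non-data MSIX ISR.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Handles slow-path (non-data) interrupts and mailbox completions, and
 * schedules the FW event worker when new events are pending.
 */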
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        int rv;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

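        /* Handle slow-path interrupts and mailbox completions under the lock. */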
        spin_lock_irqsave(&hw->lock, flags);
        csio_hw_slow_intr_handler(hw);
        rv = csio_mb_isr_handler(hw);

        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);
        return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the ISR for FW events. It is shared between the MSIX
 * and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
        int rv;
        unsigned long flags;

        rv = csio_fwevtq_handler(hw);

        spin_lock_irqsave(&hw->lock, flags);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_fwevt_handler(hw);

        return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @priv: Private data (unused).
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                           struct csio_fl_dma_buf *flb, void *priv)
{
        csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: Completion queue onto which the completed ioreq is queued.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
                        struct csio_fl_dma_buf *flb, void *cbfn_q)
{
        struct csio_ioreq *ioreq;
        uint8_t *scsiwr;
        uint8_t subop;
        void *cmnd;
        unsigned long flags;

        ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
        if (likely(ioreq)) {
                if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
                        subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
                                        ((struct fw_scsi_abrt_cls_wr *)
                                            scsiwr)->sub_opcode_to_chk_all_io);

                        csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
                                    subop ? "Close" : "Abort",
                                    ioreq, ioreq->wr_status);

                        spin_lock_irqsave(&hw->lock, flags);
                        if (subop)
                                csio_scsi_closed(ioreq,
                                                 (struct list_head *)cbfn_q);
                        else
                                csio_scsi_aborted(ioreq,
                                                  (struct list_head *)cbfn_q);
                        /*
                         * We call scsi_done for I/Os whose aborts the driver
                         * believes have timed out. If there is a race where FW
                         * completes the abort at the exact same time that the
                         * driver has detected the abort timeout, the following
                         * check prevents calling of scsi_done twice for the
                         * same command: once from the eh_abort_handler, another
                         * from csio_scsi_isr_handler(). This also avoids the
                         * need to check if csio_scsi_cmnd(req) is NULL in the
                         * fast path.
                         */
                        cmnd = csio_scsi_cmnd(ioreq);
                        if (unlikely(cmnd == NULL))
                                list_del_init(&ioreq->sm.sm_list);

                        spin_unlock_irqrestore(&hw->lock, flags);

                        if (unlikely(cmnd == NULL))
                                csio_put_scsi_ioreq_lock(hw,
                                                csio_hw_to_scsim(hw), ioreq);
                } else {
                        spin_lock_irqsave(&hw->lock, flags);
                        csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
                        spin_unlock_irqrestore(&hw->lock, flags);
                }
        }
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the given SCSI IQ by calling
 * csio_wr_process_iq(). Completed ioreqs are gathered on a local
 * completion queue, their io_cbfns are invoked, and the ioreqs are
 * then returned to the freelist.
 * This routine is shared between the MSIX and INTx handlers.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
        struct csio_hw *hw = (struct csio_hw *)iq->owner;
        LIST_HEAD(cbfn_q);
        struct list_head *tmp;
        struct csio_scsim *scm;
        struct csio_ioreq *ioreq;
        int isr_completions = 0;

        scm = csio_hw_to_scsim(hw);

        if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
                                        &cbfn_q) != 0))
                return IRQ_NONE;

        /* Call back the completion routines */
        list_for_each(tmp, &cbfn_q) {
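                /* Assumption: sm.sm_list sits at offset 0 of csio_ioreq,
                 * so the list node can be cast straight to the ioreq. */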
                ioreq = (struct csio_ioreq *)tmp;
                isr_completions++;
                ioreq->io_cbfn(hw, ioreq);
                /* Release ddp buffer if used for this req */
                if (unlikely(ioreq->dcopy))
                        csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
                                                    ioreq->nsge);
        }

        if (isr_completions) {
                /* Return the ioreqs back to ioreq->freelist */
                csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
                                              isr_completions);
        }

        return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: Interrupt number.
 * @dev_id: The SCSI ingress queue.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
        struct csio_q *iq = (struct csio_q *) dev_id;
        struct csio_hw *hw;

        if (unlikely(!iq))
                return IRQ_NONE;

        hw = (struct csio_hw *)iq->owner;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        csio_scsi_isr_handler(iq);

        return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @priv: The SCSI ingress queue.
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
                        struct csio_fl_dma_buf *flb, void *priv)
{
        struct csio_q *iq = priv;

        csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Handles slow-path interrupts, completions on the forward-interrupt
 * ingress queue, and mailbox completions.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
        struct csio_hw *hw = (struct csio_hw *) dev_id;
        struct csio_q *intx_q = NULL;
        int rv;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        if (unlikely(!hw))
                return IRQ_NONE;

        if (unlikely(pci_channel_offline(hw->pdev))) {
                CSIO_INC_STATS(hw, n_pcich_offline);
                return IRQ_NONE;
        }

        /* Disable the interrupt for this PCI function. */
        if (hw->intr_mode == CSIO_IM_INTX)
                csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

        /*
         * The read in the following function will flush the
         * above write.
         */
        if (csio_hw_slow_intr_handler(hw))
                ret = IRQ_HANDLED;

        /* Get the INTx Forward interrupt IQ. */
        intx_q = csio_get_q(hw, hw->intr_iq_idx);

        CSIO_DB_ASSERT(intx_q);

        /* IQ handler is not possible for intx_q, hence pass in NULL */
        if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
                ret = IRQ_HANDLED;

        spin_lock_irqsave(&hw->lock, flags);
        rv = csio_mb_isr_handler(hw);
        if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
                hw->flags |= CSIO_HWF_FWEVT_PENDING;
                spin_unlock_irqrestore(&hw->lock, flags);
                schedule_work(&hw->evtq_work);
                return IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

        return ret;
}

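/*
 * csio_add_msix_desc - Fill in descriptions for the MSIX vectors.
 * @hw: HW module.
 *
 * Names the non-data, FW event and per-CPU SCSI vectors after the
 * adapter's PCI bus/device/function so they are identifiable in
 * /proc/interrupts.
 */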
static void
csio_add_msix_desc(struct csio_hw *hw)
{
        int i;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        int k = CSIO_EXTRA_VECS;
        int len = sizeof(entryp->desc) - 1;
        int cnt = hw->num_sqsets + k;

        /* Non-data vector */
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

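        /* FW event vector */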
        entryp++;
        memset(entryp->desc, 0, len + 1);
        snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
                 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
        entryp++;

        /* Name SCSI vecs */
        for (i = k; i < cnt; i++, entryp++) {
                memset(entryp->desc, 0, len + 1);
                snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
                         CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
                         CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
        }
}

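/*
 * csio_request_irqs - Request the IRQs for the configured interrupt mode.
 * @hw: HW module.
 *
 * For MSI/INTx a single IRQ is requested with csio_fcoe_isr() as the
 * handler. For MSIX, separate vectors are requested for the non-data,
 * FW event and per-CPU SCSI interrupts. On failure, all IRQs requested
 * so far are freed and MSIX is disabled.
 */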
int
csio_request_irqs(struct csio_hw *hw)
{
        int rv, i, j, k = 0;
        struct csio_msix_entries *entryp = &hw->msix_entries[0];
        struct csio_scsi_cpu_info *info;

        if (hw->intr_mode != CSIO_IM_MSIX) {
                rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
                                        (hw->intr_mode == CSIO_IM_MSI) ?
                                                        0 : IRQF_SHARED,
                                        KBUILD_MODNAME, hw);
                if (rv) {
                        if (hw->intr_mode == CSIO_IM_MSI)
                                pci_disable_msi(hw->pdev);
                        csio_err(hw, "Failed to allocate interrupt line.\n");
                        return -EINVAL;
                }

                goto out;
        }

        /* Add the MSIX vector descriptions */
        csio_add_msix_desc(hw);

        rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         entryp[k].vector, rv);
                goto err;
        }

        entryp[k++].dev_id = (void *)hw;

        rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
                         entryp[k].desc, hw);
        if (rv) {
                csio_err(hw, "IRQ request failed for vec %d err:%d\n",
                         entryp[k].vector, rv);
                goto err;
        }

        entryp[k++].dev_id = (void *)hw;

        /* Allocate IRQs for SCSI */
        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];
                for (j = 0; j < info->max_cpus; j++, k++) {
                        struct csio_scsi_qset *sqset = &hw->sqset[i][j];
                        struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

                        rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
                                         entryp[k].desc, q);
                        if (rv) {
                                csio_err(hw,
                                       "IRQ request failed for vec %d err:%d\n",
                                       entryp[k].vector, rv);
                                goto err;
                        }

                        entryp[k].dev_id = (void *)q;

                } /* for all scsi cpus */
        } /* for all ports */

out:
        hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;

        return 0;

err:
        for (i = 0; i < k; i++) {
                entryp = &hw->msix_entries[i];
                free_irq(entryp->vector, entryp->dev_id);
        }
        pci_disable_msix(hw->pdev);

        return -EINVAL;
}

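/*
 * csio_disable_msix - Disable MSIX, optionally freeing the vectors first.
 * @hw: HW module.
 * @free: If true, free_irq() each vector before disabling MSIX.
 */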
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
        int i;
        struct csio_msix_entries *entryp;
        int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;

        if (free) {
                for (i = 0; i < cnt; i++) {
                        entryp = &hw->msix_entries[i];
                        free_irq(entryp->vector, entryp->dev_id);
                }
        }
        pci_disable_msix(hw->pdev);
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
        int i;
        struct csio_scsi_cpu_info *info;

        while (cnt < hw->num_sqsets) {
                for (i = 0; i < hw->num_pports; i++) {
                        info = &hw->scsi_cpu_info[i];
                        if (info->max_cpus > 1) {
                                info->max_cpus--;
                                hw->num_sqsets--;
                                if (hw->num_sqsets <= cnt)
                                        break;
                        }
                }
        }

        csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

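/*
 * csio_enable_msix - Allocate an MSIX vector range and distribute it.
 * @hw: HW module.
 *
 * Requests between (num_pports + CSIO_EXTRA_VECS) and
 * (num_sqsets + CSIO_EXTRA_VECS) vectors, shrinking the SCSI queue
 * sets if fewer vectors are granted, then maps the vectors to the
 * non-data, mailbox, FW event and per-CPU SCSI interrupts.
 */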
static int
csio_enable_msix(struct csio_hw *hw)
{
        int i, j, k, n, min, cnt;
        struct csio_msix_entries *entryp;
        struct msix_entry *entries;
        int extra = CSIO_EXTRA_VECS;
        struct csio_scsi_cpu_info *info;

        min = hw->num_pports + extra;
        cnt = hw->num_sqsets + extra;

        /* Max vectors required based on #niqs configured in fw */
        if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
                cnt = min_t(uint8_t, hw->cfg_niq, cnt);

        entries = kcalloc(cnt, sizeof(struct msix_entry), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        for (i = 0; i < cnt; i++)
                entries[i].entry = (uint16_t)i;

        csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

        cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
        if (cnt < 0) {
                kfree(entries);
                return cnt;
        }

        if (cnt < (hw->num_sqsets + extra)) {
                csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
                csio_reduce_sqsets(hw, cnt - extra);
        }

        /* Save off vectors */
        for (i = 0; i < cnt; i++) {
                entryp = &hw->msix_entries[i];
                entryp->vector = entries[i].vector;
        }

        /* Distribute vectors */
        k = 0;
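        /* The non-data and mailbox interrupts share the first vector. */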
        csio_set_nondata_intr_idx(hw, entries[k].entry);
        csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
        csio_set_fwevt_intr_idx(hw, entries[k++].entry);

        for (i = 0; i < hw->num_pports; i++) {
                info = &hw->scsi_cpu_info[i];

                for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
                        n = (j % info->max_cpus) + k;
                        hw->sqset[i][j].intr_idx = entries[n].entry;
                }

                k += info->max_cpus;
        }

        kfree(entries);
        return 0;
}

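/*
 * csio_intr_enable - Pick and enable an interrupt mode.
 * @hw: HW module.
 *
 * Tries MSIX first (csio_msi == 2), then MSI (csio_msi == 1), and
 * falls back to INTx. For the MSI/INTx path the SCSI queue sets are
 * trimmed if the firmware configured fewer ingress queues.
 */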
void
csio_intr_enable(struct csio_hw *hw)
{
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

        /* Try MSIX, then MSI or fall back to INTx */
        if ((csio_msi == 2) && !csio_enable_msix(hw))
                hw->intr_mode = CSIO_IM_MSIX;
        else {
                /* Max iqs required based on #niqs configured in fw */
                if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
                        !csio_is_hw_master(hw)) {
                        int extra = CSIO_EXTRA_MSI_IQS;

                        if (hw->cfg_niq < (hw->num_sqsets + extra)) {
                                csio_dbg(hw, "Reducing sqsets to %d\n",
                                         hw->cfg_niq - extra);
                                csio_reduce_sqsets(hw, hw->cfg_niq - extra);
                        }
                }

                if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
                        hw->intr_mode = CSIO_IM_MSI;
                else
                        hw->intr_mode = CSIO_IM_INTX;
        }

        csio_dbg(hw, "Using %s interrupt mode.\n",
                (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
                ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

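/*
 * csio_intr_disable - Disable interrupts for the current mode.
 * @hw: HW module.
 * @free: If true, free the requested IRQ(s) as well.
 */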
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
        csio_hw_intr_disable(hw);

        switch (hw->intr_mode) {
        case CSIO_IM_MSIX:
                csio_disable_msix(hw, free);
                break;
        case CSIO_IM_MSI:
                if (free)
                        free_irq(hw->pdev->irq, hw);
                pci_disable_msi(hw->pdev);
                break;
        case CSIO_IM_INTX:
                if (free)
                        free_irq(hw->pdev->irq, hw);
                break;
        default:
                break;
        }
        hw->intr_mode = CSIO_IM_NONE;
        hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}