linux/drivers/scsi/mpt3sas/mpt3sas_base.c
/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>        /* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"

static MPT_CALLBACK     mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH     30000
#define MAX_CHAIN_DEPTH         100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
        " max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
        "irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
        " enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
        "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
        "0 - balanced: high iops mode is enabled &\n\t\t"
        "interrupt coalescing is enabled only on high iops queues,\n\t\t"
        "1 - iops: high iops mode is disabled &\n\t\t"
        "interrupt coalescing is enabled on all queues,\n\t\t"
        "2 - latency: high iops mode is disabled &\n\t\t"
        "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
        "\t\tdefault - default perf_mode is 'balanced'"
        );

enum mpt3sas_perf_mode {
        MPT_PERF_MODE_DEFAULT   = -1,
        MPT_PERF_MODE_BALANCED  = 0,
        MPT_PERF_MODE_IOPS      = 1,
        MPT_PERF_MODE_LATENCY   = 2,
};

static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
                u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);

/**
 * mpt3sas_base_check_cmd_timeout - check whether an internal command
 *              timed out or was terminated due to a host reset.
 *
 * @ioc:        per adapter object.
 * @status:     status of the issued command.
 * @mpi_request: mf request pointer.
 * @sz:         size of buffer.
 *
 * Return: 1 if a reset should be issued (command timed out), 0 if the
 * command was terminated due to a host reset.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
                u8 status, void *mpi_request, int sz)
{
        u8 issue_reset = 0;

        if (!(status & MPT3_CMD_RESET))
                issue_reset = 1;

        ioc_err(ioc, "Command %s\n",
                issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
        _debug_dump_mf(mpi_request, sz);

        return issue_reset;
}
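
/*
 * Illustrative caller sketch (not code from this file): driver-internal
 * commands typically wait on a completion and then use the helper above
 * to distinguish a timeout from a host-reset termination.  The request
 * type, timeout and mpi_request variable below are placeholders.
 *
 *	wait_for_completion_timeout(&ioc->base_cmds.done, timeout * HZ);
 *	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
 *		issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
 *		    ioc->base_cmds.status, mpi_request,
 *		    sizeof(Mpi2SasIoUnitControlRequest_t) / 4);
 *		if (issue_reset)
 *			mpt3sas_base_hard_reset_handler(ioc,
 *			    FORCE_BIG_HAMMER);
 *	}
 */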

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: value written to the module parameter
 * @kp: kernel parameter descriptor
 *
 * Return: 0 on success, or the error returned by param_set_int().
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;

        if (ret)
                return ret;

        /* global ioc spinlock to protect controller list on list operations */
        pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
        spin_lock(&gioc_lock);
        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
                ioc->fwfault_debug = mpt3sas_fwfault_debug;
        spin_unlock(&gioc_lock);
        return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
        param_get_int, &mpt3sas_fwfault_debug, 0644);
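
/*
 * Since the parameter is registered with 0644 permissions, it can also
 * be updated at runtime through sysfs, which invokes the setter above:
 *
 *	echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 */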

/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() for max three times if it gets zero value
 * while reading the system interface register.
 */
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
{
        u32 i = 0, ret_val;

        do {
                ret_val = readl(addr);
                i++;
        } while (ret_val == 0 && i < 3);

        return ret_val;
}

static inline u32
_base_readl(const volatile void __iomem *addr)
{
        return readl(addr);
}
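
/*
 * These two helpers back the ioc->base_readl hook used elsewhere in this
 * file (e.g. mpt3sas_halt_firmware() and the interrupt mask helpers).
 * Presumably the retrying variant is installed at init time for Aero/Sea
 * controllers, which can transiently return zero on register reads, and
 * the plain variant for earlier generations.
 */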

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *                                in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
                u32 index)
{
        /*
         * The first 256 bytes are system registers; MPI frames start at
         * offset 256.  Max MPI frames supported is 32, and 32 * 128 = 4K.
         * The clone of the reply free pool for the mCPU starts from there.
         */
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
                        MPI_FRAME_START_OFFSET +
                        (cmd_credit * ioc->request_sz) + (index * sizeof(u32));

        writel(reply, reply_free_iomem);
}
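
/*
 * Worked example of the offset math above, using the maxCredit-32 /
 * 128-byte-frame numbers from the BAR0 layout comment in
 * _clone_sg_entries(): the reply free clone begins at
 * MPI_FRAME_START_OFFSET + cmd_credit * request_sz = 256 + 32 * 128 =
 * 4352, matching the "4352 - 4864 Reply_free pool" region there.
 */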

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *                              to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)src;

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                                (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)(src);

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                        (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *                       for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                u8 sge_chain_count)
{
        void __iomem *base_chain, *chain_virt;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_virt;
}
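
/*
 * Continuing the worked example with the same maxCredit-32 values: with
 * REPLY_FREE_POOL_SIZE of 512 bytes the chain region starts at
 * 256 + 32 * 128 + 512 = 4864, and 32 commands * 3 chains * 128 bytes =
 * 12288 bytes end at 17152 - both figures appear in the BAR0 layout
 * comment in _clone_sg_entries() below.
 */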

/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *                      in BAR0 for scatter gather chains, for
 *                      the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                u8 sge_chain_count)
{
        phys_addr_t base_chain_phys, chain_phys;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *                      buffer address for the provided smid.
 *                      (Each smid gets 64K, starting at offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        /* Added extra 1 to reach end of chain */
        void __iomem *chain_end = _base_get_chain(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *              Host buffer Physical address for the provided smid.
 *              (Each smid gets 64K, starting at offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Physical address of the buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end_phys + (smid * 64 * 1024);
}
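
/*
 * The virtual and physical variants above intentionally mirror each
 * other's arithmetic: _clone_sg_entries() walks both in lockstep and
 * patches the 32-bit SGE Address fields with the physical value, so the
 * WARN_ON(buff_ptr_phys > U32_MAX) there assumes the whole host buffer
 * region stays within 32-bit addressable space.
 */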

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates the chain
 *                      lookup list and provides the chain_buffer
 *                      address for the matching dma address.
 *                      (Each smid gets 64K, starting at offset 17024.)
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to the chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
                dma_addr_t chain_buffer_dma)
{
        u16 index, j;
        struct chain_tracker *ct;

        for (index = 0; index < ioc->scsiio_depth; index++) {
                for (j = 0; j < ioc->chains_needed_per_io; j++) {
                        ct = &ioc->chain_lookup[index].chains_per_smid[j];
                        if (ct && ct->chain_buffer_dma == chain_buffer_dma)
                                return ct->chain_buffer;
                }
        }
        ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
        return NULL;
}

/**
 * _clone_sg_entries -  MPI EP's scsiio and config requests
 *                      are handled here. Base function for
 *                      double buffering, before submitting
 *                      the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
                void *mpi_request, u16 smid)
{
        Mpi2SGESimple32_t *sgel, *sgel_next;
        u32  sgl_flags, sge_chain_count = 0;
        bool is_write = false;
        u16 i = 0;
        void __iomem *buffer_iomem;
        phys_addr_t buffer_iomem_phys;
        void __iomem *buff_ptr;
        phys_addr_t buff_ptr_phys;
        void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        phys_addr_t dst_addr_phys;
        MPI2RequestHeader_t *request_hdr;
        struct scsi_cmnd *scmd;
        struct scatterlist *sg_scmd = NULL;
        int is_scsiio_req = 0;

        request_hdr = (MPI2RequestHeader_t *) mpi_request;

        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
                Mpi25SCSIIORequest_t *scsiio_request =
                        (Mpi25SCSIIORequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
                is_scsiio_req = 1;
        } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
                Mpi2ConfigRequest_t  *config_req =
                        (Mpi2ConfigRequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
        } else
                return;

        /* From the smid we can get the scsi_cmnd; once we have sg_scmd,
         * we just need sg_virt and sg_next to get the virtual
         * address associated with sgel->Address.
         */

        if (is_scsiio_req) {
                /* Get scsi_cmd using smid */
                scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
                if (scmd == NULL) {
                        ioc_err(ioc, "scmd is NULL\n");
                        return;
                }

                /* Get sg_scmd from scmd provided */
                sg_scmd = scsi_sglist(scmd);
        }

        /*
         * 0 - 255      System register
         * 256 - 4352   MPI Frame. (This is based on maxCredit 32)
         * 4352 - 4864  Reply_free pool (512 bytes are reserved
         *              considering maxCredit 32. Replies need extra
         *              room; for the mCPU case four times maxCredit
         *              is kept.)
         * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
         *              128 byte size = 12288)
         * 17152 - x    Host buffer mapped with smid.
         *              (Each smid can have 64K Max IO.)
         * BAR0+Last 1K MSIX Addr and Data
         * Total size in use 2113664 bytes of 4MB BAR0
         */

        buffer_iomem = _base_get_buffer_bar0(ioc, smid);
        buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

        buff_ptr = buffer_iomem;
        buff_ptr_phys = buffer_iomem_phys;
        WARN_ON(buff_ptr_phys > U32_MAX);

        if (le32_to_cpu(sgel->FlagsLength) &
                        (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
                is_write = true;

        for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

                sgl_flags =
                    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

                switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
                case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
                        /*
                         * Helper function which on passing
                         * chain_buffer_dma returns chain_buffer. Get
                         * the virtual address for sgel->Address
                         */
                        sgel_next =
                                _base_get_chain_buffer_dma_to_chain_buffer(ioc,
                                                le32_to_cpu(sgel->Address));
                        if (sgel_next == NULL)
                                return;
                        /*
                         * This is copying a 128 byte chain
                         * frame (not a host buffer)
                         */
                        dst_chain_addr[sge_chain_count] =
                                _base_get_chain(ioc,
                                        smid, sge_chain_count);
                        src_chain_addr[sge_chain_count] =
                                                (void *) sgel_next;
                        dst_addr_phys = _base_get_chain_phys(ioc,
                                                smid, sge_chain_count);
                        WARN_ON(dst_addr_phys > U32_MAX);
                        sgel->Address =
                                cpu_to_le32(lower_32_bits(dst_addr_phys));
                        sgel = sgel_next;
                        sge_chain_count++;
                        break;
                case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
                        if (is_write) {
                                if (is_scsiio_req) {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            sg_virt(sg_scmd),
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        /*
                                         * FIXME: this relies on a zero
                                         * PCI mem_offset.
                                         */
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                } else {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            ioc->config_vaddr,
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                }
                        }
                        buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        if ((le32_to_cpu(sgel->FlagsLength) &
                            (MPI2_SGE_FLAGS_END_OF_BUFFER
                                        << MPI2_SGE_FLAGS_SHIFT)))
                                goto eob_clone_chain;
                        else {
                                /*
                                 * Every single element in MPT will have
                                 * an associated sg_next. Sanity-check that
                                 * sg_next is not NULL; a NULL here would
                                 * be a bug.
                                 */
                                if (is_scsiio_req) {
                                        sg_scmd = sg_next(sg_scmd);
                                        if (sg_scmd)
                                                sgel++;
                                        else
                                                goto eob_clone_chain;
                                }
                        }
                        break;
                }
        }

eob_clone_chain:
        for (i = 0; i < sge_chain_count; i++) {
                if (is_scsiio_req)
                        _base_clone_to_sys_mem(dst_chain_addr[i],
                                src_chain_addr[i], ioc->request_sz);
        }
}

/**
 *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
        struct pci_dev *pdev;

        if (!ioc)
                return -1;

        pdev = ioc->pdev;
        if (!pdev)
                return -1;
        pci_stop_and_remove_bus_device_locked(pdev);
        return 0;
}
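
/*
 * This runs in a dedicated kthread (spawned from _base_fault_reset_work()
 * below) rather than directly in the fault-polling work item, presumably
 * because removing the PCI device tears down the very watchdog workqueue
 * that work item runs on, which would deadlock if done inline.
 */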

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
        struct MPT3SAS_ADAPTER *ioc =
            container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
        unsigned long    flags;
        u32 doorbell;
        int rc;
        struct task_struct *p;


        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
                        ioc->pci_error_recovery)
                goto rearm_timer;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
                ioc_err(ioc, "SAS host is non-operational !!!!\n");

                /* It may be possible that EEH recovery can resolve some of
                 * the pci bus failure issues rather than removing the dead
                 * ioc function by considering the controller to be in a
                 * non-operational state. So here priority is given to EEH
                 * recovery. If it does not resolve the issue, the mpt3sas
                 * driver will consider the controller non-operational and
                 * remove the dead ioc function.
                 */
                if (ioc->non_operational_loop++ < 5) {
                        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                                                         flags);
                        goto rearm_timer;
                }

                /*
                 * Call _scsih_flush_pending_cmds callback so that we flush all
                 * pending commands back to OS. This call is required to avoid
                 * deadlock at block layer. Dead IOC will fail to do diag reset,
                 * and this call is safe since dead ioc will never return any
                 * command back from HW.
                 */
                ioc->schedule_dead_ioc_flush_running_cmds(ioc);
                /*
                 * Set remove_host flag early since kernel thread will
                 * take some time to execute.
                 */
                ioc->remove_host = 1;
                /* Remove the dead host */
                p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
                    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
                if (IS_ERR(p))
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                                __func__);
                else
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                                __func__);
                return; /* don't rearm timer */
        }

        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
                u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
                    ioc->manu_pg11.CoreDumpTOSec :
                    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

                timeout /= (FAULT_POLLING_INTERVAL/1000);

                if (ioc->ioc_coredump_loop == 0) {
                        mpt3sas_print_coredump_info(ioc,
                            doorbell & MPI2_DOORBELL_DATA_MASK);
                        /* do not accept any IOs and disable the interrupts */
                        spin_lock_irqsave(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        ioc->shost_recovery = 1;
                        spin_unlock_irqrestore(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        mpt3sas_base_mask_interrupts(ioc);
                        _base_clear_outstanding_commands(ioc);
                }

                ioc_info(ioc, "%s: CoreDump loop %d.\n",
                    __func__, ioc->ioc_coredump_loop);

                /* Wait until CoreDump completes or times out */
                if (ioc->ioc_coredump_loop++ < timeout) {
                        spin_lock_irqsave(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        goto rearm_timer;
                }
        }

        if (ioc->ioc_coredump_loop) {
                if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
                        ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d\n",
                            __func__, ioc->ioc_coredump_loop);
                else
                        ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d\n",
                            __func__, ioc->ioc_coredump_loop);
                ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
        }
        ioc->non_operational_loop = 0;
        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                ioc_warn(ioc, "%s: hard reset: %s\n",
                         __func__, rc == 0 ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
                if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                        mpt3sas_print_fault_code(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
                    MPI2_IOC_STATE_COREDUMP)
                        mpt3sas_print_coredump_info(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
                    MPI2_IOC_STATE_OPERATIONAL)
                        return; /* don't rearm timer */
        }
        ioc->ioc_coredump_loop = 0;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long    flags;

        if (ioc->fault_reset_work_q)
                return;

        /* initialize fault polling */

        INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
        snprintf(ioc->fault_reset_work_q_name,
            sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
            ioc->driver_name, ioc->id);
        ioc->fault_reset_work_q =
                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
        if (!ioc->fault_reset_work_q) {
                ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
                return;
        }
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;
        struct workqueue_struct *wq;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        wq = ioc->fault_reset_work_q;
        ioc->fault_reset_work_q = NULL;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        if (wq) {
                if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
                        flush_workqueue(wq);
                destroy_workqueue(wq);
        }
}
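
/*
 * Note the teardown ordering above: fault_reset_work_q is NULLed under
 * ioc_reset_in_progress_lock before the work is cancelled, so a
 * concurrently running _base_fault_reset_work() - which re-checks the
 * pointer under the same lock before re-queueing itself - cannot rearm
 * the timer during shutdown.
 */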

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return nothing.
 */
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
 * completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
                const char *caller)
{
        u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
                        ioc->manu_pg11.CoreDumpTOSec :
                        MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

        int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
                                        timeout);

        if (ioc_state)
                ioc_err(ioc,
                    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
                    caller, ioc_state);
        else
                ioc_info(ioc,
                    "%s: CoreDump completed. (ioc_state=0x%x)\n",
                    caller, ioc_state);

        return ioc_state;
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. The
 * intent is to stop both the driver and firmware so the end user
 * can obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
        u32 doorbell;

        if (!ioc->fwfault_debug)
                return;

        dump_stack();

        doorbell = ioc->base_readl(&ioc->chip->Doorbell);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                mpt3sas_print_fault_code(ioc, doorbell &
                    MPI2_DOORBELL_DATA_MASK);
        } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
            MPI2_IOC_STATE_COREDUMP) {
                mpt3sas_print_coredump_info(ioc, doorbell &
                    MPI2_DOORBELL_DATA_MASK);
        } else {
                writel(0xC0FFEE00, &ioc->chip->Doorbell);
                ioc_err(ioc, "Firmware is halted due to command timeout\n");
        }

        if (ioc->fwfault_debug == 2)
                for (;;)
                        ;
        else
                panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
        MPI2RequestHeader_t *request_hdr)
{
        u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        char *desc = NULL;
        u16 frame_sz;
        char *func_str = NULL;

        /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
            request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
            request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
                return;

        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
                return;

        switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

        case MPI2_IOCSTATUS_INVALID_FUNCTION:
                desc = "invalid function";
                break;
        case MPI2_IOCSTATUS_BUSY:
                desc = "busy";
                break;
        case MPI2_IOCSTATUS_INVALID_SGL:
                desc = "invalid sgl";
                break;
        case MPI2_IOCSTATUS_INTERNAL_ERROR:
                desc = "internal error";
                break;
        case MPI2_IOCSTATUS_INVALID_VPID:
                desc = "invalid vpid";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
                desc = "insufficient resources";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
                desc = "insufficient power";
                break;
        case MPI2_IOCSTATUS_INVALID_FIELD:
                desc = "invalid field";
                break;
        case MPI2_IOCSTATUS_INVALID_STATE:
                desc = "invalid state";
                break;
        case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
                desc = "op state not supported";
                break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

        case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
                desc = "config invalid action";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
                desc = "config invalid type";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
                desc = "config invalid page";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
                desc = "config invalid data";
                break;
        case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
                desc = "config no defaults";
                break;
        case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
                desc = "config cant commit";
                break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

        case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
        case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
        case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
        case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
        case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
        case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
        case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
        case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
                break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

        case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
                desc = "eedp guard error";
                break;
        case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
                desc = "eedp ref tag error";
                break;
        case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
                desc = "eedp app tag error";
                break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

        case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
                desc = "target invalid io index";
                break;
        case MPI2_IOCSTATUS_TARGET_ABORTED:
                desc = "target aborted";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
                desc = "target no conn retryable";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
                desc = "target no connection";
                break;
        case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
                desc = "target xfer count mismatch";
                break;
        case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
                desc = "target data offset error";
                break;
        case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
                desc = "target too much write data";
                break;
        case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
                desc = "target iu too short";
                break;
        case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
                desc = "target ack nak timeout";
                break;
        case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
                desc = "target nak received";
                break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

        case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
                desc = "smp request failed";
                break;
        case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
                desc = "smp data overrun";
                break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

        case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
                desc = "diagnostic released";
                break;
        default:
                break;
        }

        if (!desc)
                return;

        switch (request_hdr->Function) {
        case MPI2_FUNCTION_CONFIG:
                frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
                func_str = "config_page";
                break;
        case MPI2_FUNCTION_SCSI_TASK_MGMT:
                frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
                func_str = "task_mgmt";
                break;
        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
                frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
                func_str = "sas_iounit_ctl";
                break;
        case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
                frame_sz = sizeof(Mpi2SepRequest_t);
                func_str = "enclosure";
                break;
        case MPI2_FUNCTION_IOC_INIT:
                frame_sz = sizeof(Mpi2IOCInitRequest_t);
                func_str = "ioc_init";
                break;
        case MPI2_FUNCTION_PORT_ENABLE:
                frame_sz = sizeof(Mpi2PortEnableRequest_t);
                func_str = "port_enable";
                break;
        case MPI2_FUNCTION_SMP_PASSTHROUGH:
                frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
                func_str = "smp_passthru";
                break;
        case MPI2_FUNCTION_NVME_ENCAPSULATED:
                frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
                    ioc->sge_size;
                func_str = "nvme_encapsulated";
                break;
        default:
                frame_sz = 32;
                func_str = "unknown";
                break;
        }

        ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
                 desc, ioc_status, request_hdr, func_str);

        _debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        Mpi2EventNotificationReply_t *mpi_reply)
{
        char *desc = NULL;
        u16 event;

        if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
                return;

        event = le16_to_cpu(mpi_reply->Event);

        switch (event) {
        case MPI2_EVENT_LOG_DATA:
                desc = "Log Data";
                break;
        case MPI2_EVENT_STATE_CHANGE:
                desc = "Status Change";
                break;
        case MPI2_EVENT_HARD_RESET_RECEIVED:
                desc = "Hard Reset Received";
                break;
        case MPI2_EVENT_EVENT_CHANGE:
                desc = "Event Change";
                break;
        case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
                desc = "Device Status Change";
                break;
        case MPI2_EVENT_IR_OPERATION_STATUS:
                if (!ioc->hide_ir_msg)
                        desc = "IR Operation Status";
                break;
        case MPI2_EVENT_SAS_DISCOVERY:
        {
                Mpi2EventDataSasDiscovery_t *event_data =
                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
                ioc_info(ioc, "Discovery: (%s)",
                         event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
                         "start" : "stop");
                if (event_data->DiscoveryStatus)
                        pr_cont(" discovery_status(0x%08x)",
                            le32_to_cpu(event_data->DiscoveryStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
                desc = "SAS Broadcast Primitive";
                break;
        case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
                desc = "SAS Init Device Status Change";
                break;
        case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
                desc = "SAS Init Table Overflow";
                break;
        case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
                desc = "SAS Topology Change List";
                break;
        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
                desc = "SAS Enclosure Device Status Change";
                break;
        case MPI2_EVENT_IR_VOLUME:
                if (!ioc->hide_ir_msg)
                        desc = "IR Volume";
                break;
        case MPI2_EVENT_IR_PHYSICAL_DISK:
                if (!ioc->hide_ir_msg)
                        desc = "IR Physical Disk";
                break;
        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
                if (!ioc->hide_ir_msg)
                        desc = "IR Configuration Change List";
                break;
        case MPI2_EVENT_LOG_ENTRY_ADDED:
                if (!ioc->hide_ir_msg)
                        desc = "Log Entry Added";
                break;
        case MPI2_EVENT_TEMP_THRESHOLD:
                desc = "Temperature Threshold";
                break;
        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
                desc = "Cable Event";
                break;
        case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
                desc = "SAS Device Discovery Error";
                break;
        case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
                desc = "PCIE Device Status Change";
                break;
        case MPI2_EVENT_PCIE_ENUMERATION:
        {
                Mpi26EventDataPCIeEnumeration_t *event_data =
                        (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
                ioc_info(ioc, "PCIE Enumeration: (%s)",
                         event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
                         "start" : "stop");
                if (event_data->EnumerationStatus)
                        pr_cont(" enumeration_status(0x%08x)",
                                le32_to_cpu(event_data->EnumerationStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
                desc = "PCIE Topology Change List";
                break;
        }

        if (!desc)
                return;

        ioc_info(ioc, "%s\n", desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
        union loginfo_type {
                u32     loginfo;
                struct {
                        u32     subcode:16;
                        u32     code:8;
                        u32     originator:4;
                        u32     bus_type:4;
                } dw;
        };
        union loginfo_type sas_loginfo;
        char *originator_str = NULL;

        sas_loginfo.loginfo = log_info;
        if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
                return;

        /* each nexus loss loginfo */
        if (log_info == 0x31170000)
                return;

        /* eat the loginfos associated with task aborts */
        if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
            0x31140000 || log_info == 0x31130000))
                return;

        switch (sas_loginfo.dw.originator) {
        case 0:
                originator_str = "IOP";
                break;
        case 1:
                originator_str = "PL";
                break;
        case 2:
                if (!ioc->hide_ir_msg)
                        originator_str = "IR";
                else
                        originator_str = "WarpDrive";
                break;
        }

        ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
                 log_info,
                 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
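
/*
 * Example decode using the nexus-loss loginfo 0x31170000 cited above:
 * bus_type = 0x3 (SAS), originator = 0x1 (PL), code = 0x17,
 * sub_code = 0x0000 - i.e. the top two nibbles select bus type and
 * originator, the next byte is the code, and the low 16 bits the
 * subcode, per the bitfield layout in the union above.
 */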

/**
 * _base_display_reply_info - show ioc_status and log info from a reply
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;
        u16 ioc_status;
        u32 loginfo = 0;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (unlikely(!mpi_reply)) {
                ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
                        __FILE__, __LINE__, __func__);
                return;
        }
        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

        if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
            (ioc->logging_level & MPT_DEBUG_REPLY)) {
                _base_sas_ioc_info(ioc, mpi_reply,
                   mpt3sas_base_get_msg_frame(ioc, smid));
        }

        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
                loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
                _base_sas_log_info(ioc, loginfo);
        }

        if (ioc_status || loginfo) {
                ioc_status &= MPI2_IOCSTATUS_MASK;
                mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
        }
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
                return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

        if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
                return 1;

        ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
        if (mpi_reply) {
                ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
                memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
        }
        ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

        complete(&ioc->base_cmds.done);
        return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
        Mpi2EventNotificationReply_t *mpi_reply;
        Mpi2EventAckRequest_t *ack_request;
        u16 smid;
        struct _event_ack_list *delayed_event_ack;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (!mpi_reply)
                return 1;
        if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
                return 1;

        _base_display_event_data(ioc, mpi_reply);

        if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
                goto out;
        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
        if (!smid) {
                delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
                                        GFP_ATOMIC);
                if (!delayed_event_ack)
                        goto out;
                INIT_LIST_HEAD(&delayed_event_ack->list);
                delayed_event_ack->Event = mpi_reply->Event;
                delayed_event_ack->EventContext = mpi_reply->EventContext;
                list_add_tail(&delayed_event_ack->list,
                                &ioc->delayed_event_ack_list);
                dewtprintk(ioc,
                           ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
                                    le16_to_cpu(mpi_reply->Event)));
                goto out;
        }

        ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
        ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
        ack_request->Event = mpi_reply->Event;
        ack_request->EventContext = mpi_reply->EventContext;
        ack_request->VF_ID = 0;  /* TODO */
        ack_request->VP_ID = 0;
        ioc->put_smid_default(ioc, smid);

 out:

        /* scsih callback handler */
        mpt3sas_scsih_event_callback(ioc, msix_index, reply);

        /* ctl callback handler */
        mpt3sas_ctl_event_callback(ioc, msix_index, reply);

        return 1;
}
1416
1417static struct scsiio_tracker *
1418_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1419{
1420        struct scsi_cmnd *cmd;
1421
1422        if (WARN_ON(!smid) ||
1423            WARN_ON(smid >= ioc->hi_priority_smid))
1424                return NULL;
1425
1426        cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1427        if (cmd)
1428                return scsi_cmd_priv(cmd);
1429
1430        return NULL;
1431}
1432
1433/**
1434 * _base_get_cb_idx - obtain the callback index
1435 * @ioc: per adapter object
1436 * @smid: system request message index
1437 *
1438 * Return: callback index.
1439 */
1440static u8
1441_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1442{
1443        int i;
1444        u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1445        u8 cb_idx = 0xFF;
1446
1447        if (smid < ioc->hi_priority_smid) {
1448                struct scsiio_tracker *st;
1449
1450                if (smid < ctl_smid) {
1451                        st = _get_st_from_smid(ioc, smid);
1452                        if (st)
1453                                cb_idx = st->cb_idx;
1454                } else if (smid == ctl_smid)
1455                        cb_idx = ioc->ctl_cb_idx;
1456        } else if (smid < ioc->internal_smid) {
1457                i = smid - ioc->hi_priority_smid;
1458                cb_idx = ioc->hpr_lookup[i].cb_idx;
1459        } else if (smid <= ioc->hba_queue_depth) {
1460                i = smid - ioc->internal_smid;
1461                cb_idx = ioc->internal_lookup[i].cb_idx;
1462        }
1463        return cb_idx;
1464}
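/*
 * Illustrative map of the smid ranges decoded above (bounds taken
 * directly from the checks in _base_get_cb_idx(); any other smid below
 * hi_priority_smid resolves to 0xFF):
 *
 *	1 .. ctl_smid - 1                      SCSI IO (per-command tracker)
 *	ctl_smid                               internal ctl SCSI IO command
 *	hi_priority_smid .. internal_smid - 1  high-priority requests
 *	internal_smid .. hba_queue_depth       internal requests
 */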
1465
1466/**
1467 * mpt3sas_base_mask_interrupts - disable interrupts
1468 * @ioc: per adapter object
1469 *
1470 * Disables the ResetIRQ, Reply, and Doorbell interrupts.
1471 */
1472void
1473mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1474{
1475        u32 him_register;
1476
1477        ioc->mask_interrupts = 1;
1478        him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1479        him_register |= MPI2_HIM_DIM | MPI2_HIM_RIM | MPI2_HIM_RESET_IRQ_MASK;
1480        writel(him_register, &ioc->chip->HostInterruptMask);
1481        ioc->base_readl(&ioc->chip->HostInterruptMask);
1482}
1483
1484/**
1485 * mpt3sas_base_unmask_interrupts - enable interrupts
1486 * @ioc: per adapter object
1487 *
1488 * Enables only the Reply interrupt.
1489 */
1490void
1491mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1492{
1493        u32 him_register;
1494
1495        him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1496        him_register &= ~MPI2_HIM_RIM;
1497        writel(him_register, &ioc->chip->HostInterruptMask);
1498        ioc->mask_interrupts = 0;
1499}
1500
1501union reply_descriptor {
1502        u64 word;
1503        struct {
1504                u32 low;
1505                u32 high;
1506        } u;
1507};
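/*
 * The union overlays one 64-bit reply descriptor with its two 32-bit
 * halves.  A slot the driver has reset (rpf->Words = ULLONG_MAX, see
 * _base_process_reply_queue() below) therefore reads back as:
 *
 *	union reply_descriptor rd;
 *
 *	rd.word = ULLONG_MAX;
 *	// rd.u.low == UINT_MAX && rd.u.high == UINT_MAX: slot unused
 */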
1508
1509static u32 base_mod64(u64 dividend, u32 divisor)
1510{
1511        u32 remainder;
1512
1513        if (!divisor)
1514                pr_err("mpt3sas: DIVISOR is zero in div fn\n");
1515        remainder = do_div(dividend, divisor);
1516        return remainder;
1517}
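/*
 * base_mod64() exists because a plain '%' on a 64-bit dividend would
 * pull in libgcc helpers (e.g. __umoddi3) that the kernel does not
 * link on 32-bit architectures; do_div() is the sanctioned substitute.
 * For example, base_mod64(10, 3) returns 1.
 */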
1518
1519/**
1520 * _base_process_reply_queue - Process reply descriptors from reply
1521 *              descriptor post queue.
1522 * @reply_q: per IRQ's reply queue object.
1523 *
1524 * Return: number of reply descriptors processed from reply
1525 *              descriptor queue.
1526 */
1527static int
1528_base_process_reply_queue(struct adapter_reply_queue *reply_q)
1529{
1530        union reply_descriptor rd;
1531        u64 completed_cmds;
1532        u8 request_descript_type;
1533        u16 smid;
1534        u8 cb_idx;
1535        u32 reply;
1536        u8 msix_index = reply_q->msix_index;
1537        struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1538        Mpi2ReplyDescriptorsUnion_t *rpf;
1539        u8 rc;
1540
1541        completed_cmds = 0;
1542        if (!atomic_add_unless(&reply_q->busy, 1, 1))
1543                return completed_cmds;
1544
1545        rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1546        request_descript_type = rpf->Default.ReplyFlags
1547             & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1548        if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1549                atomic_dec(&reply_q->busy);
1550                return completed_cmds;
1551        }
1552
1553        cb_idx = 0xFF;
1554        do {
1555                rd.word = le64_to_cpu(rpf->Words);
1556                if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1557                        goto out;
1558                reply = 0;
1559                smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1560                if (request_descript_type ==
1561                    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1562                    request_descript_type ==
1563                    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1564                    request_descript_type ==
1565                    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1566                        cb_idx = _base_get_cb_idx(ioc, smid);
1567                        if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1568                            (likely(mpt_callbacks[cb_idx] != NULL))) {
1569                                rc = mpt_callbacks[cb_idx](ioc, smid,
1570                                    msix_index, 0);
1571                                if (rc)
1572                                        mpt3sas_base_free_smid(ioc, smid);
1573                        }
1574                } else if (request_descript_type ==
1575                    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1576                        reply = le32_to_cpu(
1577                            rpf->AddressReply.ReplyFrameAddress);
1578                        if (reply > ioc->reply_dma_max_address ||
1579                            reply < ioc->reply_dma_min_address)
1580                                reply = 0;
1581                        if (smid) {
1582                                cb_idx = _base_get_cb_idx(ioc, smid);
1583                                if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1584                                    (likely(mpt_callbacks[cb_idx] != NULL))) {
1585                                        rc = mpt_callbacks[cb_idx](ioc, smid,
1586                                            msix_index, reply);
1587                                        if (reply)
1588                                                _base_display_reply_info(ioc,
1589                                                    smid, msix_index, reply);
1590                                        if (rc)
1591                                                mpt3sas_base_free_smid(ioc,
1592                                                    smid);
1593                                }
1594                        } else {
1595                                _base_async_event(ioc, msix_index, reply);
1596                        }
1597
1598                        /* reply free queue handling */
1599                        if (reply) {
1600                                ioc->reply_free_host_index =
1601                                    (ioc->reply_free_host_index ==
1602                                    (ioc->reply_free_queue_depth - 1)) ?
1603                                    0 : ioc->reply_free_host_index + 1;
1604                                ioc->reply_free[ioc->reply_free_host_index] =
1605                                    cpu_to_le32(reply);
1606                                if (ioc->is_mcpu_endpoint)
1607                                        _base_clone_reply_to_sys_mem(ioc,
1608                                                reply,
1609                                                ioc->reply_free_host_index);
1610                                writel(ioc->reply_free_host_index,
1611                                    &ioc->chip->ReplyFreeHostIndex);
1612                        }
1613                }
1614
1615                rpf->Words = cpu_to_le64(ULLONG_MAX);
1616                reply_q->reply_post_host_index =
1617                    (reply_q->reply_post_host_index ==
1618                    (ioc->reply_post_queue_depth - 1)) ? 0 :
1619                    reply_q->reply_post_host_index + 1;
1620                request_descript_type =
1621                    reply_q->reply_post_free[reply_q->reply_post_host_index].
1622                    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1623                completed_cmds++;
1624                /* Update the reply post host index after continuously
1625                 * processing the threshold number of Reply Descriptors,
1626                 * so that the firmware can find enough free entries to
1627                 * post new Reply Descriptors in the reply post queue.
1628                 */
1629                if (completed_cmds >= ioc->thresh_hold) {
1630                        if (ioc->combined_reply_queue) {
1631                                writel(reply_q->reply_post_host_index |
1632                                                ((msix_index  & 7) <<
1633                                                 MPI2_RPHI_MSIX_INDEX_SHIFT),
1634                                    ioc->replyPostRegisterIndex[msix_index/8]);
1635                        } else {
1636                                writel(reply_q->reply_post_host_index |
1637                                                (msix_index <<
1638                                                 MPI2_RPHI_MSIX_INDEX_SHIFT),
1639                                                &ioc->chip->ReplyPostHostIndex);
1640                        }
1641                        if (!reply_q->irq_poll_scheduled) {
1642                                reply_q->irq_poll_scheduled = true;
1643                                irq_poll_sched(&reply_q->irqpoll);
1644                        }
1645                        atomic_dec(&reply_q->busy);
1646                        return completed_cmds;
1647                }
1648                if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1649                        goto out;
1650                if (!reply_q->reply_post_host_index)
1651                        rpf = reply_q->reply_post_free;
1652                else
1653                        rpf++;
1654        } while (1);
1655
1656 out:
1657
1658        if (!completed_cmds) {
1659                atomic_dec(&reply_q->busy);
1660                return completed_cmds;
1661        }
1662
1663        if (ioc->is_warpdrive) {
1664                writel(reply_q->reply_post_host_index,
1665                ioc->reply_post_host_index[msix_index]);
1666                atomic_dec(&reply_q->busy);
1667                return completed_cmds;
1668        }
1669
1670        /* Update the Reply Post Host Index.
1671         * For HBAs that support the combined reply queue feature:
1672         * 1. Get the correct Supplemental Reply Post Host Index Register,
1673         *    i.e. the (msix_index / 8)th entry of the Supplemental Reply
1674         *    Post Host Index Register address bank replyPostRegisterIndex[].
1675         * 2. Update this register with the new reply host index value in
1676         *    the ReplyPostIndex field, and with msix_index reduced to a
1677         *    value between 0 and 7 (modulo 8) in the MSIxIndex field,
1678         *    since each Supplemental Reply Post Host Index Register
1679         *    serves 8 MSI-X vectors.
1680         *
1681         * For other HBAs, just update the Reply Post Host Index register
1682         * with the new reply host index value in the ReplyPostIndex field
1683         * and msix_index in the MSIxIndex field.
1684         */
1685        if (ioc->combined_reply_queue)
1686                writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1687                        MPI2_RPHI_MSIX_INDEX_SHIFT),
1688                        ioc->replyPostRegisterIndex[msix_index/8]);
1689        else
1690                writel(reply_q->reply_post_host_index | (msix_index <<
1691                        MPI2_RPHI_MSIX_INDEX_SHIFT),
1692                        &ioc->chip->ReplyPostHostIndex);
1693        atomic_dec(&reply_q->busy);
1694        return completed_cmds;
1695}
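/*
 * Example of the host index writes in _base_process_reply_queue() above
 * with the combined reply queue feature enabled: for msix_index == 11,
 * the new index is written to replyPostRegisterIndex[1] (11 / 8) with
 * the MSIxIndex field set to 3 (11 & 7); without the feature, it goes
 * to the single ReplyPostHostIndex register with MSIxIndex == 11.
 */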
1696
1697/**
1698 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1699 * @irq: irq number (not used)
1700 * @bus_id: cookie passed to request_irq(); points to the per-queue
1700 *          adapter_reply_queue object
1701 *
1702 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1703 */
1704static irqreturn_t
1705_base_interrupt(int irq, void *bus_id)
1706{
1707        struct adapter_reply_queue *reply_q = bus_id;
1708        struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1709
1710        if (ioc->mask_interrupts)
1711                return IRQ_NONE;
1712        if (reply_q->irq_poll_scheduled)
1713                return IRQ_HANDLED;
1714        return ((_base_process_reply_queue(reply_q) > 0) ?
1715                        IRQ_HANDLED : IRQ_NONE);
1716}
1717
1718/**
1719 * _base_irqpoll - IRQ poll callback handler
1720 * @irqpoll: irq_poll object
1721 * @budget: irq poll weight
1722 *
1723 * Return: number of reply descriptors processed.
1724 */
1725static int
1726_base_irqpoll(struct irq_poll *irqpoll, int budget)
1727{
1728        struct adapter_reply_queue *reply_q;
1729        int num_entries = 0;
1730
1731        reply_q = container_of(irqpoll, struct adapter_reply_queue,
1732                        irqpoll);
1733        if (reply_q->irq_line_enable) {
1734                disable_irq_nosync(reply_q->os_irq);
1735                reply_q->irq_line_enable = false;
1736        }
1737        num_entries = _base_process_reply_queue(reply_q);
1738        if (num_entries < budget) {
1739                irq_poll_complete(irqpoll);
1740                reply_q->irq_poll_scheduled = false;
1741                reply_q->irq_line_enable = true;
1742                enable_irq(reply_q->os_irq);
1743                /*
1744                 * Go for one more round of processing the
1745                 * reply descriptor post queue in case the HBA
1746                 * firmware posted some reply descriptors
1747                 * while the IRQ was being re-enabled.
1748                 */
1749                _base_process_reply_queue(reply_q);
1750        }
1751
1752        return num_entries;
1753}
1754
1755/**
1756 * _base_init_irqpolls - initialize IRQ polls
1757 * @ioc: per adapter object
1758 *
1759 * Return: nothing
1760 */
1761static void
1762_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1763{
1764        struct adapter_reply_queue *reply_q, *next;
1765
1766        if (list_empty(&ioc->reply_queue_list))
1767                return;
1768
1769        list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1770                irq_poll_init(&reply_q->irqpoll,
1771                        ioc->hba_queue_depth/4, _base_irqpoll);
1772                reply_q->irq_poll_scheduled = false;
1773                reply_q->irq_line_enable = true;
1774                reply_q->os_irq = pci_irq_vector(ioc->pdev,
1775                    reply_q->msix_index);
1776        }
1777}
1778
1779/**
1780 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1781 * @ioc: per adapter object
1782 *
1783 * Return: Whether or not MSI/X is enabled.
1784 */
1785static inline int
1786_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1787{
1788        return (ioc->facts.IOCCapabilities &
1789            MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1790}
1791
1792/**
1793 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1794 * @ioc: per adapter object
1795 * @poll: poll the reply descriptor queues in case the interrupt for
1796 *              a timed-out SCSI command was delayed
1797 * Context: non-ISR context
1798 *
1799 * Called when a Task Management request has completed.
1800 */
1801void
1802mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
1803{
1804        struct adapter_reply_queue *reply_q;
1805
1806        /* If the MSI-X capability is turned off,
1807         * then multi-queues are not enabled
1808         */
1809        if (!_base_is_controller_msix_enabled(ioc))
1810                return;
1811
1812        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1813                if (ioc->shost_recovery || ioc->remove_host ||
1814                                ioc->pci_error_recovery)
1815                        return;
1816                /* TMs are on msix_index == 0 */
1817                if (reply_q->msix_index == 0)
1818                        continue;
1819                synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1820                if (reply_q->irq_poll_scheduled) {
1821                        /* Calling irq_poll_disable will wait for any pending
1822                         * callbacks to have completed.
1823                         */
1824                        irq_poll_disable(&reply_q->irqpoll);
1825                        irq_poll_enable(&reply_q->irqpoll);
1826                        /* check how the scheduled poll has ended,
1827                         * clean up only if necessary
1828                         */
1829                        if (reply_q->irq_poll_scheduled) {
1830                                reply_q->irq_poll_scheduled = false;
1831                                reply_q->irq_line_enable = true;
1832                                enable_irq(reply_q->os_irq);
1833                        }
1834                }
1835                if (poll)
1836                        _base_process_reply_queue(reply_q);
1837        }
1838}
1839
1840/**
1841 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1842 * @cb_idx: callback index
1843 */
1844void
1845mpt3sas_base_release_callback_handler(u8 cb_idx)
1846{
1847        mpt_callbacks[cb_idx] = NULL;
1848}
1849
1850/**
1851 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1852 * @cb_func: callback function
1853 *
1854 * Return: Index of @cb_func.
1855 */
1856u8
1857mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1858{
1859        u8 cb_idx;
1860
1861        for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1862                if (mpt_callbacks[cb_idx] == NULL)
1863                        break;
1864
1865        mpt_callbacks[cb_idx] = cb_func;
1866        return cb_idx;
1867}
1868
1869/**
1870 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1871 */
1872void
1873mpt3sas_base_initialize_callback_handler(void)
1874{
1875        u8 cb_idx;
1876
1877        for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1878                mpt3sas_base_release_callback_handler(cb_idx);
1879}
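/*
 * Minimal usage sketch for the three handlers above (example_done() is
 * hypothetical; the scsih, ctl and transport modules register their
 * real callbacks this way during module init):
 *
 *	static u8 example_done(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 *		u8 msix_index, u32 reply)
 *	{
 *		return 1;	// let the caller free the message frame
 *	}
 *
 *	u8 cb_idx = mpt3sas_base_register_callback_handler(example_done);
 *	...
 *	mpt3sas_base_release_callback_handler(cb_idx);
 */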
1880
1881
1882/**
1883 * _base_build_zero_len_sge - build zero length sg entry
1884 * @ioc: per adapter object
1885 * @paddr: virtual address for SGE
1886 *
1887 * Create a zero length scatter gather entry to ensure the IOC's hardware
1888 * has something to use if the target device goes brain dead and tries
1889 * to send data even when none is asked for.
1890 */
1891static void
1892_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1893{
1894        u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1895            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1896            MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1897            MPI2_SGE_FLAGS_SHIFT);
1898        ioc->base_add_sg_single(paddr, flags_length, -1);
1899}
1900
1901/**
1902 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1903 * @paddr: virtual address for SGE
1904 * @flags_length: SGE flags and data transfer length
1905 * @dma_addr: Physical address
1906 */
1907static void
1908_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1909{
1910        Mpi2SGESimple32_t *sgel = paddr;
1911
1912        flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1913            MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1914        sgel->FlagsLength = cpu_to_le32(flags_length);
1915        sgel->Address = cpu_to_le32(dma_addr);
1916}
1917
1918
1919/**
1920 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1921 * @paddr: virtual address for SGE
1922 * @flags_length: SGE flags and data transfer length
1923 * @dma_addr: Physical address
1924 */
1925static void
1926_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1927{
1928        Mpi2SGESimple64_t *sgel = paddr;
1929
1930        flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1931            MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1932        sgel->FlagsLength = cpu_to_le32(flags_length);
1933        sgel->Address = cpu_to_le64(dma_addr);
1934}
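/*
 * In both helpers above, the SGE flags live in the top byte of the
 * FlagsLength word (MPI2_SGE_FLAGS_SHIFT is 24), leaving the low 24
 * bits for the transfer length.  For example, a 4 KiB simple element:
 *
 *	u32 flags_length = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT <<
 *			    MPI2_SGE_FLAGS_SHIFT) | 0x1000;
 */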
1935
1936/**
1937 * _base_get_chain_buffer_tracker - obtain chain tracker
1938 * @ioc: per adapter object
1939 * @scmd: SCSI commands of the IO request
1940 *
1941 * Return: chain tracker from chain_lookup table using key as
1942 * smid and smid's chain_offset.
1943 */
1944static struct chain_tracker *
1945_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1946                               struct scsi_cmnd *scmd)
1947{
1948        struct chain_tracker *chain_req;
1949        struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1950        u16 smid = st->smid;
1951        u8 chain_offset =
1952           atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
1953
1954        if (chain_offset == ioc->chains_needed_per_io)
1955                return NULL;
1956
1957        chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
1958        atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
1959        return chain_req;
1960}
1961
1962
1963/**
1964 * _base_build_sg - build generic sg
1965 * @ioc: per adapter object
1966 * @psge: virtual address for SGE
1967 * @data_out_dma: physical address for WRITES
1968 * @data_out_sz: data xfer size for WRITES
1969 * @data_in_dma: physical address for READS
1970 * @data_in_sz: data xfer size for READS
1971 */
1972static void
1973_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1974        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1975        size_t data_in_sz)
1976{
1977        u32 sgl_flags;
1978
1979        if (!data_out_sz && !data_in_sz) {
1980                _base_build_zero_len_sge(ioc, psge);
1981                return;
1982        }
1983
1984        if (data_out_sz && data_in_sz) {
1985                /* WRITE sgel first */
1986                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1987                    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1988                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1989                ioc->base_add_sg_single(psge, sgl_flags |
1990                    data_out_sz, data_out_dma);
1991
1992                /* incr sgel */
1993                psge += ioc->sge_size;
1994
1995                /* READ sgel last */
1996                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1997                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1998                    MPI2_SGE_FLAGS_END_OF_LIST);
1999                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2000                ioc->base_add_sg_single(psge, sgl_flags |
2001                    data_in_sz, data_in_dma);
2002        } else if (data_out_sz) /* WRITE */ {
2003                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2004                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2005                    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
2006                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2007                ioc->base_add_sg_single(psge, sgl_flags |
2008                    data_out_sz, data_out_dma);
2009        } else if (data_in_sz) /* READ */ {
2010                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2011                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2012                    MPI2_SGE_FLAGS_END_OF_LIST);
2013                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2014                ioc->base_add_sg_single(psge, sgl_flags |
2015                    data_in_sz, data_in_dma);
2016        }
2017}
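/*
 * Illustrative call with hypothetical sizes: a request that writes a
 * 16-byte parameter block and then reads back a 512-byte page would use
 *
 *	_base_build_sg(ioc, psge, param_dma, 16, page_dma, 512);
 *
 * yielding a HOST_TO_IOC write SGE followed by an END_OF_LIST read SGE.
 */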
2018
2019/* IEEE format sgls */
2020
2021/**
2022 * _base_build_nvme_prp - This function is called for NVMe end devices to build
2023 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
2024 * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
2025 * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
2026 * used to describe a larger data buffer.  If the data buffer is too large to
2027 * describe using the two PRP entries inside the NVMe message, then PRP1
2028 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2029 * list located elsewhere in memory to describe the remaining data memory
2030 * segments.  The PRP list will be contiguous.
2031 *
2032 * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
2033 * consists of a list of PRP entries to describe a number of noncontiguous
2034 * physical memory segments as a single memory buffer, just as a SGL does.  Note
2035 * however, that this function is only used by the IOCTL call, so the memory
2036 * given will be guaranteed to be contiguous.  There is no need to translate
2037 * non-contiguous SGL into a PRP in this case.  All PRPs will describe
2038 * contiguous regions of one page each.
2039 *
2040 * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
2041 * a PRP list pointer or a PRP element, depending upon the command.  PRP2
2042 * contains the second PRP element if the memory being described fits within 2
2043 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2044 *
2045 * A PRP list pointer contains the address of a PRP list, structured as a linear
2046 * array of PRP entries.  Each PRP entry in this list describes a segment of
2047 * physical memory.
2048 *
2049 * Each 64-bit PRP entry comprises an address and an offset field.  The address
2050 * always points at the beginning of a 4KB physical memory page, and the offset
2051 * describes where within that 4KB page the memory segment begins.  Only the
2052 * first element in a PRP list may contain a non-zero offset, implying that all
2053 * memory segments following the first begin at the start of a 4KB page.
2054 *
2055 * Each PRP element normally describes 4KB of physical memory, with exceptions
2056 * for the first and last elements in the list.  If the memory being described
2057 * by the list begins at a non-zero offset within the first 4KB page, then the
2058 * first PRP element will contain a non-zero offset indicating where the region
2059 * begins within the 4KB page.  The last memory segment may end before the end
2060 * of the 4KB segment, depending upon the overall size of the memory being
2061 * described by the PRP list.
2062 *
2063 * Since PRP entries lack any indication of size, the overall data buffer length
2064 * is used to determine where the end of the data memory buffer is located, and
2065 * how many PRP entries are required to describe it.
2066 *
2067 * @ioc: per adapter object
2068 * @smid: system request message index for getting associated SGL
2069 * @nvme_encap_request: the NVMe request msg frame pointer
2070 * @data_out_dma: physical address for WRITES
2071 * @data_out_sz: data xfer size for WRITES
2072 * @data_in_dma: physical address for READS
2073 * @data_in_sz: data xfer size for READS
2074 */
2075static void
2076_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2077        Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2078        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2079        size_t data_in_sz)
2080{
2081        int             prp_size = NVME_PRP_SIZE;
2082        __le64          *prp_entry, *prp1_entry, *prp2_entry;
2083        __le64          *prp_page;
2084        dma_addr_t      prp_entry_dma, prp_page_dma, dma_addr;
2085        u32             offset, entry_len;
2086        u32             page_mask_result, page_mask;
2087        size_t          length;
2088        struct mpt3sas_nvme_cmd *nvme_cmd =
2089                (void *)nvme_encap_request->NVMe_Command;
2090
2091        /*
2092         * Not all commands require a data transfer. If no data, just return
2093         * without constructing any PRP.
2094         */
2095        if (!data_in_sz && !data_out_sz)
2096                return;
2097        prp1_entry = &nvme_cmd->prp1;
2098        prp2_entry = &nvme_cmd->prp2;
2099        prp_entry = prp1_entry;
2100        /*
2101         * For the PRP entries, use the specially allocated buffer of
2102         * contiguous memory.
2103         */
2104        prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2105        prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2106
2107        /*
2108         * Check if we are within 1 entry of a page boundary; we don't
2109         * want our first entry to be a PRP List entry.
2110         */
2111        page_mask = ioc->page_size - 1;
2112        page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2113        if (!page_mask_result) {
2114                /* Bump up to next page boundary. */
2115                prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2116                prp_page_dma = prp_page_dma + prp_size;
2117        }
2118
2119        /*
2120         * Set PRP physical pointer, which initially points to the current PRP
2121         * DMA memory page.
2122         */
2123        prp_entry_dma = prp_page_dma;
2124
2125        /* Get physical address and length of the data buffer. */
2126        if (data_in_sz) {
2127                dma_addr = data_in_dma;
2128                length = data_in_sz;
2129        } else {
2130                dma_addr = data_out_dma;
2131                length = data_out_sz;
2132        }
2133
2134        /* Loop while the length is not zero. */
2135        while (length) {
2136                /*
2137                 * Check if we need to put a list pointer here if we are at
2138                 * page boundary - prp_size (8 bytes).
2139                 */
2140                page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2141                if (!page_mask_result) {
2142                        /*
2143                         * This is the last entry in a PRP List, so we need to
2144                         * put a PRP list pointer here.  What this does is:
2145                         *   - bump the current memory pointer to the next
2146                         *     address, which will be the next full page.
2147                         *   - set the PRP Entry to point to that page.  This
2148                         *     is now the PRP List pointer.
2149                         *   - bump the PRP Entry pointer to the start of the
2150                         *     next page.  Since all of this PRP memory is
2151                         *     contiguous, no need to get a new page - it's
2152                         *     just the next address.
2153                         */
2154                        prp_entry_dma += prp_size;
2155                        *prp_entry = cpu_to_le64(prp_entry_dma);
2156                        prp_entry++;
2157                }
2158
2159                /* Need to handle if entry will be part of a page. */
2160                offset = dma_addr & page_mask;
2161                entry_len = ioc->page_size - offset;
2162
2163                if (prp_entry == prp1_entry) {
2164                        /*
2165                         * Must fill in the first PRP pointer (PRP1) before
2166                         * moving on.
2167                         */
2168                        *prp1_entry = cpu_to_le64(dma_addr);
2169
2170                        /*
2171                         * Now point to the second PRP entry within the
2172                         * command (PRP2).
2173                         */
2174                        prp_entry = prp2_entry;
2175                } else if (prp_entry == prp2_entry) {
2176                        /*
2177                         * Should the PRP2 entry be a PRP List pointer or just
2178                         * a regular PRP pointer?  If more than one page of
2179                         * data remains, a PRP List pointer must be used.
2180                         */
2181                        if (length > ioc->page_size) {
2182                                /*
2183                                 * PRP2 will contain a PRP List pointer because
2184                                 * more PRP's are needed with this command. The
2185                                 * list will start at the beginning of the
2186                                 * contiguous buffer.
2187                                 */
2188                                *prp2_entry = cpu_to_le64(prp_entry_dma);
2189
2190                                /*
2191                                 * The next PRP Entry will be the start of the
2192                                 * first PRP List.
2193                                 */
2194                                prp_entry = prp_page;
2195                        } else {
2196                                /*
2197                                 * After this, the PRP Entries are complete.
2198                                 * This command uses 2 PRP's and no PRP list.
2199                                 */
2200                                *prp2_entry = cpu_to_le64(dma_addr);
2201                        }
2202                } else {
2203                        /*
2204                         * Put entry in list and bump the addresses.
2205                         *
2206                         * After PRP1 and PRP2 are filled in, this will fill in
2207                         * all remaining PRP entries in a PRP List, one per
2208                         * each time through the loop.
2209                         */
2210                        *prp_entry = cpu_to_le64(dma_addr);
2211                        prp_entry++;
2212                        prp_entry_dma += prp_size;
2213                }
2214
2215                /*
2216                 * Bump the phys address of the command's data buffer by the
2217                 * entry_len.
2218                 */
2219                dma_addr += entry_len;
2220
2221                /* Decrement length accounting for last partial page. */
2222                if (entry_len > length)
2223                        length = 0;
2224                else
2225                        length -= entry_len;
2226        }
2227}
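/*
 * Worked example, assuming ioc->page_size == 4096: a 10000-byte read
 * whose buffer starts at DMA address 0x10200 is described as
 *
 *	PRP1    = 0x10200     (offset 0x200; covers 4096 - 0x200 = 3584 bytes)
 *	PRP2    -> PRP list   (6416 bytes remain, more than one page)
 *	list[0] = 0x11000     (next 4096 bytes)
 *	list[1] = 0x12000     (final 2320 bytes)
 */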
2228
2229/**
2230 * base_make_prp_nvme - prepare PRPs (Physical Region Pages), the SGL
2231 * format specific to NVMe drives
2232 *
2233 * @ioc:                per adapter object
2234 * @scmd:               SCSI command from the mid-layer
2235 * @mpi_request:        mpi request
2236 * @smid:               msg Index
2237 * @sge_count:          scatter gather element count.
2238 *
2239 * Return:              nothing; the caller must already have decided,
2240 *                      via base_is_prp_possible(), that PRPs are required.
2241 */
2242static void
2243base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2244                struct scsi_cmnd *scmd,
2245                Mpi25SCSIIORequest_t *mpi_request,
2246                u16 smid, int sge_count)
2247{
2248        int sge_len, num_prp_in_chain = 0;
2249        Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2250        __le64 *curr_buff;
2251        dma_addr_t msg_dma, sge_addr, offset;
2252        u32 page_mask, page_mask_result;
2253        struct scatterlist *sg_scmd;
2254        u32 first_prp_len;
2255        int data_len = scsi_bufflen(scmd);
2256        u32 nvme_pg_size;
2257
2258        nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2259        /*
2260         * Nvme has a very convoluted prp format.  One prp is required
2261         * for each page or partial page. Driver need to split up OS sg_list
2262         * entries if it is longer than one page or cross a page
2263         * boundary.  Driver also have to insert a PRP list pointer entry as
2264         * the last entry in each physical page of the PRP list.
2265         *
2266         * NOTE: The first PRP "entry" is actually placed in the first
2267         * SGL entry in the main message as IEEE 64 format.  The 2nd
2268         * entry in the main message is the chain element, and the rest
2269         * of the PRP entries are built in the contiguous pcie buffer.
2270         */
2271        page_mask = nvme_pg_size - 1;
2272
2273        /*
2274         * Native SGL is needed.
2275         * Put a chain element in main message frame that points to the first
2276         * chain buffer.
2277         *
2278         * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
2279         *        a native SGL.
2280         */
2281
2282        /* Set main message chain element pointer */
2283        main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2284        /*
2285         * For NVMe the chain element needs to be the 2nd SG entry in the main
2286         * message.
2287         */
2288        main_chain_element = (Mpi25IeeeSgeChain64_t *)
2289                ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2290
2291        /*
2292         * For the PRP entries, use the specially allocated buffer of
2293         * contiguous memory.  Normal chain buffers can't be used
2294         * because each chain buffer would need to be the size of an OS
2295         * page (4k).
2296         */
2297        curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2298        msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2299
2300        main_chain_element->Address = cpu_to_le64(msg_dma);
2301        main_chain_element->NextChainOffset = 0;
2302        main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2303                        MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2304                        MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2305
2306        /* Build the first PRP; the SGE need not be page aligned. */
2307        ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2308        sg_scmd = scsi_sglist(scmd);
2309        sge_addr = sg_dma_address(sg_scmd);
2310        sge_len = sg_dma_len(sg_scmd);
2311
2312        offset = sge_addr & page_mask;
2313        first_prp_len = nvme_pg_size - offset;
2314
2315        ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2316        ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2317
2318        data_len -= first_prp_len;
2319
2320        if (sge_len > first_prp_len) {
2321                sge_addr += first_prp_len;
2322                sge_len -= first_prp_len;
2323        } else if (data_len && (sge_len == first_prp_len)) {
2324                sg_scmd = sg_next(sg_scmd);
2325                sge_addr = sg_dma_address(sg_scmd);
2326                sge_len = sg_dma_len(sg_scmd);
2327        }
2328
2329        for (;;) {
2330                offset = sge_addr & page_mask;
2331
2332                /* Insert a PRP list pointer at the page boundary. */
2333                page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2334                if (unlikely(!page_mask_result)) {
2335                        scmd_printk(KERN_NOTICE,
2336                                scmd, "page boundary curr_buff: 0x%p\n",
2337                                curr_buff);
2338                        msg_dma += 8;
2339                        *curr_buff = cpu_to_le64(msg_dma);
2340                        curr_buff++;
2341                        num_prp_in_chain++;
2342                }
2343
2344                *curr_buff = cpu_to_le64(sge_addr);
2345                curr_buff++;
2346                msg_dma += 8;
2347                num_prp_in_chain++;
2348
2349                sge_addr += nvme_pg_size;
2350                sge_len -= nvme_pg_size;
2351                data_len -= nvme_pg_size;
2352
2353                if (data_len <= 0)
2354                        break;
2355
2356                if (sge_len > 0)
2357                        continue;
2358
2359                sg_scmd = sg_next(sg_scmd);
2360                sge_addr = sg_dma_address(sg_scmd);
2361                sge_len = sg_dma_len(sg_scmd);
2362        }
2363
2364        main_chain_element->Length =
2365                cpu_to_le32(num_prp_in_chain * sizeof(u64));
2366        return;
2367}
2368
2369static bool
2370base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2371        struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2372{
2373        u32 data_length = 0;
2374        bool build_prp = true;
2375
2376        data_length = scsi_bufflen(scmd);
2377        if (pcie_device &&
2378            (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2379                build_prp = false;
2380                return build_prp;
2381        }
2382
2383        /* If the data length is <= 16K and the number of SGEs is <= 2,
2384         * build an IEEE SGL instead.
2385         */
2386        if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2387                build_prp = false;
2388
2389        return build_prp;
2390}
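/*
 * Example, assuming NVME_PRP_PAGE_SIZE is 4 KiB: a 12 KiB transfer in
 * two SGEs stays on the IEEE SGL path (12288 <= 16384 and sge_count
 * <= 2), while the same 12 KiB split across three SGEs takes the PRP
 * path.
 */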
2391
2392/**
2393 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2394 * determine if the driver needs to build a native SGL.  If so, that native
2395 * SGL is built in the special contiguous buffers allocated especially for
2396 * PCIe SGL creation.  If the driver will not build a native SGL, it
2397 * returns 1 and a normal IEEE SGL is built instead.  Currently this routine
2398 * supports NVMe.
2399 * @ioc: per adapter object
2400 * @mpi_request: mf request pointer
2401 * @smid: system request message index
2402 * @scmd: scsi command
2403 * @pcie_device: points to the PCIe device's info
2404 *
2405 * Return: 0 if native SGL was built, 1 if no SGL was built
2406 */
2407static int
2408_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2409        Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2410        struct _pcie_device *pcie_device)
2411{
2412        int sges_left;
2413
2414        /* Get the SG list pointer and info. */
2415        sges_left = scsi_dma_map(scmd);
2416        if (sges_left < 0) {
2417                sdev_printk(KERN_ERR, scmd->device,
2418                        "scsi_dma_map failed: request for %d bytes!\n",
2419                        scsi_bufflen(scmd));
2420                return 1;
2421        }
2422
2423        /* Check if we need to build a native SG list. */
2424        if (base_is_prp_possible(ioc, pcie_device,
2425                                scmd, sges_left) == 0) {
2426                /* PRPs are not possible; fall back to an IEEE SGL. */
2427                goto out;
2428        }
2429
2430        /*
2431         * Build native NVMe PRP.
2432         */
2433        base_make_prp_nvme(ioc, scmd, mpi_request,
2434                        smid, sges_left);
2435
2436        return 0;
2437out:
2438        scsi_dma_unmap(scmd);
2439        return 1;
2440}
2441
2442/**
2443 * _base_add_sg_single_ieee - add sg element for IEEE format
2444 * @paddr: virtual address for SGE
2445 * @flags: SGE flags
2446 * @chain_offset: number of 128 byte elements from start of segment
2447 * @length: data transfer length
2448 * @dma_addr: Physical address
2449 */
2450static void
2451_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2452        dma_addr_t dma_addr)
2453{
2454        Mpi25IeeeSgeChain64_t *sgel = paddr;
2455
2456        sgel->Flags = flags;
2457        sgel->NextChainOffset = chain_offset;
2458        sgel->Length = cpu_to_le32(length);
2459        sgel->Address = cpu_to_le64(dma_addr);
2460}
2461
2462/**
2463 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2464 * @ioc: per adapter object
2465 * @paddr: virtual address for SGE
2466 *
2467 * Create a zero length scatter gather entry to insure the IOCs hardware has
2468 * something to use if the target device goes brain dead and tries
2469 * to send data even when none is asked for.
2470 */
2471static void
2472_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2473{
2474        u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2475                MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2476                MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2477
2478        _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2479}
2480
2481/**
2482 * _base_build_sg_scmd - main sg creation routine (MPI SGE format;
2483 *              the pcie_device argument is unused here)
2484 * @ioc: per adapter object
2485 * @scmd: scsi command
2486 * @smid: system request message index
2487 * @unused: unused pcie_device pointer
2488 * Context: none.
2489 *
2490 * The main routine that builds scatter gather table from a given
2491 * scsi request sent via the .queuecommand main handler.
2492 *
2493 * Return: 0 success, anything else error
2494 */
2495static int
2496_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2497        struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2498{
2499        Mpi2SCSIIORequest_t *mpi_request;
2500        dma_addr_t chain_dma;
2501        struct scatterlist *sg_scmd;
2502        void *sg_local, *chain;
2503        u32 chain_offset;
2504        u32 chain_length;
2505        u32 chain_flags;
2506        int sges_left;
2507        u32 sges_in_segment;
2508        u32 sgl_flags;
2509        u32 sgl_flags_last_element;
2510        u32 sgl_flags_end_buffer;
2511        struct chain_tracker *chain_req;
2512
2513        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2514
2515        /* init scatter gather flags */
2516        sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2517        if (scmd->sc_data_direction == DMA_TO_DEVICE)
2518                sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2519        sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2520            << MPI2_SGE_FLAGS_SHIFT;
2521        sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2522            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2523            << MPI2_SGE_FLAGS_SHIFT;
2524        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2525
2526        sg_scmd = scsi_sglist(scmd);
2527        sges_left = scsi_dma_map(scmd);
2528        if (sges_left < 0) {
2529                sdev_printk(KERN_ERR, scmd->device,
2530                 "scsi_dma_map failed: request for %d bytes!\n",
2531                 scsi_bufflen(scmd));
2532                return -ENOMEM;
2533        }
2534
2535        sg_local = &mpi_request->SGL;
2536        sges_in_segment = ioc->max_sges_in_main_message;
2537        if (sges_left <= sges_in_segment)
2538                goto fill_in_last_segment;
2539
2540        mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2541            (sges_in_segment * ioc->sge_size))/4;
2542
2543        /* fill in main message segment when there is a chain following */
2544        while (sges_in_segment) {
2545                if (sges_in_segment == 1)
2546                        ioc->base_add_sg_single(sg_local,
2547                            sgl_flags_last_element | sg_dma_len(sg_scmd),
2548                            sg_dma_address(sg_scmd));
2549                else
2550                        ioc->base_add_sg_single(sg_local, sgl_flags |
2551                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2552                sg_scmd = sg_next(sg_scmd);
2553                sg_local += ioc->sge_size;
2554                sges_left--;
2555                sges_in_segment--;
2556        }
2557
2558        /* initializing the chain flags and pointers */
2559        chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2560        chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2561        if (!chain_req)
2562                return -1;
2563        chain = chain_req->chain_buffer;
2564        chain_dma = chain_req->chain_buffer_dma;
2565        do {
2566                sges_in_segment = (sges_left <=
2567                    ioc->max_sges_in_chain_message) ? sges_left :
2568                    ioc->max_sges_in_chain_message;
2569                chain_offset = (sges_left == sges_in_segment) ?
2570                    0 : (sges_in_segment * ioc->sge_size)/4;
2571                chain_length = sges_in_segment * ioc->sge_size;
2572                if (chain_offset) {
2573                        chain_offset = chain_offset <<
2574                            MPI2_SGE_CHAIN_OFFSET_SHIFT;
2575                        chain_length += ioc->sge_size;
2576                }
2577                ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2578                    chain_length, chain_dma);
2579                sg_local = chain;
2580                if (!chain_offset)
2581                        goto fill_in_last_segment;
2582
2583                /* fill in chain segments */
2584                while (sges_in_segment) {
2585                        if (sges_in_segment == 1)
2586                                ioc->base_add_sg_single(sg_local,
2587                                    sgl_flags_last_element |
2588                                    sg_dma_len(sg_scmd),
2589                                    sg_dma_address(sg_scmd));
2590                        else
2591                                ioc->base_add_sg_single(sg_local, sgl_flags |
2592                                    sg_dma_len(sg_scmd),
2593                                    sg_dma_address(sg_scmd));
2594                        sg_scmd = sg_next(sg_scmd);
2595                        sg_local += ioc->sge_size;
2596                        sges_left--;
2597                        sges_in_segment--;
2598                }
2599
2600                chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2601                if (!chain_req)
2602                        return -1;
2603                chain = chain_req->chain_buffer;
2604                chain_dma = chain_req->chain_buffer_dma;
2605        } while (1);
2606
2607
2608 fill_in_last_segment:
2609
2610        /* fill the last segment */
2611        while (sges_left) {
2612                if (sges_left == 1)
2613                        ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2614                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2615                else
2616                        ioc->base_add_sg_single(sg_local, sgl_flags |
2617                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2618                sg_scmd = sg_next(sg_scmd);
2619                sg_local += ioc->sge_size;
2620                sges_left--;
2621        }
2622
2623        return 0;
2624}
2625
2626/**
2627 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2628 * @ioc: per adapter object
2629 * @scmd: scsi command
2630 * @smid: system request message index
2631 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2632 * constructed on need.
2633 * Context: none.
2634 *
2635 * The main routine that builds scatter gather table from a given
2636 * scsi request sent via the .queuecommand main handler.
2637 *
2638 * Return: 0 success, anything else error
2639 */
2640static int
2641_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2642        struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2643{
2644        Mpi25SCSIIORequest_t *mpi_request;
2645        dma_addr_t chain_dma;
2646        struct scatterlist *sg_scmd;
2647        void *sg_local, *chain;
2648        u32 chain_offset;
2649        u32 chain_length;
2650        int sges_left;
2651        u32 sges_in_segment;
2652        u8 simple_sgl_flags;
2653        u8 simple_sgl_flags_last;
2654        u8 chain_sgl_flags;
2655        struct chain_tracker *chain_req;
2656
2657        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2658
2659        /* init scatter gather flags */
2660        simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2661            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2662        simple_sgl_flags_last = simple_sgl_flags |
2663            MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2664        chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2665            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2666
2667        /* Check if we need to build a native SG list. */
2668        if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2669                        smid, scmd, pcie_device) == 0)) {
2670                /* We built a native SG list, just return. */
2671                return 0;
2672        }
2673
2674        sg_scmd = scsi_sglist(scmd);
2675        sges_left = scsi_dma_map(scmd);
2676        if (sges_left < 0) {
2677                sdev_printk(KERN_ERR, scmd->device,
2678                        "scsi_dma_map failed: request for %d bytes!\n",
2679                        scsi_bufflen(scmd));
2680                return -ENOMEM;
2681        }
2682
2683        sg_local = &mpi_request->SGL;
2684        sges_in_segment = (ioc->request_sz -
2685                   offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2686        if (sges_left <= sges_in_segment)
2687                goto fill_in_last_segment;
2688
2689        mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2690            (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2691
2692        /* fill in main message segment when there is a chain following */
2693        while (sges_in_segment > 1) {
2694                _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2695                    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2696                sg_scmd = sg_next(sg_scmd);
2697                sg_local += ioc->sge_size_ieee;
2698                sges_left--;
2699                sges_in_segment--;
2700        }
2701
2702        /* initializing the pointers */
2703        chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2704        if (!chain_req)
2705                return -1;
2706        chain = chain_req->chain_buffer;
2707        chain_dma = chain_req->chain_buffer_dma;
2708        do {
2709                sges_in_segment = (sges_left <=
2710                    ioc->max_sges_in_chain_message) ? sges_left :
2711                    ioc->max_sges_in_chain_message;
2712                chain_offset = (sges_left == sges_in_segment) ?
2713                    0 : sges_in_segment;
2714                chain_length = sges_in_segment * ioc->sge_size_ieee;
2715                if (chain_offset)
2716                        chain_length += ioc->sge_size_ieee;
2717                _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2718                    chain_offset, chain_length, chain_dma);
2719
2720                sg_local = chain;
2721                if (!chain_offset)
2722                        goto fill_in_last_segment;
2723
2724                /* fill in chain segments */
2725                while (sges_in_segment) {
2726                        _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2727                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2728                        sg_scmd = sg_next(sg_scmd);
2729                        sg_local += ioc->sge_size_ieee;
2730                        sges_left--;
2731                        sges_in_segment--;
2732                }
2733
2734                chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2735                if (!chain_req)
2736                        return -1;
2737                chain = chain_req->chain_buffer;
2738                chain_dma = chain_req->chain_buffer_dma;
2739        } while (1);
2740
2741
2742 fill_in_last_segment:
2743
2744        /* fill the last segment */
2745        while (sges_left > 0) {
2746                if (sges_left == 1)
2747                        _base_add_sg_single_ieee(sg_local,
2748                            simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2749                            sg_dma_address(sg_scmd));
2750                else
2751                        _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2752                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2753                sg_scmd = sg_next(sg_scmd);
2754                sg_local += ioc->sge_size_ieee;
2755                sges_left--;
2756        }
2757
2758        return 0;
2759}
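
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * estimate how many chain buffers the routine above consumes for one IO.
 * The main message holds (sges_in_main_segment - 1) simple elements plus
 * one chain element; every non-final chain segment holds
 * max_sges_in_chain_message simple elements plus a further chain element,
 * and the final chain segment holds whatever remains.
 */
static inline u16
_example_num_chain_buffers(u16 sges_left, u16 sges_in_main_segment,
        u16 max_sges_in_chain_message)
{
        u16 remaining, chains = 1;

        if (sges_left <= sges_in_main_segment)
                return 0;       /* everything fits in the main message */

        /* one slot of the main segment is consumed by the chain element */
        remaining = sges_left - (sges_in_main_segment - 1);
        while (remaining > max_sges_in_chain_message) {
                remaining -= max_sges_in_chain_message;
                chains++;
        }
        return chains;
}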
2760
2761/**
2762 * _base_build_sg_ieee - build generic sg for IEEE format
2763 * @ioc: per adapter object
2764 * @psge: virtual address for SGE
2765 * @data_out_dma: physical address for WRITES
2766 * @data_out_sz: data xfer size for WRITES
2767 * @data_in_dma: physical address for READS
2768 * @data_in_sz: data xfer size for READS
2769 */
2770static void
2771_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2772        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2773        size_t data_in_sz)
2774{
2775        u8 sgl_flags;
2776
2777        if (!data_out_sz && !data_in_sz) {
2778                _base_build_zero_len_sge_ieee(ioc, psge);
2779                return;
2780        }
2781
2782        if (data_out_sz && data_in_sz) {
2783                /* WRITE sgel first */
2784                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2785                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2786                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2787                    data_out_dma);
2788
2789                /* incr sgel */
2790                psge += ioc->sge_size_ieee;
2791
2792                /* READ sgel last */
2793                sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2794                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2795                    data_in_dma);
2796        } else if (data_out_sz) /* WRITE */ {
2797                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2798                    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2799                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2800                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2801                    data_out_dma);
2802        } else if (data_in_sz) /* READ */ {
2803                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2804                    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2805                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2806                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2807                    data_in_dma);
2808        }
2809}
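
/*
 * Usage sketch (hypothetical wrapper, for illustration only): an internal
 * READ-style command with a single data-in buffer passes a zero-length
 * data-out descriptor, so the routine above takes the data_in_sz branch
 * and emits one simple IEEE SGE flagged END_OF_LIST.
 */
static inline void
_example_build_read_sgl(struct MPT3SAS_ADAPTER *ioc, void *psge,
        dma_addr_t data_in_dma, size_t data_in_sz)
{
        _base_build_sg_ieee(ioc, psge, 0, 0, data_in_dma, data_in_sz);
}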
2810
2811#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2812
2813/**
2814 * _base_config_dma_addressing - set dma addressing
2815 * @ioc: per adapter object
2816 * @pdev: PCI device struct
2817 *
2818 * Return: 0 for success, non-zero for failure.
2819 */
2820static int
2821_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2822{
2823        struct sysinfo s;
2824        int dma_mask;
2825
2826        if (ioc->is_mcpu_endpoint ||
2827            sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
            dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
2829                dma_mask = 32;
2830        /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2831        else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
2832                dma_mask = 63;
2833        else
2834                dma_mask = 64;
2835
2836        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
2837            dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
2838                return -ENODEV;
2839
2840        if (dma_mask > 32) {
2841                ioc->base_add_sg_single = &_base_add_sg_single_64;
2842                ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2843        } else {
2844                ioc->base_add_sg_single = &_base_add_sg_single_32;
2845                ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2846        }
2847
2848        si_meminfo(&s);
2849        ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2850                dma_mask, convert_to_kb(s.totalram));
2851
2852        return 0;
2853}
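
/*
 * Equivalent sketch (hypothetical helper, not used by the driver): the
 * pair of dma_set_mask()/dma_set_coherent_mask() calls above can also be
 * expressed with the combined kernel API, which sets both masks together
 * and fails the same way.
 */
static inline int
_example_set_dma_masks(struct pci_dev *pdev, int dma_mask)
{
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_mask));
}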
2854
2855/**
 * _base_check_enable_msix - check whether the controller is MSI-X capable
 * @ioc: per adapter object
 *
 * Check to see if the card is capable of MSI-X, and set the number
 * of available MSI-X vectors.
2861 */
2862static int
2863_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2864{
2865        int base;
2866        u16 message_control;
2867
        /* Check whether the controller is a SAS2008 B0 controller;
         * if it is, use IO-APIC instead of MSI-X.
         */
2871        if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2872            ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2873                return -EINVAL;
2874        }
2875
2876        base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2877        if (!base) {
2878                dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2879                return -EINVAL;
2880        }
2881
2882        /* get msix vector count */
2883        /* NUMA_IO not supported for older controllers */
2884        if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2885            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2886            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2887            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2888            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2889            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2890            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2891                ioc->msix_vector_count = 1;
2892        else {
2893                pci_read_config_word(ioc->pdev, base + 2, &message_control);
2894                ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2895        }
2896        dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2897                                  ioc->msix_vector_count));
2898        return 0;
2899}
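
/*
 * Worked example (illustrative): the MSI-X Message Control word read
 * above encodes the table size minus one in its low bits.  A controller
 * reporting message_control == 0x007F therefore yields
 * (0x7F & 0x3FF) + 1 = 128 vectors.
 */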
2900
2901/**
2902 * _base_free_irq - free irq
2903 * @ioc: per adapter object
2904 *
 * Walk the reply_queue list, freeing each entry and its IRQ.
2906 */
2907static void
2908_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2909{
2910        struct adapter_reply_queue *reply_q, *next;
2911
2912        if (list_empty(&ioc->reply_queue_list))
2913                return;
2914
2915        list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2916                list_del(&reply_q->list);
2917                if (ioc->smp_affinity_enable)
2918                        irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2919                            reply_q->msix_index), NULL);
2920                free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2921                         reply_q);
2922                kfree(reply_q);
2923        }
2924}
2925
2926/**
2927 * _base_request_irq - request irq
2928 * @ioc: per adapter object
2929 * @index: msix index into vector table
2930 *
 * Insert the newly allocated reply_queue into the list.
2932 */
2933static int
2934_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2935{
2936        struct pci_dev *pdev = ioc->pdev;
2937        struct adapter_reply_queue *reply_q;
2938        int r;
2939
2940        reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2941        if (!reply_q) {
2942                ioc_err(ioc, "unable to allocate memory %zu!\n",
2943                        sizeof(struct adapter_reply_queue));
2944                return -ENOMEM;
2945        }
2946        reply_q->ioc = ioc;
2947        reply_q->msix_index = index;
2948
2949        atomic_set(&reply_q->busy, 0);
2950        if (ioc->msix_enable)
2951                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2952                    ioc->driver_name, ioc->id, index);
2953        else
2954                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2955                    ioc->driver_name, ioc->id);
2956        r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2957                        IRQF_SHARED, reply_q->name, reply_q);
2958        if (r) {
2959                pr_err("%s: unable to allocate interrupt %d!\n",
2960                       reply_q->name, pci_irq_vector(pdev, index));
2961                kfree(reply_q);
2962                return -EBUSY;
2963        }
2964
2965        INIT_LIST_HEAD(&reply_q->list);
2966        list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2967        return 0;
2968}
2969
2970/**
 * _base_assign_reply_queues - assign an msix index to each cpu
 * @ioc: per adapter object
 *
 * The end user would need to set the affinity via /proc/irq/#/smp_affinity.
 *
 * It would be nice if we could call irq_set_affinity; however, it is not
 * an exported symbol.
2978 */
2979static void
2980_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2981{
2982        unsigned int cpu, nr_cpus, nr_msix, index = 0;
2983        struct adapter_reply_queue *reply_q;
2984        int local_numa_node;
2985
2986        if (!_base_is_controller_msix_enabled(ioc))
2987                return;
2988
2989        if (ioc->msix_load_balance)
2990                return;
2991
2992        memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2993
2994        nr_cpus = num_online_cpus();
2995        nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2996                                               ioc->facts.MaxMSIxVectors);
2997        if (!nr_msix)
2998                return;
2999
3000        if (ioc->smp_affinity_enable) {
3001
3002                /*
3003                 * set irq affinity to local numa node for those irqs
3004                 * corresponding to high iops queues.
3005                 */
3006                if (ioc->high_iops_queues) {
3007                        local_numa_node = dev_to_node(&ioc->pdev->dev);
3008                        for (index = 0; index < ioc->high_iops_queues;
3009                            index++) {
3010                                irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
3011                                    index), cpumask_of_node(local_numa_node));
3012                        }
3013                }
3014
3015                list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3016                        const cpumask_t *mask;
3017
3018                        if (reply_q->msix_index < ioc->high_iops_queues)
3019                                continue;
3020
3021                        mask = pci_irq_get_affinity(ioc->pdev,
3022                            reply_q->msix_index);
3023                        if (!mask) {
3024                                ioc_warn(ioc, "no affinity for msi %x\n",
3025                                         reply_q->msix_index);
3026                                goto fall_back;
3027                        }
3028
3029                        for_each_cpu_and(cpu, mask, cpu_online_mask) {
3030                                if (cpu >= ioc->cpu_msix_table_sz)
3031                                        break;
3032                                ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3033                        }
3034                }
3035                return;
3036        }
3037
3038fall_back:
3039        cpu = cpumask_first(cpu_online_mask);
3040        nr_msix -= ioc->high_iops_queues;
3041        index = 0;
3042
3043        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3044                unsigned int i, group = nr_cpus / nr_msix;
3045
3046                if (reply_q->msix_index < ioc->high_iops_queues)
3047                        continue;
3048
3049                if (cpu >= nr_cpus)
3050                        break;
3051
3052                if (index < nr_cpus % nr_msix)
3053                        group++;
3054
3055                for (i = 0 ; i < group ; i++) {
3056                        ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3057                        cpu = cpumask_next(cpu, cpu_online_mask);
3058                }
3059                index++;
3060        }
3061}
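
/*
 * Worked example (illustrative) for the fallback distribution above:
 * with nr_cpus = 12 online CPUs and nr_msix = 5 general reply queues,
 * nr_cpus / nr_msix = 2 and nr_cpus % nr_msix = 2, so the first two
 * queues serve 3 CPUs each and the remaining three serve 2 CPUs each:
 *
 *      queue 0 -> CPUs 0-2,  queue 1 -> CPUs 3-5,
 *      queue 2 -> CPUs 6-7,  queue 3 -> CPUs 8-9,  queue 4 -> CPUs 10-11
 */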
3062
3063/**
3064 * _base_check_and_enable_high_iops_queues - enable high iops mode
3065 * @ioc: per adapter object
3066 * @hba_msix_vector_count: msix vectors supported by HBA
3067 *
3068 * Enable high iops queues only if
3069 *  - HBA is a SEA/AERO controller and
 *  - MSI-X vectors supported by the HBA is 128 and
 *  - total CPU count in the system >= 16 and
 *  - loaded driver with default max_msix_vectors module parameter and
 *  - system booted in non kdump mode
 *
 * Return: nothing.
3076 */
3077static void
3078_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3079                int hba_msix_vector_count)
3080{
3081        u16 lnksta, speed;
3082
3083        if (perf_mode == MPT_PERF_MODE_IOPS ||
3084            perf_mode == MPT_PERF_MODE_LATENCY) {
3085                ioc->high_iops_queues = 0;
3086                return;
3087        }
3088
3089        if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3090
3091                pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3092                speed = lnksta & PCI_EXP_LNKSTA_CLS;
3093
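                /* A PCI_EXP_LNKSTA_CLS value of 0x4 means 16 GT/s (Gen4);
                 * on slower links the high iops queues stay disabled.
                 */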
3094                if (speed < 0x4) {
3095                        ioc->high_iops_queues = 0;
3096                        return;
3097                }
3098        }
3099
3100        if (!reset_devices && ioc->is_aero_ioc &&
3101            hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3102            num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3103            max_msix_vectors == -1)
3104                ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3105        else
3106                ioc->high_iops_queues = 0;
3107}
3108
3109/**
3110 * _base_disable_msix - disables msix
3111 * @ioc: per adapter object
3112 *
3113 */
3114static void
3115_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3116{
3117        if (!ioc->msix_enable)
3118                return;
3119        pci_free_irq_vectors(ioc->pdev);
3120        ioc->msix_enable = 0;
3121}
3122
3123/**
3124 * _base_alloc_irq_vectors - allocate msix vectors
3125 * @ioc: per adapter object
3126 *
3127 */
3128static int
3129_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3130{
3131        int i, irq_flags = PCI_IRQ_MSIX;
3132        struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3133        struct irq_affinity *descp = &desc;
3134
3135        if (ioc->smp_affinity_enable)
3136                irq_flags |= PCI_IRQ_AFFINITY;
3137        else
3138                descp = NULL;
3139
        ioc_info(ioc, "high_iops_queues: %d, reply_queue_count: %d\n",
            ioc->high_iops_queues, ioc->reply_queue_count);
3142
3143        i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3144            ioc->high_iops_queues,
3145            ioc->reply_queue_count, irq_flags, descp);
3146
3147        return i;
3148}
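
/*
 * Note on the .pre_vectors setting above (illustrative): it excludes the
 * first high_iops_queues vectors from the automatic affinity spreading
 * done by PCI_IRQ_AFFINITY; those vectors are instead pinned to the
 * controller's local NUMA node in _base_assign_reply_queues().
 */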
3149
3150/**
 * _base_enable_msix - enable msix, or fall back to io_apic
3152 * @ioc: per adapter object
3153 *
3154 */
3155static int
3156_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3157{
3158        int r;
3159        int i, local_max_msix_vectors;
3160        u8 try_msix = 0;
3161
3162        ioc->msix_load_balance = false;
3163
3164        if (msix_disable == -1 || msix_disable == 0)
3165                try_msix = 1;
3166
3167        if (!try_msix)
3168                goto try_ioapic;
3169
3170        if (_base_check_enable_msix(ioc) != 0)
3171                goto try_ioapic;
3172
3173        ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
        pr_info("\t number of cores: %d, max_msix_vectors: %d\n",
3175                ioc->cpu_count, max_msix_vectors);
3176        if (ioc->is_aero_ioc)
3177                _base_check_and_enable_high_iops_queues(ioc,
3178                        ioc->msix_vector_count);
3179        ioc->reply_queue_count =
3180                min_t(int, ioc->cpu_count + ioc->high_iops_queues,
3181                ioc->msix_vector_count);
3182
3183        if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3184                local_max_msix_vectors = (reset_devices) ? 1 : 8;
3185        else
3186                local_max_msix_vectors = max_msix_vectors;
3187
3188        if (local_max_msix_vectors > 0)
3189                ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3190                        ioc->reply_queue_count);
3191        else if (local_max_msix_vectors == 0)
3192                goto try_ioapic;
3193
3194        /*
3195         * Enable msix_load_balance only if combined reply queue mode is
3196         * disabled on SAS3 & above generation HBA devices.
3197         */
3198        if (!ioc->combined_reply_queue &&
3199            ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3200                ioc_info(ioc,
3201                    "combined ReplyQueue is off, Enabling msix load balance\n");
3202                ioc->msix_load_balance = true;
3203        }
3204
3205        /*
         * The smp affinity setting is not needed when msix load
         * balance is enabled.
3208         */
3209        if (ioc->msix_load_balance)
3210                ioc->smp_affinity_enable = 0;
3211
3212        r = _base_alloc_irq_vectors(ioc);
3213        if (r < 0) {
3214                ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3215                goto try_ioapic;
3216        }
3217
3218        ioc->msix_enable = 1;
3219        ioc->reply_queue_count = r;
3220        for (i = 0; i < ioc->reply_queue_count; i++) {
3221                r = _base_request_irq(ioc, i);
3222                if (r) {
3223                        _base_free_irq(ioc);
3224                        _base_disable_msix(ioc);
3225                        goto try_ioapic;
3226                }
3227        }
3228
3229        ioc_info(ioc, "High IOPs queues : %s\n",
3230                        ioc->high_iops_queues ? "enabled" : "disabled");
3231
3232        return 0;
3233
/* fall back to io_apic interrupt routing */
3235 try_ioapic:
3236        ioc->high_iops_queues = 0;
3237        ioc_info(ioc, "High IOPs queues : disabled\n");
3238        ioc->reply_queue_count = 1;
3239        r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3240        if (r < 0) {
3241                dfailprintk(ioc,
3242                            ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3243                                     r));
3244        } else
3245                r = _base_request_irq(ioc, 0);
3246
3247        return r;
3248}
3249
3250/**
3251 * mpt3sas_base_unmap_resources - free controller resources
3252 * @ioc: per adapter object
3253 */
3254static void
3255mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3256{
3257        struct pci_dev *pdev = ioc->pdev;
3258
3259        dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3260
3261        _base_free_irq(ioc);
3262        _base_disable_msix(ioc);
3263
3264        kfree(ioc->replyPostRegisterIndex);
3265        ioc->replyPostRegisterIndex = NULL;
3266
3267
3268        if (ioc->chip_phys) {
3269                iounmap(ioc->chip);
3270                ioc->chip_phys = 0;
3271        }
3272
3273        if (pci_is_enabled(pdev)) {
3274                pci_release_selected_regions(ioc->pdev, ioc->bars);
3275                pci_disable_pcie_error_reporting(pdev);
3276                pci_disable_device(pdev);
3277        }
3278}
3279
3280static int
3281_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3282
3283/**
3284 * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
3285 *     and if it is in fault state then issue diag reset.
3286 * @ioc: per adapter object
3287 *
3288 * Returns: 0 for success, non-zero for failure.
3289 */
3290static int
3291_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3292{
3293        u32 ioc_state;
3294        int rc = -EFAULT;
3295
3296        dinitprintk(ioc, pr_info("%s\n", __func__));
3297        if (ioc->pci_error_recovery)
3298                return 0;
3299        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3300        dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3301
3302        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3303                mpt3sas_print_fault_code(ioc, ioc_state &
3304                    MPI2_DOORBELL_DATA_MASK);
3305                rc = _base_diag_reset(ioc);
3306        } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3307            MPI2_IOC_STATE_COREDUMP) {
3308                mpt3sas_print_coredump_info(ioc, ioc_state &
3309                     MPI2_DOORBELL_DATA_MASK);
3310                mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3311                rc = _base_diag_reset(ioc);
3312        }
3313
3314        return rc;
3315}
3316
3317/**
3318 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3319 * @ioc: per adapter object
3320 *
3321 * Return: 0 for success, non-zero for failure.
3322 */
3323int
3324mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3325{
3326        struct pci_dev *pdev = ioc->pdev;
3327        u32 memap_sz;
3328        u32 pio_sz;
3329        int i, r = 0, rc;
3330        u64 pio_chip = 0;
3331        phys_addr_t chip_phys = 0;
3332        struct adapter_reply_queue *reply_q;
3333
3334        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3335
3336        ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3337        if (pci_enable_device_mem(pdev)) {
3338                ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3339                ioc->bars = 0;
3340                return -ENODEV;
3341        }
3342
3343
3344        if (pci_request_selected_regions(pdev, ioc->bars,
3345            ioc->driver_name)) {
3346                ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3347                ioc->bars = 0;
3348                r = -ENODEV;
3349                goto out_fail;
3350        }
3351
3352/* AER (Advanced Error Reporting) hooks */
3353        pci_enable_pcie_error_reporting(pdev);
3354
3355        pci_set_master(pdev);
3356
3357
3358        if (_base_config_dma_addressing(ioc, pdev) != 0) {
3359                ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3360                r = -ENODEV;
3361                goto out_fail;
3362        }
3363
3364        for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3365             (!memap_sz || !pio_sz); i++) {
3366                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3367                        if (pio_sz)
3368                                continue;
3369                        pio_chip = (u64)pci_resource_start(pdev, i);
3370                        pio_sz = pci_resource_len(pdev, i);
3371                } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3372                        if (memap_sz)
3373                                continue;
3374                        ioc->chip_phys = pci_resource_start(pdev, i);
3375                        chip_phys = ioc->chip_phys;
3376                        memap_sz = pci_resource_len(pdev, i);
3377                        ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3378                }
3379        }
3380
3381        if (ioc->chip == NULL) {
3382                ioc_err(ioc,
                    "unable to map adapter memory or resource not found!\n");
3384                r = -EINVAL;
3385                goto out_fail;
3386        }
3387
3388        mpt3sas_base_mask_interrupts(ioc);
3389
3390        r = _base_get_ioc_facts(ioc);
3391        if (r) {
3392                rc = _base_check_for_fault_and_issue_reset(ioc);
3393                if (rc || (_base_get_ioc_facts(ioc)))
3394                        goto out_fail;
3395        }
3396
3397        if (!ioc->rdpq_array_enable_assigned) {
3398                ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3399                ioc->rdpq_array_enable_assigned = 1;
3400        }
3401
3402        r = _base_enable_msix(ioc);
3403        if (r)
3404                goto out_fail;
3405
3406        if (!ioc->is_driver_loading)
3407                _base_init_irqpolls(ioc);
3408        /* Use the Combined reply queue feature only for SAS3 C0 & higher
3409         * revision HBAs and also only when reply queue count is greater than 8
3410         */
3411        if (ioc->combined_reply_queue) {
                /* Determine the Supplemental Reply Post Host Index Registers
                 * address. The Supplemental Reply Post Host Index Registers
                 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and
                 * each subsequent register is at an offset of
                 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the
                 * previous one.
                 */
3418                ioc->replyPostRegisterIndex = kcalloc(
3419                     ioc->combined_reply_index_count,
3420                     sizeof(resource_size_t *), GFP_KERNEL);
3421                if (!ioc->replyPostRegisterIndex) {
3422                        ioc_err(ioc,
3423                            "allocation for replyPostRegisterIndex failed!\n");
3424                        r = -ENOMEM;
3425                        goto out_fail;
3426                }
3427
3428                for (i = 0; i < ioc->combined_reply_index_count; i++) {
3429                        ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3430                             ((u8 __force *)&ioc->chip->Doorbell +
3431                             MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3432                             (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3433                }
3434        }
3435
3436        if (ioc->is_warpdrive) {
3437                ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3438                    &ioc->chip->ReplyPostHostIndex;
3439
3440                for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3441                        ioc->reply_post_host_index[i] =
3442                        (resource_size_t __iomem *)
3443                        ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3444                        * 4)));
3445        }
3446
3447        list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3448                pr_info("%s: %s enabled: IRQ %d\n",
3449                        reply_q->name,
3450                        ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3451                        pci_irq_vector(ioc->pdev, reply_q->msix_index));
3452
3453        ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3454                 &chip_phys, ioc->chip, memap_sz);
3455        ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3456                 (unsigned long long)pio_chip, pio_sz);
3457
3458        /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3459        pci_save_state(pdev);
3460        return 0;
3461
3462 out_fail:
3463        mpt3sas_base_unmap_resources(ioc);
3464        return r;
3465}
3466
3467/**
3468 * mpt3sas_base_get_msg_frame - obtain request mf pointer
3469 * @ioc: per adapter object
 * @smid: system request message index (smid zero is invalid)
3471 *
3472 * Return: virt pointer to message frame.
3473 */
3474void *
3475mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3476{
3477        return (void *)(ioc->request + (smid * ioc->request_sz));
3478}
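
/*
 * Worked example (illustrative, assuming a typical 128-byte request
 * frame): smid 3 resolves to ioc->request + 3 * 128 = ioc->request +
 * 0x180.  smid 0 is never handed out, so the frame at offset 0 is not
 * used for requests.
 */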
3479
3480/**
3481 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3482 * @ioc: per adapter object
3483 * @smid: system request message index
3484 *
3485 * Return: virt pointer to sense buffer.
3486 */
3487void *
3488mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3489{
3490        return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3491}
3492
3493/**
3494 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3495 * @ioc: per adapter object
3496 * @smid: system request message index
3497 *
3498 * Return: phys pointer to the low 32bit address of the sense buffer.
3499 */
3500__le32
3501mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3502{
3503        return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3504            SCSI_SENSE_BUFFERSIZE));
3505}
3506
3507/**
3508 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3509 * @ioc: per adapter object
3510 * @smid: system request message index
3511 *
3512 * Return: virt pointer to a PCIe SGL.
3513 */
3514void *
3515mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3516{
3517        return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3518}
3519
3520/**
3521 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3522 * @ioc: per adapter object
3523 * @smid: system request message index
3524 *
3525 * Return: phys pointer to the address of the PCIe buffer.
3526 */
3527dma_addr_t
3528mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3529{
3530        return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3531}
3532
3533/**
3534 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3535 * @ioc: per adapter object
3536 * @phys_addr: lower 32 physical addr of the reply
3537 *
3538 * Converts 32bit lower physical addr into a virt address.
3539 */
3540void *
3541mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3542{
3543        if (!phys_addr)
3544                return NULL;
3545        return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3546}
3547
3548/**
3549 * _base_get_msix_index - get the msix index
3550 * @ioc: per adapter object
3551 * @scmd: scsi_cmnd object
3552 *
 * Return: msix index of the general reply queues, i.e. the reply queue on
 * which the IO request's reply should be posted by the HBA firmware.
3556 */
3557static inline u8
3558_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3559        struct scsi_cmnd *scmd)
3560{
3561        /* Enables reply_queue load balancing */
3562        if (ioc->msix_load_balance)
3563                return ioc->reply_queue_count ?
3564                    base_mod64(atomic64_add_return(1,
3565                    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3566
3567        return ioc->cpu_msix_table[raw_smp_processor_id()];
3568}
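
/*
 * Sketch (illustrative): with msix_load_balance enabled and, say, 8 reply
 * queues, successive IOs see total_io_cnt values 1, 2, 3, ... and are
 * spread round-robin onto queues 1, 2, ..., 7, 0, 1, ... by base_mod64();
 * otherwise the queue is simply the per-CPU entry in cpu_msix_table.
 */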
3569
3570/**
 * _base_sdev_nr_inflight_request - get the number of inflight requests
 *                                  of a request queue.
 * @q: request_queue object
 *
 * Return: the number of inflight requests of a request queue.
3576 */
3577inline unsigned long
3578_base_sdev_nr_inflight_request(struct request_queue *q)
3579{
3580        struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
3581
3582        return atomic_read(&hctx->nr_active);
3583}
3584
3585
3586/**
3587 * _base_get_high_iops_msix_index - get the msix index of
3588 *                              high iops queues
3589 * @ioc: per adapter object
3590 * @scmd: scsi_cmnd object
3591 *
 * Return: msix index of the high iops reply queues, i.e. the high iops
 * reply queue on which the IO request's reply should be posted by the
 * HBA firmware.
3595 */
3596static inline u8
3597_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3598        struct scsi_cmnd *scmd)
3599{
        /*
         * Round-robin the IO interrupts among the high iops reply
         * queues in batches of 16 when the number of outstanding IOs
         * on the target device is >= 8.
         */
3605        if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
3606            MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3607                return base_mod64((
3608                    atomic64_add_return(1, &ioc->high_iops_outstanding) /
3609                    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3610                    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3611
3612        return _base_get_msix_index(ioc, scmd);
3613}
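
/*
 * Worked example (illustrative, assuming the header values of 16 for
 * MPT3SAS_HIGH_IOPS_BATCH_COUNT and 8 for MPT3SAS_HIGH_IOPS_REPLY_QUEUES):
 * high_iops_outstanding values 1..15 map to queue 0, 16..31 to queue 1,
 * and 128..143 wrap back to queue 0, so replies move to a new queue every
 * 16 IOs.
 */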
3614
3615/**
3616 * mpt3sas_base_get_smid - obtain a free smid from internal queue
3617 * @ioc: per adapter object
3618 * @cb_idx: callback index
3619 *
3620 * Return: smid (zero is invalid)
3621 */
3622u16
3623mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3624{
3625        unsigned long flags;
3626        struct request_tracker *request;
3627        u16 smid;
3628
3629        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3630        if (list_empty(&ioc->internal_free_list)) {
3631                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3632                ioc_err(ioc, "%s: smid not available\n", __func__);
3633                return 0;
3634        }
3635
3636        request = list_entry(ioc->internal_free_list.next,
3637            struct request_tracker, tracker_list);
3638        request->cb_idx = cb_idx;
3639        smid = request->smid;
3640        list_del(&request->tracker_list);
3641        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3642        return smid;
3643}
3644
3645/**
3646 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3647 * @ioc: per adapter object
3648 * @cb_idx: callback index
3649 * @scmd: pointer to scsi command object
3650 *
3651 * Return: smid (zero is invalid)
3652 */
3653u16
3654mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3655        struct scsi_cmnd *scmd)
3656{
3657        struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3658        unsigned int tag = scmd->request->tag;
3659        u16 smid;
3660
3661        smid = tag + 1;
3662        request->cb_idx = cb_idx;
3663        request->smid = smid;
3664        request->scmd = scmd;
3665        INIT_LIST_HEAD(&request->chain_list);
3666        return smid;
3667}
3668
3669/**
3670 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3671 * @ioc: per adapter object
3672 * @cb_idx: callback index
3673 *
3674 * Return: smid (zero is invalid)
3675 */
3676u16
3677mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3678{
3679        unsigned long flags;
3680        struct request_tracker *request;
3681        u16 smid;
3682
3683        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3684        if (list_empty(&ioc->hpr_free_list)) {
3685                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3686                return 0;
3687        }
3688
3689        request = list_entry(ioc->hpr_free_list.next,
3690            struct request_tracker, tracker_list);
3691        request->cb_idx = cb_idx;
3692        smid = request->smid;
3693        list_del(&request->tracker_list);
3694        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3695        return smid;
3696}
3697
3698static void
3699_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3700{
3701        /*
3702         * See _wait_for_commands_to_complete() call with regards to this code.
3703         */
3704        if (ioc->shost_recovery && ioc->pending_io_count) {
3705                ioc->pending_io_count = scsi_host_busy(ioc->shost);
3706                if (ioc->pending_io_count == 0)
3707                        wake_up(&ioc->reset_wq);
3708        }
3709}
3710
3711void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3712                           struct scsiio_tracker *st)
3713{
3714        if (WARN_ON(st->smid == 0))
3715                return;
3716        st->cb_idx = 0xFF;
3717        st->direct_io = 0;
3718        st->scmd = NULL;
3719        atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3720        st->smid = 0;
3721}
3722
3723/**
3724 * mpt3sas_base_free_smid - put smid back on free_list
3725 * @ioc: per adapter object
3726 * @smid: system request message index
3727 */
3728void
3729mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3730{
3731        unsigned long flags;
3732        int i;
3733
3734        if (smid < ioc->hi_priority_smid) {
3735                struct scsiio_tracker *st;
3736                void *request;
3737
3738                st = _get_st_from_smid(ioc, smid);
3739                if (!st) {
3740                        _base_recovery_check(ioc);
3741                        return;
3742                }
3743
3744                /* Clear MPI request frame */
3745                request = mpt3sas_base_get_msg_frame(ioc, smid);
3746                memset(request, 0, ioc->request_sz);
3747
3748                mpt3sas_base_clear_st(ioc, st);
3749                _base_recovery_check(ioc);
3750                return;
3751        }
3752
3753        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3754        if (smid < ioc->internal_smid) {
3755                /* hi-priority */
3756                i = smid - ioc->hi_priority_smid;
3757                ioc->hpr_lookup[i].cb_idx = 0xFF;
3758                list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3759        } else if (smid <= ioc->hba_queue_depth) {
3760                /* internal queue */
3761                i = smid - ioc->internal_smid;
3762                ioc->internal_lookup[i].cb_idx = 0xFF;
3763                list_add(&ioc->internal_lookup[i].tracker_list,
3764                    &ioc->internal_free_list);
3765        }
3766        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3767}
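
/*
 * smid layout sketch (illustrative): the range checks above partition the
 * smid space as
 *
 *      1 .. hi_priority_smid - 1             SCSI IO trackers (block tag + 1)
 *      hi_priority_smid .. internal_smid - 1 high-priority trackers
 *      internal_smid .. hba_queue_depth      internal trackers
 *
 * which is why SCSI IO smids are recycled via the block layer while the
 * other two ranges return to driver-private free lists.
 */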
3768
3769/**
3770 * _base_mpi_ep_writeq - 32 bit write to MMIO
3771 * @b: data payload
3772 * @addr: address in MMIO space
3773 * @writeq_lock: spin lock
3774 *
 * This is special handling for MPI EP to take care of 32-bit
 * environments where it is not guaranteed that the entire word
 * is sent in one transfer.
3778 */
3779static inline void
3780_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3781                                        spinlock_t *writeq_lock)
3782{
3783        unsigned long flags;
3784
3785        spin_lock_irqsave(writeq_lock, flags);
3786        __raw_writel((u32)(b), addr);
3787        __raw_writel((u32)(b >> 32), (addr + 4));
3788        spin_unlock_irqrestore(writeq_lock, flags);
3789}
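
/*
 * Note (illustrative): the writeq_lock above keeps the two 32-bit halves
 * of the descriptor post atomic with respect to other CPUs; without it,
 * two concurrent posters could interleave their low/high writes and the
 * controller would see a torn request descriptor.
 */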
3790
3791/**
3792 * _base_writeq - 64 bit write to MMIO
3793 * @b: data payload
3794 * @addr: address in MMIO space
3795 * @writeq_lock: spin lock
3796 *
 * Glue for handling an atomic 64 bit word to MMIO. This special handling
 * takes care of 32-bit environments where it is not guaranteed that the
 * entire word is sent in one transfer.
3800 */
3801#if defined(writeq) && defined(CONFIG_64BIT)
3802static inline void
3803_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3804{
3805        wmb();
3806        __raw_writeq(b, addr);
3807        barrier();
3808}
3809#else
3810static inline void
3811_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3812{
3813        _base_mpi_ep_writeq(b, addr, writeq_lock);
3814}
3815#endif
3816
3817/**
3818 * _base_set_and_get_msix_index - get the msix index and assign to msix_io
3819 *                                variable of scsi tracker
3820 * @ioc: per adapter object
3821 * @smid: system request message index
3822 *
3823 * returns msix index.
3824 */
3825static u8
3826_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3827{
3828        struct scsiio_tracker *st = NULL;
3829
3830        if (smid < ioc->hi_priority_smid)
3831                st = _get_st_from_smid(ioc, smid);
3832
3833        if (st == NULL)
3834                return  _base_get_msix_index(ioc, NULL);
3835
3836        st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3837        return st->msix_io;
3838}
3839
3840/**
3841 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3842 * @ioc: per adapter object
3843 * @smid: system request message index
3844 * @handle: device handle
3845 */
3846static void
3847_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
3848        u16 smid, u16 handle)
3849{
3850        Mpi2RequestDescriptorUnion_t descriptor;
3851        u64 *request = (u64 *)&descriptor;
3852        void *mpi_req_iomem;
3853        __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3854
3855        _clone_sg_entries(ioc, (void *) mfp, smid);
3856        mpi_req_iomem = (void __force *)ioc->chip +
3857                        MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3858        _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3859                                        ioc->request_sz);
3860        descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3861        descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3862        descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3863        descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3864        descriptor.SCSIIO.LMID = 0;
3865        _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3866            &ioc->scsi_lookup_lock);
3867}
3868
3869/**
3870 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3871 * @ioc: per adapter object
3872 * @smid: system request message index
3873 * @handle: device handle
3874 */
3875static void
3876_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3877{
3878        Mpi2RequestDescriptorUnion_t descriptor;
3879        u64 *request = (u64 *)&descriptor;
3880
3881
3882        descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3883        descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3884        descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3885        descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3886        descriptor.SCSIIO.LMID = 0;
3887        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3888            &ioc->scsi_lookup_lock);
3889}
3890
3891/**
3892 * _base_put_smid_fast_path - send fast path request to firmware
3893 * @ioc: per adapter object
3894 * @smid: system request message index
3895 * @handle: device handle
3896 */
3897static void
3898_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3899        u16 handle)
3900{
3901        Mpi2RequestDescriptorUnion_t descriptor;
3902        u64 *request = (u64 *)&descriptor;
3903
3904        descriptor.SCSIIO.RequestFlags =
3905            MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3906        descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3907        descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3908        descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3909        descriptor.SCSIIO.LMID = 0;
3910        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3911            &ioc->scsi_lookup_lock);
3912}
3913
3914/**
3915 * _base_put_smid_hi_priority - send Task Management request to firmware
3916 * @ioc: per adapter object
3917 * @smid: system request message index
 * @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0.
3919 */
3920static void
3921_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3922        u16 msix_task)
3923{
3924        Mpi2RequestDescriptorUnion_t descriptor;
3925        void *mpi_req_iomem;
3926        u64 *request;
3927
3928        if (ioc->is_mcpu_endpoint) {
3929                __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3930
3931                /* TBD 256 is offset within sys register. */
3932                mpi_req_iomem = (void __force *)ioc->chip
3933                                        + MPI_FRAME_START_OFFSET
3934                                        + (smid * ioc->request_sz);
3935                _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3936                                                        ioc->request_sz);
3937        }
3938
3939        request = (u64 *)&descriptor;
3940
3941        descriptor.HighPriority.RequestFlags =
3942            MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3943        descriptor.HighPriority.MSIxIndex =  msix_task;
3944        descriptor.HighPriority.SMID = cpu_to_le16(smid);
3945        descriptor.HighPriority.LMID = 0;
3946        descriptor.HighPriority.Reserved1 = 0;
3947        if (ioc->is_mcpu_endpoint)
3948                _base_mpi_ep_writeq(*request,
3949                                &ioc->chip->RequestDescriptorPostLow,
3950                                &ioc->scsi_lookup_lock);
3951        else
3952                _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3953                    &ioc->scsi_lookup_lock);
3954}
3955
3956/**
3957 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
3958 *  firmware
3959 * @ioc: per adapter object
3960 * @smid: system request message index
3961 */
3962void
3963mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3964{
3965        Mpi2RequestDescriptorUnion_t descriptor;
3966        u64 *request = (u64 *)&descriptor;
3967
3968        descriptor.Default.RequestFlags =
3969                MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3970        descriptor.Default.MSIxIndex =  _base_set_and_get_msix_index(ioc, smid);
3971        descriptor.Default.SMID = cpu_to_le16(smid);
3972        descriptor.Default.LMID = 0;
3973        descriptor.Default.DescriptorTypeDependent = 0;
3974        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3975            &ioc->scsi_lookup_lock);
3976}
3977
3978/**
3979 * _base_put_smid_default - Default, primarily used for config pages
3980 * @ioc: per adapter object
3981 * @smid: system request message index
3982 */
3983static void
3984_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3985{
3986        Mpi2RequestDescriptorUnion_t descriptor;
3987        void *mpi_req_iomem;
3988        u64 *request;
3989
3990        if (ioc->is_mcpu_endpoint) {
3991                __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3992
3993                _clone_sg_entries(ioc, (void *) mfp, smid);
3994                /* TBD 256 is offset within sys register */
3995                mpi_req_iomem = (void __force *)ioc->chip +
3996                        MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3997                _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3998                                                        ioc->request_sz);
3999        }
4000        request = (u64 *)&descriptor;
4001        descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4002        descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4003        descriptor.Default.SMID = cpu_to_le16(smid);
4004        descriptor.Default.LMID = 0;
4005        descriptor.Default.DescriptorTypeDependent = 0;
4006        if (ioc->is_mcpu_endpoint)
4007                _base_mpi_ep_writeq(*request,
4008                                &ioc->chip->RequestDescriptorPostLow,
4009                                &ioc->scsi_lookup_lock);
4010        else
4011                _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4012                                &ioc->scsi_lookup_lock);
4013}
4014
4015/**
4016 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4017 *   Atomic Request Descriptor
4018 * @ioc: per adapter object
4019 * @smid: system request message index
4020 * @handle: device handle, unused in this function, for function type match
4021 *
4022 * Return nothing.
4023 */
4024static void
4025_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4026        u16 handle)
4027{
4028        Mpi26AtomicRequestDescriptor_t descriptor;
4029        u32 *request = (u32 *)&descriptor;
4030
4031        descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4032        descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4033        descriptor.SMID = cpu_to_le16(smid);
4034
4035        writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4036}
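
/*
 * Note (illustrative): the atomic descriptor variants post a single
 * 32-bit word, so a plain writel() suffices and no writeq_lock is
 * required, unlike the 64-bit descriptor paths above.
 */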
4037
4038/**
4039 * _base_put_smid_fast_path_atomic - send fast path request to firmware
4040 * using Atomic Request Descriptor
4041 * @ioc: per adapter object
4042 * @smid: system request message index
4043 * @handle: device handle, unused in this function, for function type match
4044 * Return nothing
4045 */
4046static void
4047_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4048        u16 handle)
4049{
4050        Mpi26AtomicRequestDescriptor_t descriptor;
4051        u32 *request = (u32 *)&descriptor;
4052
4053        descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4054        descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4055        descriptor.SMID = cpu_to_le16(smid);
4056
4057        writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4058}
4059
4060/**
4061 * _base_put_smid_hi_priority_atomic - send Task Management request to
4062 * firmware using Atomic Request Descriptor
4063 * @ioc: per adapter object
4064 * @smid: system request message index
 * @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0
4066 *
4067 * Return nothing.
4068 */
4069static void
4070_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4071        u16 msix_task)
4072{
4073        Mpi26AtomicRequestDescriptor_t descriptor;
4074        u32 *request = (u32 *)&descriptor;
4075
4076        descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4077        descriptor.MSIxIndex = msix_task;
4078        descriptor.SMID = cpu_to_le16(smid);
4079
4080        writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4081}
4082
4083/**
 * _base_put_smid_default_atomic - Default, primarily used for config pages,
 * using Atomic Request Descriptor
4086 * @ioc: per adapter object
4087 * @smid: system request message index
4088 *
4089 * Return nothing.
4090 */
4091static void
4092_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4093{
4094        Mpi26AtomicRequestDescriptor_t descriptor;
4095        u32 *request = (u32 *)&descriptor;
4096
4097        descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4098        descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4099        descriptor.SMID = cpu_to_le16(smid);
4100
4101        writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4102}
4103
4104/**
4105 * _base_display_OEMs_branding - Display branding string
4106 * @ioc: per adapter object
4107 */
4108static void
4109_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4110{
4114        switch (ioc->pdev->subsystem_vendor) {
4115        case PCI_VENDOR_ID_INTEL:
4116                switch (ioc->pdev->device) {
4117                case MPI2_MFGPAGE_DEVID_SAS2008:
4118                        switch (ioc->pdev->subsystem_device) {
4119                        case MPT2SAS_INTEL_RMS2LL080_SSDID:
4120                                ioc_info(ioc, "%s\n",
4121                                         MPT2SAS_INTEL_RMS2LL080_BRANDING);
4122                                break;
4123                        case MPT2SAS_INTEL_RMS2LL040_SSDID:
4124                                ioc_info(ioc, "%s\n",
4125                                         MPT2SAS_INTEL_RMS2LL040_BRANDING);
4126                                break;
4127                        case MPT2SAS_INTEL_SSD910_SSDID:
4128                                ioc_info(ioc, "%s\n",
4129                                         MPT2SAS_INTEL_SSD910_BRANDING);
4130                                break;
4131                        default:
4132                                ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4133                                         ioc->pdev->subsystem_device);
4134                                break;
4135                        }
4136                        break;
4137                case MPI2_MFGPAGE_DEVID_SAS2308_2:
4138                        switch (ioc->pdev->subsystem_device) {
4139                        case MPT2SAS_INTEL_RS25GB008_SSDID:
4140                                ioc_info(ioc, "%s\n",
4141                                         MPT2SAS_INTEL_RS25GB008_BRANDING);
4142                                break;
4143                        case MPT2SAS_INTEL_RMS25JB080_SSDID:
4144                                ioc_info(ioc, "%s\n",
4145                                         MPT2SAS_INTEL_RMS25JB080_BRANDING);
4146                                break;
4147                        case MPT2SAS_INTEL_RMS25JB040_SSDID:
4148                                ioc_info(ioc, "%s\n",
4149                                         MPT2SAS_INTEL_RMS25JB040_BRANDING);
4150                                break;
4151                        case MPT2SAS_INTEL_RMS25KB080_SSDID:
4152                                ioc_info(ioc, "%s\n",
4153                                         MPT2SAS_INTEL_RMS25KB080_BRANDING);
4154                                break;
4155                        case MPT2SAS_INTEL_RMS25KB040_SSDID:
4156                                ioc_info(ioc, "%s\n",
4157                                         MPT2SAS_INTEL_RMS25KB040_BRANDING);
4158                                break;
4159                        case MPT2SAS_INTEL_RMS25LB040_SSDID:
4160                                ioc_info(ioc, "%s\n",
4161                                         MPT2SAS_INTEL_RMS25LB040_BRANDING);
4162                                break;
4163                        case MPT2SAS_INTEL_RMS25LB080_SSDID:
4164                                ioc_info(ioc, "%s\n",
4165                                         MPT2SAS_INTEL_RMS25LB080_BRANDING);
4166                                break;
4167                        default:
4168                                ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4169                                         ioc->pdev->subsystem_device);
4170                                break;
4171                        }
4172                        break;
4173                case MPI25_MFGPAGE_DEVID_SAS3008:
4174                        switch (ioc->pdev->subsystem_device) {
4175                        case MPT3SAS_INTEL_RMS3JC080_SSDID:
4176                                ioc_info(ioc, "%s\n",
4177                                         MPT3SAS_INTEL_RMS3JC080_BRANDING);
4178                                break;
4179
4180                        case MPT3SAS_INTEL_RS3GC008_SSDID:
4181                                ioc_info(ioc, "%s\n",
4182                                         MPT3SAS_INTEL_RS3GC008_BRANDING);
4183                                break;
4184                        case MPT3SAS_INTEL_RS3FC044_SSDID:
4185                                ioc_info(ioc, "%s\n",
4186                                         MPT3SAS_INTEL_RS3FC044_BRANDING);
4187                                break;
4188                        case MPT3SAS_INTEL_RS3UC080_SSDID:
4189                                ioc_info(ioc, "%s\n",
4190                                         MPT3SAS_INTEL_RS3UC080_BRANDING);
4191                                break;
4192                        default:
4193                                ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4194                                         ioc->pdev->subsystem_device);
4195                                break;
4196                        }
4197                        break;
4198                default:
4199                        ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4200                                 ioc->pdev->subsystem_device);
4201                        break;
4202                }
4203                break;
4204        case PCI_VENDOR_ID_DELL:
4205                switch (ioc->pdev->device) {
4206                case MPI2_MFGPAGE_DEVID_SAS2008:
4207                        switch (ioc->pdev->subsystem_device) {
4208                        case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4209                                ioc_info(ioc, "%s\n",
4210                                         MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4211                                break;
4212                        case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4213                                ioc_info(ioc, "%s\n",
4214                                         MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4215                                break;
4216                        case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4217                                ioc_info(ioc, "%s\n",
4218                                         MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4219                                break;
4220                        case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4221                                ioc_info(ioc, "%s\n",
4222                                         MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4223                                break;
4224                        case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4225                                ioc_info(ioc, "%s\n",
4226                                         MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4227                                break;
4228                        case MPT2SAS_DELL_PERC_H200_SSDID:
4229                                ioc_info(ioc, "%s\n",
4230                                         MPT2SAS_DELL_PERC_H200_BRANDING);
4231                                break;
4232                        case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4233                                ioc_info(ioc, "%s\n",
4234                                         MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4235                                break;
4236                        default:
4237                                ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4238                                         ioc->pdev->subsystem_device);
4239                                break;
4240                        }
4241                        break;
4242                case MPI25_MFGPAGE_DEVID_SAS3008:
4243                        switch (ioc->pdev->subsystem_device) {
4244                        case MPT3SAS_DELL_12G_HBA_SSDID:
4245                                ioc_info(ioc, "%s\n",
4246                                         MPT3SAS_DELL_12G_HBA_BRANDING);
4247                                break;
4248                        default:
4249                                ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4250                                         ioc->pdev->subsystem_device);
4251                                break;
4252                        }
4253                        break;
4254                default:
4255                        ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4256                                 ioc->pdev->subsystem_device);
4257                        break;
4258                }
4259                break;
4260        case PCI_VENDOR_ID_CISCO:
4261                switch (ioc->pdev->device) {
4262                case MPI25_MFGPAGE_DEVID_SAS3008:
4263                        switch (ioc->pdev->subsystem_device) {
4264                        case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4265                                ioc_info(ioc, "%s\n",
4266                                         MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4267                                break;
4268                        case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4269                                ioc_info(ioc, "%s\n",
4270                                         MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4271                                break;
4272                        case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4273                                ioc_info(ioc, "%s\n",
4274                                         MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4275                                break;
4276                        default:
4277                                ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4278                                         ioc->pdev->subsystem_device);
4279                                break;
4280                        }
4281                        break;
4282                case MPI25_MFGPAGE_DEVID_SAS3108_1:
4283                        switch (ioc->pdev->subsystem_device) {
4284                        case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4285                                ioc_info(ioc, "%s\n",
4286                                         MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4287                                break;
4288                        case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4289                                ioc_info(ioc, "%s\n",
4290                                         MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4291                                break;
4292                        default:
4293                                ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4294                                         ioc->pdev->subsystem_device);
4295                                break;
4296                        }
4297                        break;
4298                default:
4299                        ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4300                                 ioc->pdev->subsystem_device);
4301                        break;
4302                }
4303                break;
4304        case MPT2SAS_HP_3PAR_SSVID:
4305                switch (ioc->pdev->device) {
4306                case MPI2_MFGPAGE_DEVID_SAS2004:
4307                        switch (ioc->pdev->subsystem_device) {
4308                        case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4309                                ioc_info(ioc, "%s\n",
4310                                         MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4311                                break;
4312                        default:
4313                                ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4314                                         ioc->pdev->subsystem_device);
4315                                break;
4316                        }
4317                        break;
4318                case MPI2_MFGPAGE_DEVID_SAS2308_2:
4319                        switch (ioc->pdev->subsystem_device) {
4320                        case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4321                                ioc_info(ioc, "%s\n",
4322                                         MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4323                                break;
4324                        case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4325                                ioc_info(ioc, "%s\n",
4326                                         MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4327                                break;
4328                        case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4329                                ioc_info(ioc, "%s\n",
4330                                         MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4331                                break;
4332                        case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4333                                ioc_info(ioc, "%s\n",
4334                                         MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4335                                break;
4336                        default:
4337                                ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4338                                         ioc->pdev->subsystem_device);
4339                                break;
4340                        }
4341                        break;
4342                default:
4343                        ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4344                                 ioc->pdev->subsystem_device);
4345                        break;
4346                }
                break;
4347        default:
4348                break;
4349        }
4350}
4351
4352/**
4353 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4354 *                              version from FW Image Header.
4355 * @ioc: per adapter object
4356 *
4357 * Return: 0 for success, non-zero for failure.
4358 */
4359static int
4360_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4361{
4362        Mpi2FWImageHeader_t *fw_img_hdr;
4363        Mpi26ComponentImageHeader_t *cmp_img_hdr;
4364        Mpi25FWUploadRequest_t *mpi_request;
4365        Mpi2FWUploadReply_t mpi_reply;
4366        int r = 0;
4367        u32  package_version = 0;
4368        void *fwpkg_data = NULL;
4369        dma_addr_t fwpkg_data_dma;
4370        u16 smid, ioc_status;
4371        size_t data_length;
4372
4373        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4374
4375        if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4376                ioc_err(ioc, "%s: internal command already in use\n", __func__);
4377                return -EAGAIN;
4378        }
4379
4380        data_length = sizeof(Mpi2FWImageHeader_t);
4381        fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4382                        &fwpkg_data_dma, GFP_KERNEL);
4383        if (!fwpkg_data) {
4384                ioc_err(ioc,
4385                    "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4386                        __FILE__, __LINE__, __func__);
4387                return -ENOMEM;
4388        }
4389
4390        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4391        if (!smid) {
4392                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4393                r = -EAGAIN;
4394                goto out;
4395        }
4396
4397        ioc->base_cmds.status = MPT3_CMD_PENDING;
4398        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4399        ioc->base_cmds.smid = smid;
4400        memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4401        mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4402        mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4403        mpi_request->ImageSize = cpu_to_le32(data_length);
4404        ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4405                        data_length);
4406        init_completion(&ioc->base_cmds.done);
4407        ioc->put_smid_default(ioc, smid);
4408        /* Wait for 15 seconds */
4409        wait_for_completion_timeout(&ioc->base_cmds.done,
4410                        FW_IMG_HDR_READ_TIMEOUT*HZ);
4411        ioc_info(ioc, "%s: complete\n", __func__);
4412        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4413                ioc_err(ioc, "%s: timeout\n", __func__);
4414                _debug_dump_mf(mpi_request,
4415                                sizeof(Mpi25FWUploadRequest_t)/4);
4416                r = -ETIME;
4417        } else {
4418                memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4419                if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4420                        memcpy(&mpi_reply, ioc->base_cmds.reply,
4421                                        sizeof(Mpi2FWUploadReply_t));
4422                        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4423                                                MPI2_IOCSTATUS_MASK;
4424                        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4425                                fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4426                                if (le32_to_cpu(fw_img_hdr->Signature) ==
4427                                    MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4428                                        cmp_img_hdr =
4429                                            (Mpi26ComponentImageHeader_t *)
4430                                            (fwpkg_data);
4431                                        package_version =
4432                                            le32_to_cpu(
4433                                            cmp_img_hdr->ApplicationSpecific);
4434                                } else
4435                                        package_version =
4436                                            le32_to_cpu(
4437                                            fw_img_hdr->PackageVersion.Word);
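                                /*
                                 * PackageVersion packs four one-byte fields
                                 * (major.minor.unit.dev) into a u32; the
                                 * shifts below unpack them for display.
                                 */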
4438                                if (package_version)
4439                                        ioc_info(ioc,
4440                                        "FW Package Ver(%02d.%02d.%02d.%02d)\n",
4441                                        ((package_version) & 0xFF000000) >> 24,
4442                                        ((package_version) & 0x00FF0000) >> 16,
4443                                        ((package_version) & 0x0000FF00) >> 8,
4444                                        (package_version) & 0x000000FF);
4445                        } else {
4446                                _debug_dump_mf(&mpi_reply,
4447                                                sizeof(Mpi2FWUploadReply_t)/4);
4448                        }
4449                }
4450        }
4451        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4452out:
4453        if (fwpkg_data)
4454                dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4455                                fwpkg_data_dma);
4456        return r;
4457}
4458
4459/**
4460 * _base_display_ioc_capabilities - Display IOC's capabilities.
4461 * @ioc: per adapter object
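     *
     * Logs the chip name, firmware and BIOS versions, and a summary of the
     * supported protocols and capabilities, e.g. (illustrative output):
     *   mpt3sas_cm0: Protocol=(Initiator), Capabilities=(Raid,TLR,EEDP,NCQ)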
4462 */
4463static void
4464_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4465{
4466        int i = 0;
4467        char desc[16];
4468        u32 iounit_pg1_flags;
4469        u32 bios_version;
4470
4471        bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4472        strncpy(desc, ioc->manu_pg0.ChipName, sizeof(desc) - 1);
        desc[sizeof(desc) - 1] = '\0'; /* ChipName may not be NUL-terminated */
4473        ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4474                 desc,
4475                 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4476                 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4477                 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4478                 ioc->facts.FWVersion.Word & 0x000000FF,
4479                 ioc->pdev->revision,
4480                 (bios_version & 0xFF000000) >> 24,
4481                 (bios_version & 0x00FF0000) >> 16,
4482                 (bios_version & 0x0000FF00) >> 8,
4483                 bios_version & 0x000000FF);
4484
4485        _base_display_OEMs_branding(ioc);
4486
4487        ioc_info(ioc, "Protocol=(");
4488
4489        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4490                pr_cont("Initiator");
4491                i++;
4492        }
4493
4494        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4495                pr_cont("%sTarget", i ? "," : "");
4496                i++;
4497        }
4498
4499        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4500                pr_cont("%sNVMe", i ? "," : "");
4501                i++;
4502        }
4503
4504        i = 0;
4505        pr_cont("), Capabilities=(");
4506
4507        if (!ioc->hide_ir_msg) {
4508                if (ioc->facts.IOCCapabilities &
4509                    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4510                        pr_cont("Raid");
4511                        i++;
4512                }
4513        }
4514
4515        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4516                pr_cont("%sTLR", i ? "," : "");
4517                i++;
4518        }
4519
4520        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4521                pr_cont("%sMulticast", i ? "," : "");
4522                i++;
4523        }
4524
4525        if (ioc->facts.IOCCapabilities &
4526            MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4527                pr_cont("%sBIDI Target", i ? "," : "");
4528                i++;
4529        }
4530
4531        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4532                pr_cont("%sEEDP", i ? "," : "");
4533                i++;
4534        }
4535
4536        if (ioc->facts.IOCCapabilities &
4537            MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4538                pr_cont("%sSnapshot Buffer", i ? "," : "");
4539                i++;
4540        }
4541
4542        if (ioc->facts.IOCCapabilities &
4543            MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4544                pr_cont("%sDiag Trace Buffer", i ? "," : "");
4545                i++;
4546        }
4547
4548        if (ioc->facts.IOCCapabilities &
4549            MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4550                pr_cont("%sDiag Extended Buffer", i ? "," : "");
4551                i++;
4552        }
4553
4554        if (ioc->facts.IOCCapabilities &
4555            MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4556                pr_cont("%sTask Set Full", i ? "," : "");
4557                i++;
4558        }
4559
4560        iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4561        if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4562                pr_cont("%sNCQ", i ? "," : "");
4563                i++;
4564        }
4565
4566        pr_cont(")\n");
4567}
4568
4569/**
4570 * mpt3sas_base_update_missing_delay - change the missing delay timers
4571 * @ioc: per adapter object
4572 * @device_missing_delay: amount of time till device is reported missing
4573 * @io_missing_delay: interval after which IO is returned when a device is missing
4574 *
4575 * Using values passed on the command line, this function modifies the
4576 * device missing delay, as well as the io missing delay. This should be
4577 * called at driver load time.
4578 */
4579void
4580mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4581        u16 device_missing_delay, u8 io_missing_delay)
4582{
4583        u16 dmd, dmd_new, dmd_original;
4584        u8 io_missing_delay_original;
4585        u16 sz;
4586        Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4587        Mpi2ConfigReply_t mpi_reply;
4588        u8 num_phys = 0;
4589        u16 ioc_status;
4590
4591        mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4592        if (!num_phys)
4593                return;
4594
4595        sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4596            sizeof(Mpi2SasIOUnit1PhyData_t));
4597        sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4598        if (!sas_iounit_pg1) {
4599                ioc_err(ioc, "failure at %s:%d/%s()!\n",
4600                        __FILE__, __LINE__, __func__);
4601                goto out;
4602        }
4603        if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4604            sas_iounit_pg1, sz))) {
4605                ioc_err(ioc, "failure at %s:%d/%s()!\n",
4606                        __FILE__, __LINE__, __func__);
4607                goto out;
4608        }
4609        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4610            MPI2_IOCSTATUS_MASK;
4611        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4612                ioc_err(ioc, "failure at %s:%d/%s()!\n",
4613                        __FILE__, __LINE__, __func__);
4614                goto out;
4615        }
4616
4617        /* device missing delay */
4618        dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4619        if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4620                dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4621        else
4622                dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4623        dmd_original = dmd;
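        /*
         * ReportDeviceMissingDelay holds either a raw 7-bit count of seconds
         * or, when MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 is set, a count of
         * 16-second units. For example, a requested delay of 300 seconds is
         * stored as 300/16 = 18 units, an effective delay of 288 seconds.
         */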
4624        if (device_missing_delay > 0x7F) {
4625                dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4626                    device_missing_delay;
4627                dmd = dmd / 16;
4628                dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4629        } else
4630                dmd = device_missing_delay;
4631        sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4632
4633        /* io missing delay */
4634        io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4635        sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4636
4637        if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4638            sz)) {
4639                if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4640                        dmd_new = (dmd &
4641                            MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4642                else
4643                        dmd_new =
4644                    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4645                ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4646                         dmd_original, dmd_new);
4647                ioc_info(ioc, "io_missing_delay: old(%d), new(%d)\n",
4648                         io_missing_delay_original,
4649                         io_missing_delay);
4650                ioc->device_missing_delay = dmd_new;
4651                ioc->io_missing_delay = io_missing_delay;
4652        }
4653
4654out:
4655        kfree(sas_iounit_pg1);
4656}
4657
4658/**
4659 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4660 *    according to performance mode.
4661 * @ioc: per adapter object
4662 *
4663 * Return: nothing.
4664 */
4665static void
4666_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4667{
4668        Mpi2IOCPage1_t ioc_pg1;
4669        Mpi2ConfigReply_t mpi_reply;
4670
4671        mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4672        memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4673
4674        switch (perf_mode) {
4675        case MPT_PERF_MODE_DEFAULT:
4676        case MPT_PERF_MODE_BALANCED:
4677                if (ioc->high_iops_queues) {
4678                        ioc_info(ioc,
4679                                "Enable interrupt coalescing only for first "
4680                                "%d reply queues\n",
4681                                MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4682                        /*
4683                         * If bit 31 is zero, interrupt coalescing is
4684                         * enabled for all reply descriptor post queues.
4685                         * If bit 31 is set, interrupt coalescing can be
4686                         * enabled or disabled per group of 8 reply
4687                         * descriptor post queues. So to enable interrupt
4688                         * coalescing only on the first reply descriptor
4689                         * post queue group, both bit 31 and bit 0 are set.
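                         * (With the default MPT3SAS_HIGH_IOPS_REPLY_QUEUES of
                         * 8, the value written below is 0x80000001.)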
4690                         */
4691                        ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4692                            ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4693                        mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4694                        ioc_info(ioc, "performance mode: balanced\n");
4695                        return;
4696                }
4697                fallthrough;
4698        case MPT_PERF_MODE_LATENCY:
4699                /*
4700                 * Enable interrupt coalescing on all reply queues
4701                 * with timeout value 0xA
4702                 */
4703                ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4704                ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4705                ioc_pg1.ProductSpecific = 0;
4706                mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4707                ioc_info(ioc, "performance mode: latency\n");
4708                break;
4709        case MPT_PERF_MODE_IOPS:
4710                /*
4711                 * Enable interrupt coalescing on all reply queues.
4712                 */
4713                ioc_info(ioc,
4714                    "performance mode: iops with coalescing timeout: 0x%x\n",
4715                    le32_to_cpu(ioc_pg1.CoalescingTimeout));
4716                ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4717                ioc_pg1.ProductSpecific = 0;
4718                mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4719                break;
4720        }
4721}
4722
4723/**
4724 * _base_static_config_pages - static start of day config pages
4725 * @ioc: per adapter object
4726 */
4727static void
4728_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4729{
4730        Mpi2ConfigReply_t mpi_reply;
4731        u32 iounit_pg1_flags;
4732
4733        ioc->nvme_abort_timeout = 30;
4734        mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4735        if (ioc->ir_firmware)
4736                mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4737                    &ioc->manu_pg10);
4738
4739        /*
4740         * Ensure correct T10 PI operation if vendor left EEDPTagMode
4741         * flag unset in NVDATA.
4742         */
4743        mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4744        if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
4745                pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4746                    ioc->name);
4747                ioc->manu_pg11.EEDPTagMode &= ~0x3;
4748                ioc->manu_pg11.EEDPTagMode |= 0x1;
4749                mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4750                    &ioc->manu_pg11);
4751        }
4752        if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4753                ioc->tm_custom_handling = 1;
4754        else {
4755                ioc->tm_custom_handling = 0;
4756                if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4757                        ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4758                else if (ioc->manu_pg11.NVMeAbortTO >
4759                                        NVME_TASK_ABORT_MAX_TIMEOUT)
4760                        ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4761                else
4762                        ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4763        }
4764
4765        mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4766        mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4767        mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4768        mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4769        mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4770        mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4771        _base_display_ioc_capabilities(ioc);
4772
4773        /*
4774         * Enable task_set_full handling in iounit_pg1 when the
4775         * facts capabilities indicate that it is supported.
4776         */
4777        iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4778        if ((ioc->facts.IOCCapabilities &
4779            MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4780                iounit_pg1_flags &=
4781                    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4782        else
4783                iounit_pg1_flags |=
4784                    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4785        ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4786        mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4787
4788        if (ioc->iounit_pg8.NumSensors)
4789                ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4790        if (ioc->is_aero_ioc)
4791                _base_update_ioc_page1_inlinewith_perf_mode(ioc);
4792}
4793
4794/**
4795 * mpt3sas_free_enclosure_list - release memory
4796 * @ioc: per adapter object
4797 *
4798 * Free memory allocated during enclosure add.
4799 */
4800void
4801mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4802{
4803        struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4804
4805        /* Free enclosure list */
4806        list_for_each_entry_safe(enclosure_dev,
4807                        enclosure_dev_next, &ioc->enclosure_list, list) {
4808                list_del(&enclosure_dev->list);
4809                kfree(enclosure_dev);
4810        }
4811}
4812
4813/**
4814 * _base_release_memory_pools - release memory
4815 * @ioc: per adapter object
4816 *
4817 * Free memory allocated from _base_allocate_memory_pools.
4818 */
4819static void
4820_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4821{
4822        int i = 0;
4823        int j = 0;
4824        int dma_alloc_count = 0;
4825        struct chain_tracker *ct;
4826        int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4827
4828        dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4829
4830        if (ioc->request) {
4831                dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
4832                    ioc->request,  ioc->request_dma);
4833                dexitprintk(ioc,
4834                            ioc_info(ioc, "request_pool(0x%p): free\n",
4835                                     ioc->request));
4836                ioc->request = NULL;
4837        }
4838
4839        if (ioc->sense) {
4840                dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4841                dma_pool_destroy(ioc->sense_dma_pool);
4842                dexitprintk(ioc,
4843                            ioc_info(ioc, "sense_pool(0x%p): free\n",
4844                                     ioc->sense));
4845                ioc->sense = NULL;
4846        }
4847
4848        if (ioc->reply) {
4849                dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4850                dma_pool_destroy(ioc->reply_dma_pool);
4851                dexitprintk(ioc,
4852                            ioc_info(ioc, "reply_pool(0x%p): free\n",
4853                                     ioc->reply));
4854                ioc->reply = NULL;
4855        }
4856
4857        if (ioc->reply_free) {
4858                dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4859                    ioc->reply_free_dma);
4860                dma_pool_destroy(ioc->reply_free_dma_pool);
4861                dexitprintk(ioc,
4862                            ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4863                                     ioc->reply_free));
4864                ioc->reply_free = NULL;
4865        }
4866
4867        if (ioc->reply_post) {
4868                dma_alloc_count = DIV_ROUND_UP(count,
4869                                RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4870                for (i = 0; i < count; i++) {
4871                        if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
4872                            && dma_alloc_count) {
4873                                if (ioc->reply_post[i].reply_post_free) {
4874                                        dma_pool_free(
4875                                            ioc->reply_post_free_dma_pool,
4876                                            ioc->reply_post[i].reply_post_free,
4877                                        ioc->reply_post[i].reply_post_free_dma);
4878                                        dexitprintk(ioc, ioc_info(ioc,
4879                                           "reply_post_free_pool(0x%p): free\n",
4880                                           ioc->reply_post[i].reply_post_free));
4881                                        ioc->reply_post[i].reply_post_free =
4882                                                                        NULL;
4883                                }
4884                                --dma_alloc_count;
4885                        }
4886                }
4887                dma_pool_destroy(ioc->reply_post_free_dma_pool);
4888                if (ioc->reply_post_free_array &&
4889                        ioc->rdpq_array_enable) {
4890                        dma_pool_free(ioc->reply_post_free_array_dma_pool,
4891                            ioc->reply_post_free_array,
4892                            ioc->reply_post_free_array_dma);
4893                        ioc->reply_post_free_array = NULL;
4894                }
4895                dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4896                kfree(ioc->reply_post);
4897        }
4898
4899        if (ioc->pcie_sgl_dma_pool) {
4900                for (i = 0; i < ioc->scsiio_depth; i++) {
4901                        dma_pool_free(ioc->pcie_sgl_dma_pool,
4902                                        ioc->pcie_sg_lookup[i].pcie_sgl,
4903                                        ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4904                }
4905                dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4906        }
4907
4908        if (ioc->config_page) {
4909                dexitprintk(ioc,
4910                            ioc_info(ioc, "config_page(0x%p): free\n",
4911                                     ioc->config_page));
4912                dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
4913                    ioc->config_page, ioc->config_page_dma);
4914        }
4915
4916        kfree(ioc->hpr_lookup);
4917        ioc->hpr_lookup = NULL;
4918        kfree(ioc->internal_lookup);
4919        ioc->internal_lookup = NULL;
4920        if (ioc->chain_lookup) {
4921                for (i = 0; i < ioc->scsiio_depth; i++) {
4922                        for (j = ioc->chains_per_prp_buffer;
4923                            j < ioc->chains_needed_per_io; j++) {
4924                                ct = &ioc->chain_lookup[i].chains_per_smid[j];
4925                                if (ct && ct->chain_buffer)
4926                                        dma_pool_free(ioc->chain_dma_pool,
4927                                                ct->chain_buffer,
4928                                                ct->chain_buffer_dma);
4929                        }
4930                        kfree(ioc->chain_lookup[i].chains_per_smid);
4931                }
4932                dma_pool_destroy(ioc->chain_dma_pool);
4933                kfree(ioc->chain_lookup);
4934                ioc->chain_lookup = NULL;
4935        }
4936}
4937
4938/**
4939 * mpt3sas_check_same_4gb_region - check whether a reply queue pool's start
4940 *      and end addresses share the same upper 32 bits (i.e. the pool does
4941 *      not cross a 4 GB boundary).
4942 * @reply_pool_start_address: Base address of a reply queue set
4943 * @pool_sz: Size of a single Reply Descriptor Post Queue pool
4944 *
4945 * Return: 1 if the pool lies within a single 4 GB region, else 0.
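     *
     * For example, a pool starting at address 0x1fffff000 with size 0x2000
     * ends at 0x200001000 and crosses a 4 GB boundary, so 0 is returned.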
4946 */
4948static int
4949mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
4950{
4951        long reply_pool_end_address;
4952
4953        reply_pool_end_address = reply_pool_start_address + pool_sz;
4954
4955        if (upper_32_bits(reply_pool_start_address) ==
4956                upper_32_bits(reply_pool_end_address))
4957                return 1;
4958        else
4959                return 0;
4960}
4961
4962/**
4963 * base_alloc_rdpq_dma_pool - Allocate DMA'able memory
4964 *                     for reply descriptor post queues.
4965 * @ioc: per adapter object
4966 * @sz: DMA Pool size
4967 * Return: 0 for success, non-zero for failure.
4968 */
4969static int
4970base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
4971{
4972        int i = 0;
4973        u32 dma_alloc_count = 0;
4974        int reply_post_free_sz = ioc->reply_post_queue_depth *
4975                sizeof(Mpi2DefaultReplyDescriptor_t);
4976        int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4977
4978        ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
4979                        GFP_KERNEL);
4980        if (!ioc->reply_post)
4981                return -ENOMEM;
4982        /*
4983         *  For INVADER_SERIES, each set of 8 reply queues (0-7, 8-15, ...),
4984         *  and for VENTURA_SERIES, each set of 16 reply queues (0-15,
4985         *  16-31, ...), must fit within a 4GB boundary, i.e. the reply
4986         *  queues in a set must share the same upper 32 address bits.
4987         *  So the driver allocates the DMA'able memory for the reply
4988         *  queues accordingly, applying the stricter VENTURA_SERIES
4989         *  grouping to INVADER_SERIES as well.
4990         */
4991        dma_alloc_count = DIV_ROUND_UP(count,
4992                                RDPQ_MAX_INDEX_IN_ONE_CHUNK);
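        /* e.g. 24 reply queues need DIV_ROUND_UP(24, 16) = 2 chunk allocations */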
4993        ioc->reply_post_free_dma_pool =
4994                dma_pool_create("reply_post_free pool",
4995                    &ioc->pdev->dev, sz, 16, 0);
4996        if (!ioc->reply_post_free_dma_pool)
4997                return -ENOMEM;
4998        for (i = 0; i < count; i++) {
4999                if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
5000                        ioc->reply_post[i].reply_post_free =
5001                            dma_pool_zalloc(ioc->reply_post_free_dma_pool,
5002                                GFP_KERNEL,
5003                                &ioc->reply_post[i].reply_post_free_dma);
5004                        if (!ioc->reply_post[i].reply_post_free)
5005                                return -ENOMEM;
5006                        /*
5007                         * Each set of RDPQ pool must satisfy 4gb boundary
5008                         * restriction.
5009                         * 1) Check if allocated resources for RDPQ pool are in
5010                         *      the same 4GB range.
5011                         * 2) If #1 is true, continue with 64 bit DMA.
5012                         * 3) If #1 is false, return -EAGAIN: the caller frees
5013                         *    all resources, sets a 32-bit DMA mask and retries.
5014                         */
5015                        if (!mpt3sas_check_same_4gb_region(
5016                                (long)ioc->reply_post[i].reply_post_free, sz)) {
5017                                dinitprintk(ioc,
5018                                    ioc_err(ioc, "bad Replypost free pool(0x%p) "
5019                                    "reply_post_free_dma = (0x%llx)\n",
5020                                    ioc->reply_post[i].reply_post_free,
5021                                    (unsigned long long)
5022                                    ioc->reply_post[i].reply_post_free_dma));
5023                                return -EAGAIN;
5024                        }
5025                        dma_alloc_count--;
5026
5027                } else {
5028                        ioc->reply_post[i].reply_post_free =
5029                            (Mpi2ReplyDescriptorsUnion_t *)
5030                            ((long)ioc->reply_post[i-1].reply_post_free
5031                            + reply_post_free_sz);
5032                        ioc->reply_post[i].reply_post_free_dma =
5033                            (dma_addr_t)
5034                            (ioc->reply_post[i-1].reply_post_free_dma +
5035                            reply_post_free_sz);
5036                }
5037        }
5038        return 0;
5039}
5040
5041/**
5042 * _base_allocate_memory_pools - allocate start of day memory pools
5043 * @ioc: per adapter object
5044 *
5045 * Return: 0 success, anything else error.
5046 */
5047static int
5048_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5049{
5050        struct mpt3sas_facts *facts;
5051        u16 max_sge_elements;
5052        u16 chains_needed_per_io;
5053        u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
5054        u32 retry_sz;
5055        u32 rdpq_sz = 0;
5056        u16 max_request_credit, nvme_blocks_needed;
5057        unsigned short sg_tablesize;
5058        u16 sge_size;
5059        int i, j;
5060        int ret = 0;
5061        struct chain_tracker *ct;
5062
5063        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5064
5065
5066        retry_sz = 0;
5067        facts = &ioc->facts;
5068
5069        /* command line tunables for max sgl entries */
5070        if (max_sgl_entries != -1)
5071                sg_tablesize = max_sgl_entries;
5072        else {
5073                if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
5074                        sg_tablesize = MPT2SAS_SG_DEPTH;
5075                else
5076                        sg_tablesize = MPT3SAS_SG_DEPTH;
5077        }
5078
5079        /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
5080        if (reset_devices)
5081                sg_tablesize = min_t(unsigned short, sg_tablesize,
5082                   MPT_KDUMP_MIN_PHYS_SEGMENTS);
5083
5084        if (ioc->is_mcpu_endpoint)
5085                ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5086        else {
5087                if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
5088                        sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5089                else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
5090                        sg_tablesize = min_t(unsigned short, sg_tablesize,
5091                                        SG_MAX_SEGMENTS);
5092                        ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined MPT_MAX_PHYS_SEGMENTS(%u)\n",
5093                                 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
5094                }
5095                ioc->shost->sg_tablesize = sg_tablesize;
5096        }
5097
5098        ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
5099                (facts->RequestCredit / 4));
5100        if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
5101                if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
5102                                INTERNAL_SCSIIO_CMDS_COUNT)) {
5103                        ioc_err(ioc, "IOC doesn't have enough Request Credits, it has only %d credits\n",
5104                                facts->RequestCredit);
5105                        return -ENOMEM;
5106                }
5107                ioc->internal_depth = 10;
5108        }
5109
5110        ioc->hi_priority_depth = ioc->internal_depth - (5);
5111        /* command line tunables  for max controller queue depth */
5112        if (max_queue_depth != -1 && max_queue_depth != 0) {
5113                max_request_credit = min_t(u16, max_queue_depth +
5114                        ioc->internal_depth, facts->RequestCredit);
5115                if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
5116                        max_request_credit =  MAX_HBA_QUEUE_DEPTH;
5117        } else if (reset_devices)
5118                max_request_credit = min_t(u16, facts->RequestCredit,
5119                    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
5120        else
5121                max_request_credit = min_t(u16, facts->RequestCredit,
5122                    MAX_HBA_QUEUE_DEPTH);
5123
5124        /* Firmware maintains additional facts->HighPriorityCredit number of
5125         * credits for HiPriority Request messages, so the hba queue depth is
5126         * the sum of max_request_credit and the high priority queue depth.
5127         */
5128        ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
5129
5130        /* request frame size */
5131        ioc->request_sz = facts->IOCRequestFrameSize * 4;
5132
5133        /* reply frame size */
5134        ioc->reply_sz = facts->ReplyFrameSize * 4;
5135
5136        /* chain segment size */
5137        if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5138                if (facts->IOCMaxChainSegmentSize)
5139                        ioc->chain_segment_sz =
5140                                        facts->IOCMaxChainSegmentSize *
5141                                        MAX_CHAIN_ELEMT_SZ;
5142                else
5143                /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
5144                        ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
5145                                                    MAX_CHAIN_ELEMT_SZ;
5146        } else
5147                ioc->chain_segment_sz = ioc->request_sz;
5148
5149        /* calculate the max scatter element size */
5150        sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
5151
5152 retry_allocation:
5153        total_sz = 0;
5154        /* calculate number of sg elements left over in the 1st frame */
5155        max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
5156            sizeof(Mpi2SGEIOUnion_t)) + sge_size);
5157        ioc->max_sges_in_main_message = max_sge_elements/sge_size;
5158
5159        /* now do the same for a chain buffer */
5160        max_sge_elements = ioc->chain_segment_sz - sge_size;
5161        ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
5162
5163        /*
5164         *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
5165         */
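        /*
         * Illustrative numbers: with sg_tablesize = 128, 8 SGEs fitting in
         * the main message and 30 SGEs per chain segment, each I/O needs
         * ((128 - 8) / 30) + 1 = 5 chain buffers.
         */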
5166        chains_needed_per_io = ((ioc->shost->sg_tablesize -
5167           ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
5168            + 1;
5169        if (chains_needed_per_io > facts->MaxChainDepth) {
5170                chains_needed_per_io = facts->MaxChainDepth;
5171                ioc->shost->sg_tablesize = min_t(u16,
5172                ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
5173                * chains_needed_per_io), ioc->shost->sg_tablesize);
5174        }
5175        ioc->chains_needed_per_io = chains_needed_per_io;
5176
5177        /* reply free queue sizing - taking into account for 64 FW events */
5178        ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5179
5180        /* the mCPU endpoint manages a single counter for simplicity */
5181        if (ioc->is_mcpu_endpoint)
5182                ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
5183        else {
5184                /* calculate reply descriptor post queue depth */
5185                ioc->reply_post_queue_depth = ioc->hba_queue_depth +
5186                        ioc->reply_free_queue_depth +  1;
5187                /* align the reply post queue on the next 16 count boundary */
5188                if (ioc->reply_post_queue_depth % 16)
5189                        ioc->reply_post_queue_depth += 16 -
5190                                (ioc->reply_post_queue_depth % 16);
5191        }
5192
5193        if (ioc->reply_post_queue_depth >
5194            facts->MaxReplyDescriptorPostQueueDepth) {
5195                ioc->reply_post_queue_depth =
5196                                facts->MaxReplyDescriptorPostQueueDepth -
5197                    (facts->MaxReplyDescriptorPostQueueDepth % 16);
5198                ioc->hba_queue_depth =
5199                                ((ioc->reply_post_queue_depth - 64) / 2) - 1;
5200                ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5201        }
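        /*
         * The recomputed hba_queue_depth above inverts the relation used
         * earlier, reply_post_queue_depth = 2 * hba_queue_depth + 65 (since
         * reply_free_queue_depth = hba_queue_depth + 64), rounding down.
         */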
5202
5203        ioc_info(ioc,
5204            "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
5205            "sge_per_io(%d), chains_per_io(%d)\n",
5206            ioc->max_sges_in_main_message,
5207            ioc->max_sges_in_chain_message,
5208            ioc->shost->sg_tablesize,
5209            ioc->chains_needed_per_io);
5210
5211        /* reply post queue, 16 byte align */
5212        reply_post_free_sz = ioc->reply_post_queue_depth *
5213            sizeof(Mpi2DefaultReplyDescriptor_t);
5214        rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
5215        if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
5216                rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
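        /*
         * Sizing sketch (illustrative): reply descriptors are 8 bytes, so a
         * reply post queue depth of 1024 takes 8 kB per queue, i.e. 128 kB
         * per RDPQ_MAX_INDEX_IN_ONE_CHUNK (16) group of queues.
         */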
5217        ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
5218        if (ret == -EAGAIN) {
5219                /*
5220                 * Free allocated bad RDPQ memory pools.
5221                 * Change dma coherent mask to 32 bit and reallocate RDPQ
5222                 */
5223                _base_release_memory_pools(ioc);
5224                ioc->use_32bit_dma = true;
5225                if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
5226                        ioc_err(ioc,
5227                            "32 DMA mask failed %s\n", pci_name(ioc->pdev));
5228                        return -ENODEV;
5229                }
5230                if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
5231                        return -ENOMEM;
5232        } else if (ret == -ENOMEM)
5233                return -ENOMEM;
5234        total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
5235            DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
5236        ioc->scsiio_depth = ioc->hba_queue_depth -
5237            ioc->hi_priority_depth - ioc->internal_depth;
5238
5239        /* set the scsi host can_queue depth
5240         * reserving room for internal commands that could be outstanding
5241         */
5242        ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
5243        dinitprintk(ioc,
5244                    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
5245                             ioc->shost->can_queue));
5246
5247        /* contiguous pool for request and chains, 16 byte align, one extra
5248         * frame for smid=0
5249         */
5250        ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
5251        sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
5252
5253        /* hi-priority queue */
5254        sz += (ioc->hi_priority_depth * ioc->request_sz);
5255
5256        /* internal queue */
5257        sz += (ioc->internal_depth * ioc->request_sz);
5258
5259        ioc->request_dma_sz = sz;
5260        ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
5261                        &ioc->request_dma, GFP_KERNEL);
5262        if (!ioc->request) {
5263                ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
5264                        ioc->hba_queue_depth, ioc->chains_needed_per_io,
5265                        ioc->request_sz, sz / 1024);
5266                if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
5267                        goto out;
5268                retry_sz = 64;
5269                ioc->hba_queue_depth -= retry_sz;
5270                _base_release_memory_pools(ioc);
5271                goto retry_allocation;
5272        }
5273
5274        if (retry_sz)
5275                ioc_err(ioc, "request pool: dma_alloc_coherent succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
5276                        ioc->hba_queue_depth, ioc->chains_needed_per_io,
5277                        ioc->request_sz, sz / 1024);
5278
5279        /* hi-priority queue */
5280        ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
5281            ioc->request_sz);
5282        ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
5283            ioc->request_sz);
5284
5285        /* internal queue */
5286        ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
5287            ioc->request_sz);
5288        ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
5289            ioc->request_sz);
5290
5291        ioc_info(ioc,
5292            "request pool(0x%p) - dma(0x%llx): "
5293            "depth(%d), frame_size(%d), pool_size(%d kB)\n",
5294            ioc->request, (unsigned long long) ioc->request_dma,
5295            ioc->hba_queue_depth, ioc->request_sz,
5296            (ioc->hba_queue_depth * ioc->request_sz) / 1024);
5297
5298        total_sz += sz;
5299
5300        dinitprintk(ioc,
5301                    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
5302                             ioc->request, ioc->scsiio_depth));
5303
5304        ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
5305        sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
5306        ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
5307        if (!ioc->chain_lookup) {
5308                ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5309                goto out;
5310        }
5311
5312        sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
5313        for (i = 0; i < ioc->scsiio_depth; i++) {
5314                ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
5315                if (!ioc->chain_lookup[i].chains_per_smid) {
5316                        ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5317                        goto out;
5318                }
5319        }
5320
5321        /* initialize hi-priority queue smid's */
5322        ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
5323            sizeof(struct request_tracker), GFP_KERNEL);
5324        if (!ioc->hpr_lookup) {
5325                ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
5326                goto out;
5327        }
5328        ioc->hi_priority_smid = ioc->scsiio_depth + 1;
5329        dinitprintk(ioc,
5330                    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
5331                             ioc->hi_priority,
5332                             ioc->hi_priority_depth, ioc->hi_priority_smid));
5333
5334        /* initialize internal queue smid's */
5335        ioc->internal_lookup = kcalloc(ioc->internal_depth,
5336            sizeof(struct request_tracker), GFP_KERNEL);
5337        if (!ioc->internal_lookup) {
5338                ioc_err(ioc, "internal_lookup: kcalloc failed\n");
5339                goto out;
5340        }
5341        ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
5342        dinitprintk(ioc,
5343                    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
5344                             ioc->internal,
5345                             ioc->internal_depth, ioc->internal_smid));
5346        /*
5347         * The number of NVMe page sized blocks needed is:
5348         *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
5349         * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
5350         * that is placed in the main message frame.  8 is the size of each PRP
5351         * entry or PRP list pointer entry.  8 is subtracted from page_size
5352         * because of the PRP list pointer entry at the end of a page, so this
5353         * is not counted as a PRP entry.  The 1 added page is a round up.
5354         *
5355         * To avoid allocation failures due to the amount of memory that could
5356         * be required for NVMe PRP's, only each set of NVMe blocks will be
5357         * contiguous, so a new set is allocated for each possible I/O.
5358         */
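        /*
         * Worked example (illustrative): sg_tablesize = 128 with a 4 kB NVMe
         * page gives (((128 * 8) - 1) / (4096 - 8)) + 1 = 1 block per I/O.
         */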
5359        ioc->chains_per_prp_buffer = 0;
5360        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
5361                nvme_blocks_needed =
5362                        (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
5363                nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
5364                nvme_blocks_needed++;
5365
5366                sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
5367                ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
5368                if (!ioc->pcie_sg_lookup) {
5369                        ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
5370                        goto out;
5371                }
5372                sz = nvme_blocks_needed * ioc->page_size;
5373                ioc->pcie_sgl_dma_pool =
5374                        dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
5375                if (!ioc->pcie_sgl_dma_pool) {
5376                        ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5377                        goto out;
5378                }
5379
5380                ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5381                ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
5382                                                ioc->chains_needed_per_io);
5383
5384                for (i = 0; i < ioc->scsiio_depth; i++) {
5385                        ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
5386                                ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5387                                &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5388                        if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5389                                ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5390                                goto out;
5391                        }
5392                        for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5393                                ct = &ioc->chain_lookup[i].chains_per_smid[j];
5394                                ct->chain_buffer =
5395                                    ioc->pcie_sg_lookup[i].pcie_sgl +
5396                                    (j * ioc->chain_segment_sz);
5397                                ct->chain_buffer_dma =
5398                                    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5399                                    (j * ioc->chain_segment_sz);
5400                        }
5401                }
5402
5403                dinitprintk(ioc,
5404                            ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5405                                     ioc->scsiio_depth, sz,
5406                                     (sz * ioc->scsiio_depth) / 1024));
5407                dinitprintk(ioc,
5408                            ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
5409                                     ioc->chains_per_prp_buffer));
5410                total_sz += sz * ioc->scsiio_depth;
5411        }
5412
5413        ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5414            ioc->chain_segment_sz, 16, 0);
5415        if (!ioc->chain_dma_pool) {
5416                ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
5417                goto out;
5418        }
5419        for (i = 0; i < ioc->scsiio_depth; i++) {
5420                for (j = ioc->chains_per_prp_buffer;
5421                                j < ioc->chains_needed_per_io; j++) {
5422                        ct = &ioc->chain_lookup[i].chains_per_smid[j];
5423                        ct->chain_buffer = dma_pool_alloc(
5424                                        ioc->chain_dma_pool, GFP_KERNEL,
5425                                        &ct->chain_buffer_dma);
5426                        if (!ct->chain_buffer) {
5427                                ioc_err(ioc, "chain_lookup: dma_pool_alloc failed\n");
5428                                goto out;
5429                        }
5430                        total_sz += ioc->chain_segment_sz;
5431                }
5432        }
5433
5434        dinitprintk(ioc,
5435                    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
5436                             ioc->chain_depth, ioc->chain_segment_sz,
5437                             (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
5438
5439        /* sense buffers, 4 byte align */
5440        sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
5441        ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5442                                              4, 0);
5443        if (!ioc->sense_dma_pool) {
5444                ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5445                goto out;
5446        }
5447        ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5448            &ioc->sense_dma);
5449        if (!ioc->sense) {
5450                ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5451                goto out;
5452        }
5453        /* The sense buffers must not cross a 4 GB boundary, i.e. the
5454         * start and end of the allocation must share the same upper
5455         * 32 address bits; mpt3sas_check_same_4gb_region() verifies this.
5456         * If the check fails, the old allocation and pool are destroyed
5457         * and a new pool is created with its alignment rounded up to a
5458         * power of two no smaller than the pool size, which guarantees
5459         * that the next allocation cannot cross a 4 GB boundary.
5460         */
5461        if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
5462                /* release the sense pool and reallocate */
5464                dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5465                dma_pool_destroy(ioc->sense_dma_pool);
5466                ioc->sense = NULL;
5467
5468                ioc->sense_dma_pool =
5469                        dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5470                                                roundup_pow_of_two(sz), 0);
5471                if (!ioc->sense_dma_pool) {
5472                        ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5473                        goto out;
5474                }
5475                ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5476                                &ioc->sense_dma);
5477                if (!ioc->sense) {
5478                        ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5479                        goto out;
5480                }
5481        }
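            /*
             * Why the power-of-two alignment works (illustrative): a buffer
             * aligned to roundup_pow_of_two(sz) lies entirely within one
             * naturally aligned window of that size, and 4 GiB is a whole
             * multiple of any such window (for sizes up to 4 GiB), so the
             * reallocated buffer can no longer straddle a 4 GiB boundary.
             */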
5482        ioc_info(ioc,
5483            "sense pool(0x%p) - dma(0x%llx): depth(%d), "
5484            "element_size(%d), pool_size(%d kB)\n",
5485            ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
5486            SCSI_SENSE_BUFFERSIZE, sz / 1024);
5487
5488        total_sz += sz;
5489
5490        /* reply pool, 4 byte align */
5491        sz = ioc->reply_free_queue_depth * ioc->reply_sz;
5492        ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
5493                                              4, 0);
5494        if (!ioc->reply_dma_pool) {
5495                ioc_err(ioc, "reply pool: dma_pool_create failed\n");
5496                goto out;
5497        }
5498        ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5499            &ioc->reply_dma);
5500        if (!ioc->reply) {
5501                ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
5502                goto out;
5503        }
5504        ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5505        ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5506        dinitprintk(ioc,
5507                    ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5508                             ioc->reply, ioc->reply_free_queue_depth,
5509                             ioc->reply_sz, sz / 1024));
5510        dinitprintk(ioc,
5511                    ioc_info(ioc, "reply_dma(0x%llx)\n",
5512                             (unsigned long long)ioc->reply_dma));
5513        total_sz += sz;
5514
5515        /* reply free queue, 16 byte align */
5516        sz = ioc->reply_free_queue_depth * 4;
5517        ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
5518            &ioc->pdev->dev, sz, 16, 0);
5519        if (!ioc->reply_free_dma_pool) {
5520                ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
5521                goto out;
5522        }
5523        ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
5524            &ioc->reply_free_dma);
5525        if (!ioc->reply_free) {
5526                ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
5527                goto out;
5528        }
5529        dinitprintk(ioc,
5530                    ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5531                             ioc->reply_free, ioc->reply_free_queue_depth,
5532                             4, sz / 1024));
5533        dinitprintk(ioc,
5534                    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
5535                             (unsigned long long)ioc->reply_free_dma));
5536        total_sz += sz;
5537
5538        if (ioc->rdpq_array_enable) {
5539                reply_post_free_array_sz = ioc->reply_queue_count *
5540                    sizeof(Mpi2IOCInitRDPQArrayEntry);
5541                ioc->reply_post_free_array_dma_pool =
5542                    dma_pool_create("reply_post_free_array pool",
5543                    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5544                if (!ioc->reply_post_free_array_dma_pool) {
5545                        dinitprintk(ioc,
5546                                    ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
5547                        goto out;
5548                }
5549                ioc->reply_post_free_array =
5550                    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5551                    GFP_KERNEL, &ioc->reply_post_free_array_dma);
5552                if (!ioc->reply_post_free_array) {
5553                        dinitprintk(ioc,
5554                                    ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
5555                        goto out;
5556                }
5557        }
5558        ioc->config_page_sz = 512;
5559        ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
5560                        ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
5561        if (!ioc->config_page) {
5562                ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
5563                goto out;
5564        }
5565
5566        ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
5567            ioc->config_page, (unsigned long long)ioc->config_page_dma,
5568            ioc->config_page_sz);
5569        total_sz += ioc->config_page_sz;
5570
5571        ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
5572                 total_sz / 1024);
5573        ioc_info(ioc, "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
5574                 ioc->shost->can_queue, facts->RequestCredit);
5575        ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
5576                 ioc->shost->sg_tablesize);
5577        return 0;
5578
5579 out:
5580        return -ENOMEM;
5581}
5582
5583/**
5584 * mpt3sas_base_get_iocstate - Get the current state of an MPT adapter.
5585 * @ioc: Pointer to MPT3SAS_ADAPTER structure
5586 * @cooked: Request raw or cooked IOC state
5587 *
5588 * Return: all IOC Doorbell register bits if cooked==0, else just the
5589 * Doorbell bits in MPI2_IOC_STATE_MASK.
5590 */
5591u32
5592mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
5593{
5594        u32 s, sc;
5595
5596        s = ioc->base_readl(&ioc->chip->Doorbell);
5597        sc = s & MPI2_IOC_STATE_MASK;
5598        return cooked ? sc : s;
5599}
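    /*
     * Usage sketch (illustrative):
     *
     *	if (mpt3sas_base_get_iocstate(ioc, 1) == MPI2_IOC_STATE_OPERATIONAL)
     *		... the IOC will accept requests ...
     *
     * With cooked == 0 the raw Doorbell value is returned; when the IOC
     * is in the FAULT state, the fault code can then be extracted with
     * MPI2_DOORBELL_DATA_MASK, as done elsewhere in this file.
     */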
5600
5601/**
5602 * _base_wait_on_iocstate - wait for a particular IOC state
5603 * @ioc: per adapter object
5604 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
5605 * @timeout: timeout in seconds
5606 *
5607 * Return: 0 for success, non-zero (the last observed IOC state) for failure.
5608 */
5609static int
5610_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5611{
5612        u32 count, cntdn;
5613        u32 current_state;
5614
5615        count = 0;
5616        cntdn = 1000 * timeout;
5617        do {
5618                current_state = mpt3sas_base_get_iocstate(ioc, 1);
5619                if (current_state == ioc_state)
5620                        return 0;
5621                if (count && current_state == MPI2_IOC_STATE_FAULT)
5622                        break;
5623                if (count && current_state == MPI2_IOC_STATE_COREDUMP)
5624                        break;
5625
5626                usleep_range(1000, 1500);
5627                count++;
5628        } while (--cntdn);
5629
5630        return current_state;
5631}
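    /*
     * Timing note: cntdn = 1000 * timeout with a ~1 ms sleep per iteration
     * bounds the loop at roughly @timeout seconds.  The "count &&" guards
     * give a FAULT or COREDUMP state observed on the very first read one
     * poll interval to clear before the wait is abandoned.  On timeout the
     * last observed IOC state is returned rather than an errno.
     */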
5632
5633/**
5634 * _base_dump_reg_set - print a hex dump of the system register set
5635 * @ioc: per adapter object
5636 *
5637 * Returns nothing.
5638 */
5639static inline void
5640_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
5641{
5642        unsigned int i, sz = 256;
5643        u32 __iomem *reg = (u32 __iomem *)ioc->chip;
5644
5645        ioc_info(ioc, "System Register set:\n");
5646        for (i = 0; i < (sz / sizeof(u32)); i++)
5647                pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
5648}
5649
5650/**
5651 * _base_wait_for_doorbell_int - wait for a controller interrupt (generated
5652 * by an IOC write to the doorbell)
5653 * @ioc: per adapter object
5654 * @timeout: timeout in seconds
5655 *
5656 * Return: 0 for success, non-zero for failure.
5657 *
5658 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5659 */
5661static int
5662_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5663{
5664        u32 cntdn, count;
5665        u32 int_status;
5666
5667        count = 0;
5668        cntdn = 1000 * timeout;
5669        do {
5670                int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5671                if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5672                        dhsprintk(ioc,
5673                                  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5674                                           __func__, count, timeout));
5675                        return 0;
5676                }
5677
5678                usleep_range(1000, 1500);
5679                count++;
5680        } while (--cntdn);
5681
5682        ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5683                __func__, count, int_status);
5684        return -EFAULT;
5685}
5686
5687static int
5688_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5689{
5690        u32 cntdn, count;
5691        u32 int_status;
5692
5693        count = 0;
5694        cntdn = 2000 * timeout;
5695        do {
5696                int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5697                if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5698                        dhsprintk(ioc,
5699                                  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5700                                           __func__, count, timeout));
5701                        return 0;
5702                }
5703
5704                udelay(500);
5705                count++;
5706        } while (--cntdn);
5707
5708        ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5709                __func__, count, int_status);
5710        return -EFAULT;
5711
5712}
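    /*
     * _base_spin_on_doorbell_int() is the busy-wait twin of
     * _base_wait_for_doorbell_int() above: it polls every 500 us with
     * udelay() instead of sleeping, trading CPU time for lower latency on
     * the short early steps of the doorbell handshake (cntdn = 2000 *
     * timeout keeps the overall wait at roughly @timeout seconds).
     */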
5713
5714/**
5715 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
5716 * @ioc: per adapter object
5717 * @timeout: timeout in seconds
5718 *
5719 * Return: 0 for success, non-zero for failure.
5720 *
5721 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
5722 * doorbell.
5723 */
5724static int
5725_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
5726{
5727        u32 cntdn, count;
5728        u32 int_status;
5729        u32 doorbell;
5730
5731        count = 0;
5732        cntdn = 1000 * timeout;
5733        do {
5734                int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5735                if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5736                        dhsprintk(ioc,
5737                                  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5738                                           __func__, count, timeout));
5739                        return 0;
5740                } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5741                        doorbell = ioc->base_readl(&ioc->chip->Doorbell);
5742                        if ((doorbell & MPI2_IOC_STATE_MASK) ==
5743                            MPI2_IOC_STATE_FAULT) {
5744                                mpt3sas_print_fault_code(ioc, doorbell);
5745                                return -EFAULT;
5746                        }
5747                        if ((doorbell & MPI2_IOC_STATE_MASK) ==
5748                            MPI2_IOC_STATE_COREDUMP) {
5749                                mpt3sas_print_coredump_info(ioc, doorbell);
5750                                return -EFAULT;
5751                        }
5752                } else if (int_status == 0xFFFFFFFF)
5753                        goto out;
5754
5755                usleep_range(1000, 1500);
5756                count++;
5757        } while (--cntdn);
5758
5759 out:
5760        ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5761                __func__, count, int_status);
5762        return -EFAULT;
5763}
5764
5765/**
5766 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5767 * @ioc: per adapter object
5768 * @timeout: timeout in seconds
5769 *
5770 * Return: 0 for success, non-zero for failure.
5771 */
5772static int
5773_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5774{
5775        u32 cntdn, count;
5776        u32 doorbell_reg;
5777
5778        count = 0;
5779        cntdn = 1000 * timeout;
5780        do {
5781                doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5782                if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5783                        dhsprintk(ioc,
5784                                  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5785                                           __func__, count, timeout));
5786                        return 0;
5787                }
5788
5789                usleep_range(1000, 1500);
5790                count++;
5791        } while (--cntdn);
5792
5793        ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5794                __func__, count, doorbell_reg);
5795        return -EFAULT;
5796}
5797
5798/**
5799 * _base_send_ioc_reset - send doorbell reset
5800 * @ioc: per adapter object
5801 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
5802 * @timeout: timeout in seconds
5803 *
5804 * Return: 0 for success, non-zero for failure.
5805 */
5806static int
5807_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5808{
5809        u32 ioc_state;
5810        int r = 0;
5811        unsigned long flags;
5812
5813        if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5814                ioc_err(ioc, "%s: unknown reset_type\n", __func__);
5815                return -EFAULT;
5816        }
5817
5818        if (!(ioc->facts.IOCCapabilities &
5819           MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5820                return -EFAULT;
5821
5822        ioc_info(ioc, "sending message unit reset !!\n");
5823
5824        writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5825            &ioc->chip->Doorbell);
5826        if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5827                r = -EFAULT;
5828                goto out;
5829        }
5830
5831        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5832        if (ioc_state) {
5833                ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5834                        __func__, ioc_state);
5835                r = -EFAULT;
5836                goto out;
5837        }
5838 out:
5839        if (r != 0) {
5840                ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5841                spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5842                /*
5843                 * Wait for IOC state CoreDump to clear only during
5844                 * HBA initialization & release time.
5845                 */
5846                if ((ioc_state & MPI2_IOC_STATE_MASK) ==
5847                    MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
5848                    ioc->fault_reset_work_q == NULL)) {
5849                        spin_unlock_irqrestore(
5850                            &ioc->ioc_reset_in_progress_lock, flags);
5851                        mpt3sas_print_coredump_info(ioc, ioc_state);
5852                        mpt3sas_base_wait_for_coredump_completion(ioc,
5853                            __func__);
5854                        spin_lock_irqsave(
5855                            &ioc->ioc_reset_in_progress_lock, flags);
5856                }
5857                spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5858        }
5859        ioc_info(ioc, "message unit reset: %s\n",
5860                 r == 0 ? "SUCCESS" : "FAILED");
5861        return r;
5862}
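    /*
     * Note: a message unit reset asks the firmware to quiesce and return
     * to the READY state without reloading the firmware image, so it is
     * far lighter than _base_diag_reset().  If the IOC instead lands in
     * FAULT or COREDUMP, callers typically escalate to the diag reset path.
     */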
5863
5864/**
5865 * mpt3sas_wait_for_ioc - wait for the IOC to become operational
5866 * @ioc: per adapter object
5867 * @timeout: timeout in seconds
5868 *
5869 * Waits up to @timeout seconds for the IOC to become operational.
5870 *
5871 * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
5872 */
5874int
5875mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5876{
5877        int wait_state_count = 0;
5878        u32 ioc_state;
5879
5880        do {
5881                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5882                if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5883                        break;
5884                ssleep(1);
5885                ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5886                                __func__, ++wait_state_count);
5887        } while (--timeout);
5888        if (!timeout) {
5889                ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5890                return -EFAULT;
5891        }
5892        if (wait_state_count)
5893                ioc_info(ioc, "ioc is operational\n");
5894        return 0;
5895}
5896
5897/**
5898 * _base_handshake_req_reply_wait - send request thru doorbell interface
5899 * @ioc: per adapter object
5900 * @request_bytes: request length
5901 * @request: pointer having request payload
5902 * @reply_bytes: reply length
5903 * @reply: pointer to reply payload
5904 * @timeout: timeout in seconds
5905 *
5906 * Return: 0 for success, non-zero for failure.
5907 */
5908static int
5909_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5910        u32 *request, int reply_bytes, u16 *reply, int timeout)
5911{
5912        MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5913        int i;
5914        u8 failed;
5915        __le32 *mfp;
5916
5917        /* make sure doorbell is not in use */
5918        if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5919                ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5920                return -EFAULT;
5921        }
5922
5923        /* clear pending doorbell interrupts from previous state changes */
5924        if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5925            MPI2_HIS_IOC2SYS_DB_STATUS)
5926                writel(0, &ioc->chip->HostInterruptStatus);
5927
5928        /* send message to ioc */
5929        writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5930            ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5931            &ioc->chip->Doorbell);
5932
5933        if ((_base_spin_on_doorbell_int(ioc, 5))) {
5934                ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5935                        __LINE__);
5936                return -EFAULT;
5937        }
5938        writel(0, &ioc->chip->HostInterruptStatus);
5939
5940        if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5941                ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5942                        __LINE__);
5943                return -EFAULT;
5944        }
5945
5946        /* send message 32-bits at a time */
5947        for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5948                writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
5949                if ((_base_wait_for_doorbell_ack(ioc, 5)))
5950                        failed = 1;
5951        }
5952
5953        if (failed) {
5954                ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5955                        __LINE__);
5956                return -EFAULT;
5957        }
5958
5959        /* now wait for the reply */
5960        if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5961                ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5962                        __LINE__);
5963                return -EFAULT;
5964        }
5965
5966        /* read the first two 16-bit words; the reply header gives the reply length */
5967        reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5968            & MPI2_DOORBELL_DATA_MASK);
5969        writel(0, &ioc->chip->HostInterruptStatus);
5970        if ((_base_wait_for_doorbell_int(ioc, 5))) {
5971                ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5972                        __LINE__);
5973                return -EFAULT;
5974        }
5975        reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5976            & MPI2_DOORBELL_DATA_MASK);
5977        writel(0, &ioc->chip->HostInterruptStatus);
5978
5979        for (i = 2; i < default_reply->MsgLength * 2; i++)  {
5980                if ((_base_wait_for_doorbell_int(ioc, 5))) {
5981                        ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5982                                __LINE__);
5983                        return -EFAULT;
5984                }
5985                if (i >= reply_bytes/2) /* overflow case */
5986                        ioc->base_readl(&ioc->chip->Doorbell);
5987                else
5988                        reply[i] = le16_to_cpu(
5989                            ioc->base_readl(&ioc->chip->Doorbell)
5990                            & MPI2_DOORBELL_DATA_MASK);
5991                writel(0, &ioc->chip->HostInterruptStatus);
5992        }
5993
5994        _base_wait_for_doorbell_int(ioc, 5);
5995        if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5996                dhsprintk(ioc,
5997                          ioc_info(ioc, "doorbell is in use (line=%d)\n",
5998                                   __LINE__));
5999        }
6000        writel(0, &ioc->chip->HostInterruptStatus);
6001
6002        if (ioc->logging_level & MPT_DEBUG_INIT) {
6003                mfp = (__le32 *)reply;
6004                pr_info("\toffset:data\n");
6005                for (i = 0; i < reply_bytes/4; i++)
6006                        ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6007                            le32_to_cpu(mfp[i]));
6008        }
6009        return 0;
6010}
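    /*
     * Summary of the doorbell handshake implemented above (descriptive
     * comment, mirroring the code):
     *
     *  1. Verify the doorbell is idle and clear any stale IOC-to-system
     *     interrupt.
     *  2. Write the HANDSHAKE function and request dword count to the
     *     Doorbell, wait for the doorbell interrupt, then wait for the
     *     IOC to acknowledge.
     *  3. Stream the request one 32-bit dword at a time, waiting for an
     *     ack after each write.
     *  4. Read the reply back 16 bits at a time through the same
     *     register, using MsgLength from the reply header to size the
     *     transfer and draining (but discarding) any words beyond
     *     @reply_bytes.
     */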
6011
6012/**
6013 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
6014 * @ioc: per adapter object
6015 * @mpi_reply: the reply payload from FW
6016 * @mpi_request: the request payload sent to FW
6017 *
6018 * The SAS IO Unit Control Request message allows the host to perform
6019 * low-level operations such as resets on the PHYs of the IO Unit. It also
6020 * allows the host to obtain the IOC-assigned device handle for a device,
6021 * given other identifying information about the device, and to remove
6022 * IOC resources associated with the device.
6023 *
6024 * Return: 0 for success, non-zero for failure.
6025 */
6026int
6027mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
6028        Mpi2SasIoUnitControlReply_t *mpi_reply,
6029        Mpi2SasIoUnitControlRequest_t *mpi_request)
6030{
6031        u16 smid;
6032        u8 issue_reset = 0;
6033        int rc;
6034        void *request;
6035
6036        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6037
6038        mutex_lock(&ioc->base_cmds.mutex);
6039
6040        if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6041                ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6042                rc = -EAGAIN;
6043                goto out;
6044        }
6045
6046        rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6047        if (rc)
6048                goto out;
6049
6050        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6051        if (!smid) {
6052                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6053                rc = -EAGAIN;
6054                goto out;
6055        }
6056
6057        rc = 0;
6058        ioc->base_cmds.status = MPT3_CMD_PENDING;
6059        request = mpt3sas_base_get_msg_frame(ioc, smid);
6060        ioc->base_cmds.smid = smid;
6061        memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
6062        if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6063            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
6064                ioc->ioc_link_reset_in_progress = 1;
6065        init_completion(&ioc->base_cmds.done);
6066        ioc->put_smid_default(ioc, smid);
6067        wait_for_completion_timeout(&ioc->base_cmds.done,
6068            msecs_to_jiffies(10000));
6069        if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6070            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
6071            ioc->ioc_link_reset_in_progress)
6072                ioc->ioc_link_reset_in_progress = 0;
6073        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6074                mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
6075                    mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
6076                    issue_reset);
6077                goto issue_host_reset;
6078        }
6079        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6080                memcpy(mpi_reply, ioc->base_cmds.reply,
6081                    sizeof(Mpi2SasIoUnitControlReply_t));
6082        else
6083                memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
6084        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6085        goto out;
6086
6087 issue_host_reset:
6088        if (issue_reset)
6089                mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6090        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6091        rc = -EFAULT;
6092 out:
6093        mutex_unlock(&ioc->base_cmds.mutex);
6094        return rc;
6095}
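    /*
     * Usage sketch (illustrative, not taken from this file): issue a hard
     * reset on PHY 3 of the IO unit.
     *
     *	Mpi2SasIoUnitControlRequest_t req;
     *	Mpi2SasIoUnitControlReply_t reply;
     *
     *	memset(&req, 0, sizeof(req));
     *	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
     *	req.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
     *	req.PhyNum = 3;
     *	if (mpt3sas_base_sas_iounit_control(ioc, &reply, &req))
     *		... handle -EAGAIN/-EFAULT ...
     *
     * This mirrors how mpt3sas_transport.c performs PHY resets.
     */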
6096
6097/**
6098 * mpt3sas_base_scsi_enclosure_processor - send a request to the SEP device
6099 * @ioc: per adapter object
6100 * @mpi_reply: the reply payload from FW
6101 * @mpi_request: the request payload sent to FW
6102 *
6103 * The SCSI Enclosure Processor request message causes the IOC to
6104 * communicate with SES devices to control LED status signals.
6105 *
6106 * Return: 0 for success, non-zero for failure.
6107 */
6108int
6109mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
6110        Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
6111{
6112        u16 smid;
6113        u8 issue_reset = 0;
6114        int rc;
6115        void *request;
6116
6117        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6118
6119        mutex_lock(&ioc->base_cmds.mutex);
6120
6121        if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6122                ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6123                rc = -EAGAIN;
6124                goto out;
6125        }
6126
6127        rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6128        if (rc)
6129                goto out;
6130
6131        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6132        if (!smid) {
6133                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6134                rc = -EAGAIN;
6135                goto out;
6136        }
6137
6138        rc = 0;
6139        ioc->base_cmds.status = MPT3_CMD_PENDING;
6140        request = mpt3sas_base_get_msg_frame(ioc, smid);
6141        ioc->base_cmds.smid = smid;
6142        memset(request, 0, ioc->request_sz);
6143        memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
6144        init_completion(&ioc->base_cmds.done);
6145        ioc->put_smid_default(ioc, smid);
6146        wait_for_completion_timeout(&ioc->base_cmds.done,
6147            msecs_to_jiffies(10000));
6148        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6149                mpt3sas_check_cmd_timeout(ioc,
6150                    ioc->base_cmds.status, mpi_request,
6151                    sizeof(Mpi2SepRequest_t)/4, issue_reset);
6152                goto issue_host_reset;
6153        }
6154        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6155                memcpy(mpi_reply, ioc->base_cmds.reply,
6156                    sizeof(Mpi2SepReply_t));
6157        else
6158                memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
6159        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6160        goto out;
6161
6162 issue_host_reset:
6163        if (issue_reset)
6164                mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6165        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6166        rc = -EFAULT;
6167 out:
6168        mutex_unlock(&ioc->base_cmds.mutex);
6169        return rc;
6170}
6171
6172/**
6173 * _base_get_port_facts - obtain port facts reply and save in ioc
6174 * @ioc: per adapter object
6175 * @port: port number
6176 *
6177 * Return: 0 for success, non-zero for failure.
6178 */
6179static int
6180_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
6181{
6182        Mpi2PortFactsRequest_t mpi_request;
6183        Mpi2PortFactsReply_t mpi_reply;
6184        struct mpt3sas_port_facts *pfacts;
6185        int mpi_reply_sz, mpi_request_sz, r;
6186
6187        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6188
6189        mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
6190        mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
6191        memset(&mpi_request, 0, mpi_request_sz);
6192        mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
6193        mpi_request.PortNumber = port;
6194        r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6195            (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6196
6197        if (r != 0) {
6198                ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6199                return r;
6200        }
6201
6202        pfacts = &ioc->pfacts[port];
6203        memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
6204        pfacts->PortNumber = mpi_reply.PortNumber;
6205        pfacts->VP_ID = mpi_reply.VP_ID;
6206        pfacts->VF_ID = mpi_reply.VF_ID;
6207        pfacts->MaxPostedCmdBuffers =
6208            le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
6209
6210        return 0;
6211}
6212
6213/**
6214 * _base_wait_for_iocstate - wait until the IOC is in READY or OPERATIONAL
6215 * @ioc: per adapter object
6216 * @timeout: timeout in seconds
6217 *
6218 * Return: 0 for success, non-zero for failure.
6219 */
6220static int
6221_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
6222{
6223        u32 ioc_state;
6224        int rc;
6225
6226        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6227
6228        if (ioc->pci_error_recovery) {
6229                dfailprintk(ioc,
6230                            ioc_info(ioc, "%s: host in pci error recovery\n",
6231                                     __func__));
6232                return -EFAULT;
6233        }
6234
6235        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6236        dhsprintk(ioc,
6237                  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6238                           __func__, ioc_state));
6239
6240        if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
6241            (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6242                return 0;
6243
6244        if (ioc_state & MPI2_DOORBELL_USED) {
6245                dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6246                goto issue_diag_reset;
6247        }
6248
6249        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6250                mpt3sas_print_fault_code(ioc, ioc_state &
6251                    MPI2_DOORBELL_DATA_MASK);
6252                goto issue_diag_reset;
6253        } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6254            MPI2_IOC_STATE_COREDUMP) {
6255                ioc_info(ioc,
6256                    "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
6257                    __func__, ioc_state);
6258                return -EFAULT;
6259        }
6260
6261        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6262        if (ioc_state) {
6263                dfailprintk(ioc,
6264                            ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6265                                     __func__, ioc_state));
6266                return -EFAULT;
6267        }
6268
6269 issue_diag_reset:
6270        rc = _base_diag_reset(ioc);
6271        return rc;
6272}
6273
6274/**
6275 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
6276 * @ioc: per adapter object
6277 *
6278 * Return: 0 for success, non-zero for failure.
6279 */
6280static int
6281_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
6282{
6283        Mpi2IOCFactsRequest_t mpi_request;
6284        Mpi2IOCFactsReply_t mpi_reply;
6285        struct mpt3sas_facts *facts;
6286        int mpi_reply_sz, mpi_request_sz, r;
6287
6288        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6289
6290        r = _base_wait_for_iocstate(ioc, 10);
6291        if (r) {
6292                dfailprintk(ioc,
6293                            ioc_info(ioc, "%s: failed getting to correct state\n",
6294                                     __func__));
6295                return r;
6296        }
6297        mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
6298        mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
6299        memset(&mpi_request, 0, mpi_request_sz);
6300        mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
6301        r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6302            (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6303
6304        if (r != 0) {
6305                ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6306                return r;
6307        }
6308
6309        facts = &ioc->facts;
6310        memset(facts, 0, sizeof(struct mpt3sas_facts));
6311        facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
6312        facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
6313        facts->VP_ID = mpi_reply.VP_ID;
6314        facts->VF_ID = mpi_reply.VF_ID;
6315        facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
6316        facts->MaxChainDepth = mpi_reply.MaxChainDepth;
6317        facts->WhoInit = mpi_reply.WhoInit;
6318        facts->NumberOfPorts = mpi_reply.NumberOfPorts;
6319        facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
6320        if (ioc->msix_enable && (facts->MaxMSIxVectors <=
6321            MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
6322                ioc->combined_reply_queue = 0;
6323        facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
6324        facts->MaxReplyDescriptorPostQueueDepth =
6325            le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
6326        facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
6327        facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
6328        if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
6329                ioc->ir_firmware = 1;
6330        if ((facts->IOCCapabilities &
6331              MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
6332                ioc->rdpq_array_capable = 1;
6333        if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
6334            && ioc->is_aero_ioc)
6335                ioc->atomic_desc_capable = 1;
6336        facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
6337        facts->IOCRequestFrameSize =
6338            le16_to_cpu(mpi_reply.IOCRequestFrameSize);
6339        if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6340                facts->IOCMaxChainSegmentSize =
6341                        le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
6342        }
6343        facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
6344        facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
6345        ioc->shost->max_id = -1;
6346        facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
6347        facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
6348        facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
6349        facts->HighPriorityCredit =
6350            le16_to_cpu(mpi_reply.HighPriorityCredit);
6351        facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
6352        facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
6353        facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
6354
6355        /*
6356         * Get the Page Size from IOC Facts. If it's 0, default to 4k.
6357         */
6358        ioc->page_size = 1 << facts->CurrentHostPageSize;
6359        if (ioc->page_size == 1) {
6360                ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
6361                ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
6362        }
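            /*
             * CurrentHostPageSize is a power-of-two exponent: a reported
             * value of 12, for example, yields 1 << 12 = 4096 bytes.  A
             * value of 0 would give a nonsensical 1-byte page, hence the
             * 4k fallback above.
             */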
6363        dinitprintk(ioc,
6364                    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
6365                             facts->CurrentHostPageSize));
6366
6367        dinitprintk(ioc,
6368                    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
6369                             facts->RequestCredit, facts->MaxChainDepth));
6370        dinitprintk(ioc,
6371                    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
6372                             facts->IOCRequestFrameSize * 4,
6373                             facts->ReplyFrameSize * 4));
6374        return 0;
6375}
6376
6377/**
6378 * _base_send_ioc_init - send ioc_init to firmware
6379 * @ioc: per adapter object
6380 *
6381 * Return: 0 for success, non-zero for failure.
6382 */
6383static int
6384_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
6385{
6386        Mpi2IOCInitRequest_t mpi_request;
6387        Mpi2IOCInitReply_t mpi_reply;
6388        int i, r = 0;
6389        ktime_t current_time;
6390        u16 ioc_status;
6391        u32 reply_post_free_array_sz = 0;
6392
6393        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6394
6395        memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
6396        mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
6397        mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
6398        mpi_request.VF_ID = 0; /* TODO */
6399        mpi_request.VP_ID = 0;
6400        mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
6401        mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
6402        mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
6403
6404        if (_base_is_controller_msix_enabled(ioc))
6405                mpi_request.HostMSIxVectors = ioc->reply_queue_count;
6406        mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
6407        mpi_request.ReplyDescriptorPostQueueDepth =
6408            cpu_to_le16(ioc->reply_post_queue_depth);
6409        mpi_request.ReplyFreeQueueDepth =
6410            cpu_to_le16(ioc->reply_free_queue_depth);
6411
6412        mpi_request.SenseBufferAddressHigh =
6413            cpu_to_le32((u64)ioc->sense_dma >> 32);
6414        mpi_request.SystemReplyAddressHigh =
6415            cpu_to_le32((u64)ioc->reply_dma >> 32);
6416        mpi_request.SystemRequestFrameBaseAddress =
6417            cpu_to_le64((u64)ioc->request_dma);
6418        mpi_request.ReplyFreeQueueAddress =
6419            cpu_to_le64((u64)ioc->reply_free_dma);
6420
6421        if (ioc->rdpq_array_enable) {
6422                reply_post_free_array_sz = ioc->reply_queue_count *
6423                    sizeof(Mpi2IOCInitRDPQArrayEntry);
6424                memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
6425                for (i = 0; i < ioc->reply_queue_count; i++)
6426                        ioc->reply_post_free_array[i].RDPQBaseAddress =
6427                            cpu_to_le64(
6428                                (u64)ioc->reply_post[i].reply_post_free_dma);
6429                mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
6430                mpi_request.ReplyDescriptorPostQueueAddress =
6431                    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
6432        } else {
6433                mpi_request.ReplyDescriptorPostQueueAddress =
6434                    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
6435        }
6436
6437        /*
6438         * Set the flag to enable CoreDump state feature in IOC firmware.
6439         */
6440        mpi_request.ConfigurationFlags |=
6441            cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
6442
6443        /* This time stamp specifies the number of milliseconds since
6444         * the epoch (midnight, January 1, 1970 UTC).
6445         */
6446        current_time = ktime_get_real();
6447        mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
6448
6449        if (ioc->logging_level & MPT_DEBUG_INIT) {
6450                __le32 *mfp;
6451                int i;
6452
6453                mfp = (__le32 *)&mpi_request;
6454                ioc_info(ioc, "\toffset:data\n");
6455                for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
6456                        ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6457                            le32_to_cpu(mfp[i]));
6458        }
6459
6460        r = _base_handshake_req_reply_wait(ioc,
6461            sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
6462            sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
6463
6464        if (r != 0) {
6465                ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6466                return r;
6467        }
6468
6469        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6470        if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
6471            mpi_reply.IOCLogInfo) {
6472                ioc_err(ioc, "%s: failed\n", __func__);
6473                r = -EIO;
6474        }
6475
6476        return r;
6477}
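    /*
     * Note: IOCInit travels over the slow doorbell handshake rather than
     * the normal request queues because the resources it describes (the
     * request frames, reply free queue and reply descriptor post queues)
     * only become usable once the IOC has accepted this message.
     */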
6478
6479/**
6480 * mpt3sas_port_enable_done - command completion routine for port enable
6481 * @ioc: per adapter object
6482 * @smid: system request message index
6483 * @msix_index: MSIX table index supplied by the OS
6484 * @reply: reply message frame(lower 32bit addr)
6485 *
6486 * Return: 1 meaning mf should be freed from _base_interrupt
6487 *          0 means the mf is freed from this function.
6488 */
6489u8
6490mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
6491        u32 reply)
6492{
6493        MPI2DefaultReply_t *mpi_reply;
6494        u16 ioc_status;
6495
6496        if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
6497                return 1;
6498
6499        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
6500        if (!mpi_reply)
6501                return 1;
6502
6503        if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
6504                return 1;
6505
6506        ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
6507        ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
6508        ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
6509        memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
6510        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
6511        if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6512                ioc->port_enable_failed = 1;
6513
6514        if (ioc->is_driver_loading) {
6515                if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
6516                        mpt3sas_port_enable_complete(ioc);
6517                        return 1;
6518                } else {
6519                        ioc->start_scan_failed = ioc_status;
6520                        ioc->start_scan = 0;
6521                        return 1;
6522                }
6523        }
6524        complete(&ioc->port_enable_cmds.done);
6525        return 1;
6526}
6527
6528/**
6529 * _base_send_port_enable - send port enable (starts discovery) to firmware
6530 * @ioc: per adapter object
6531 *
6532 * Return: 0 for success, non-zero for failure.
6533 */
6534static int
6535_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
6536{
6537        Mpi2PortEnableRequest_t *mpi_request;
6538        Mpi2PortEnableReply_t *mpi_reply;
6539        int r = 0;
6540        u16 smid;
6541        u16 ioc_status;
6542
6543        ioc_info(ioc, "sending port enable !!\n");
6544
6545        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6546                ioc_err(ioc, "%s: internal command already in use\n", __func__);
6547                return -EAGAIN;
6548        }
6549
6550        smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
6551        if (!smid) {
6552                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6553                return -EAGAIN;
6554        }
6555
6556        ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
6557        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6558        ioc->port_enable_cmds.smid = smid;
6559        memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
6560        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
6561
6562        init_completion(&ioc->port_enable_cmds.done);
6563        ioc->put_smid_default(ioc, smid);
6564        wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
6565        if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
6566                ioc_err(ioc, "%s: timeout\n", __func__);
6567                _debug_dump_mf(mpi_request,
6568                    sizeof(Mpi2PortEnableRequest_t)/4);
6569                if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
6570                        r = -EFAULT;
6571                else
6572                        r = -ETIME;
6573                goto out;
6574        }
6575
6576        mpi_reply = ioc->port_enable_cmds.reply;
6577        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
6578        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6579                ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
6580                        __func__, ioc_status);
6581                r = -EFAULT;
6582                goto out;
6583        }
6584
6585 out:
6586        ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6587        ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
6588        return r;
6589}
6590
6591/**
6592 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
6593 * @ioc: per adapter object
6594 *
6595 * Return: 0 for success, non-zero for failure.
6596 */
6597int
6598mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
6599{
6600        Mpi2PortEnableRequest_t *mpi_request;
6601        u16 smid;
6602
6603        ioc_info(ioc, "sending port enable !!\n");
6604
6605        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6606                ioc_err(ioc, "%s: internal command already in use\n", __func__);
6607                return -EAGAIN;
6608        }
6609
6610        smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
6611        if (!smid) {
6612                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6613                return -EAGAIN;
6614        }
6615
6616        ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
6617        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6618        ioc->port_enable_cmds.smid = smid;
6619        memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
6620        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
6621
6622        ioc->put_smid_default(ioc, smid);
6623        return 0;
6624}
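    /*
     * Unlike _base_send_port_enable() above, this variant only posts the
     * request and returns immediately; completion is reported
     * asynchronously via mpt3sas_port_enable_done(), which updates
     * ioc->start_scan so the driver's scan-finished logic can poll for
     * discovery completion during an async SCSI host scan.
     */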
6625
6626/**
6627 * _base_determine_wait_on_discovery - decide whether to wait for discovery
6628 * @ioc: per adapter object
6629 *
6630 * Decide whether to wait on discovery to complete. Used to either
6631 * locate boot device, or report volumes ahead of physical devices.
6632 *
6633 * Return: 1 for wait, 0 for don't wait.
6634 */
6635static int
6636_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
6637{
6638        /* We wait for discovery to complete if IR firmware is loaded.
6639         * The SAS topology events arrive before the PD events, so we need
6640         * time to turn on the corresponding bits in ioc->pd_handles.
6641         * Also, it may be required to report volumes ahead of physical
6642         * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
6643         */
6644        if (ioc->ir_firmware)
6645                return 1;
6646
6647        /* if no Bios, then we don't need to wait */
6648        if (!ioc->bios_pg3.BiosVersion)
6649                return 0;
6650
6651        /* The BIOS is present, so we drop down here.
6652         *
6653         * If there are any boot device entries in BIOS Page 2, then we
6654         * wait for discovery to complete.
6655         */
6656
6657        /* Current Boot Device */
6658        if ((ioc->bios_pg2.CurrentBootDeviceForm &
6659            MPI2_BIOSPAGE2_FORM_MASK) ==
6660            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6661        /* Request Boot Device */
6662           (ioc->bios_pg2.ReqBootDeviceForm &
6663            MPI2_BIOSPAGE2_FORM_MASK) ==
6664            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6665        /* Alternate Request Boot Device */
6666           (ioc->bios_pg2.ReqAltBootDeviceForm &
6667            MPI2_BIOSPAGE2_FORM_MASK) ==
6668            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
6669                return 0;
6670
6671        return 1;
6672}
6673
6674/**
6675 * _base_unmask_events - turn on notification for this event
6676 * @ioc: per adapter object
6677 * @event: firmware event
6678 *
6679 * The mask is stored in ioc->event_masks.
6680 */
6681static void
6682_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
6683{
6684        u32 desired_event;
6685
6686        if (event >= 128)
6687                return;
6688
6689        desired_event = (1 << (event % 32));
6690
6691        if (event < 32)
6692                ioc->event_masks[0] &= ~desired_event;
6693        else if (event < 64)
6694                ioc->event_masks[1] &= ~desired_event;
6695        else if (event < 96)
6696                ioc->event_masks[2] &= ~desired_event;
6697        else if (event < 128)
6698                ioc->event_masks[3] &= ~desired_event;
6699}
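    /*
     * Worked example (illustrative): for event 0x25, desired_event =
     * 1 << (0x25 % 32) = 1 << 5, and since 32 <= 0x25 < 64 the bit is
     * cleared in ioc->event_masks[1].  A cleared mask bit means the IOC
     * will deliver notifications for that event.
     */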
6700
6701/**
6702 * _base_event_notification - send event notification
6703 * @ioc: per adapter object
6704 *
6705 * Return: 0 for success, non-zero for failure.
6706 */
6707static int
6708_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
6709{
6710        Mpi2EventNotificationRequest_t *mpi_request;
6711        u16 smid;
6712        int r = 0;
6713        int i;
6714
6715        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6716
6717        if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6718                ioc_err(ioc, "%s: internal command already in use\n", __func__);
6719                return -EAGAIN;
6720        }
6721
6722        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6723        if (!smid) {
6724                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6725                return -EAGAIN;
6726        }
6727        ioc->base_cmds.status = MPT3_CMD_PENDING;
6728        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6729        ioc->base_cmds.smid = smid;
6730        memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
6731        mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
6732        mpi_request->VF_ID = 0; /* TODO */
6733        mpi_request->VP_ID = 0;
6734        for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6735                mpi_request->EventMasks[i] =
6736                    cpu_to_le32(ioc->event_masks[i]);
6737        init_completion(&ioc->base_cmds.done);
6738        ioc->put_smid_default(ioc, smid);
6739        wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
6740        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6741                ioc_err(ioc, "%s: timeout\n", __func__);
6742                _debug_dump_mf(mpi_request,
6743                    sizeof(Mpi2EventNotificationRequest_t)/4);
6744                if (ioc->base_cmds.status & MPT3_CMD_RESET)
6745                        r = -EFAULT;
6746                else
6747                        r = -ETIME;
6748        } else
6749                dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
6750        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6751        return r;
6752}
6753
6754/**
6755 * mpt3sas_base_validate_event_type - validate event types
6756 * @ioc: per adapter object
6757 * @event_type: firmware event
6758 *
6759 * This will turn on firmware event notification when an application
6760 * asks for an event. We don't mask events that are already enabled.
6761 */
6762void
6763mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
6764{
6765        int i, j;
6766        u32 event_mask, desired_event;
6767        u8 send_update_to_fw;
6768
6769        for (i = 0, send_update_to_fw = 0; i <
6770            MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
6771                event_mask = ~event_type[i];
6772                desired_event = 1;
6773                for (j = 0; j < 32; j++) {
6774                        if (!(event_mask & desired_event) &&
6775                            (ioc->event_masks[i] & desired_event)) {
6776                                ioc->event_masks[i] &= ~desired_event;
6777                                send_update_to_fw = 1;
6778                        }
6779                        desired_event = (desired_event << 1);
6780                }
6781        }
6782
6783        if (!send_update_to_fw)
6784                return;
6785
6786        mutex_lock(&ioc->base_cmds.mutex);
6787        _base_event_notification(ioc);
6788        mutex_unlock(&ioc->base_cmds.mutex);
6789}
6790
6791/**
6792 * _base_diag_reset - the "big hammer" start of day reset
6793 * @ioc: per adapter object
6794 *
6795 * Return: 0 for success, non-zero for failure.
6796 */
6797static int
6798_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6799{
6800        u32 host_diagnostic;
6801        u32 ioc_state;
6802        u32 count;
6803        u32 hcb_size;
6804
6805        ioc_info(ioc, "sending diag reset !!\n");
6806
6807        drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
6808
6809        count = 0;
6810        do {
6811                /* Write magic sequence to WriteSequence register
6812                 * Loop until in diagnostic mode
6813                 */
6814                drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
6815                writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6816                writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
6817                writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
6818                writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
6819                writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
6820                writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
6821                writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
6822
6823                /* wait 100 msec */
6824                msleep(100);
6825
6826                if (count++ > 20) {
6827                        ioc_info(ioc,
6828                            "Stop writing magic sequence after 20 retries\n");
6829                        _base_dump_reg_set(ioc);
6830                        goto out;
6831                }
6832
6833                host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6834                drsprintk(ioc,
6835                          ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6836                                   count, host_diagnostic));
6837
6838        } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
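        /*
         * The loop above retries the magic sequence for roughly two
         * seconds (20 retries x 100 ms) before giving up.
         */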
6839
6840        hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
6841
6842        drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
6843        writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
6844             &ioc->chip->HostDiagnostic);
6845
        /*
         * This delay allows the chip PCIe hardware time to finish its
         * reset tasks.
         */
6847        msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
6848
6849        /* Approximately 300 second max wait */
6850        for (count = 0; count < (300000000 /
6851                MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
6852
6853                host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6854
6855                if (host_diagnostic == 0xFFFFFFFF) {
6856                        ioc_info(ioc,
6857                            "Invalid host diagnostic register value\n");
6858                        _base_dump_reg_set(ioc);
6859                        goto out;
6860                }
6861                if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
6862                        break;
6863
6864                msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
6865        }
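        /*
         * Note: an all-ones read (0xFFFFFFFF) inside the loop above
         * typically means the device has dropped off the PCI bus
         * (master abort), so the reset is abandoned rather than retried.
         */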
6866
6867        if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
6868
6869                drsprintk(ioc,
6870                          ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
6871                host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
6872                host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
6873                writel(host_diagnostic, &ioc->chip->HostDiagnostic);
6874
6875                drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
6876                writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
6877                    &ioc->chip->HCBSize);
6878        }
6879
6880        drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
6881        writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
6882            &ioc->chip->HostDiagnostic);
6883
6884        drsprintk(ioc,
6885                  ioc_info(ioc, "disable writes to the diagnostic register\n"));
6886        writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6887
6888        drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
6889        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
6890        if (ioc_state) {
6891                ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6892                        __func__, ioc_state);
6893                _base_dump_reg_set(ioc);
6894                goto out;
6895        }
6896
6897        ioc_info(ioc, "diag reset: SUCCESS\n");
6898        return 0;
6899
6900 out:
6901        ioc_err(ioc, "diag reset: FAILED\n");
6902        return -EFAULT;
6903}
6904
6905/**
6906 * _base_make_ioc_ready - put controller in READY state
6907 * @ioc: per adapter object
6908 * @type: FORCE_BIG_HAMMER or SOFT_RESET
6909 *
6910 * Return: 0 for success, non-zero for failure.
6911 */
6912static int
6913_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
6914{
6915        u32 ioc_state;
6916        int rc;
6917        int count;
6918
6919        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6920
6921        if (ioc->pci_error_recovery)
6922                return 0;
6923
6924        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6925        dhsprintk(ioc,
6926                  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6927                           __func__, ioc_state));
6928
6929        /* if in RESET state, it should move to READY state shortly */
6930        count = 0;
6931        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
6932                while ((ioc_state & MPI2_IOC_STATE_MASK) !=
6933                    MPI2_IOC_STATE_READY) {
6934                        if (count++ == 10) {
6935                                ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6936                                        __func__, ioc_state);
6937                                return -EFAULT;
6938                        }
6939                        ssleep(1);
6940                        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6941                }
6942        }
6943
6944        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
6945                return 0;
6946
6947        if (ioc_state & MPI2_DOORBELL_USED) {
6948                ioc_info(ioc, "unexpected doorbell active!\n");
6949                goto issue_diag_reset;
6950        }
6951
6952        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6953                mpt3sas_print_fault_code(ioc, ioc_state &
6954                    MPI2_DOORBELL_DATA_MASK);
6955                goto issue_diag_reset;
6956        }
6957
6958        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
                /*
                 * If a host reset is invoked while the watchdog thread is
                 * waiting for the IOC state to change to Fault, the driver
                 * has to wait here for the CoreDump state to clear;
                 * otherwise a reset would be issued to the FW before it has
                 * copied its logs to the coredump region, and the FW would
                 * move the IOC straight to the Reset state.
                 */
6966                if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
6967                        mpt3sas_print_coredump_info(ioc, ioc_state &
6968                            MPI2_DOORBELL_DATA_MASK);
6969                        mpt3sas_base_wait_for_coredump_completion(ioc,
6970                            __func__);
6971                }
6972                goto issue_diag_reset;
6973        }
6974
6975        if (type == FORCE_BIG_HAMMER)
6976                goto issue_diag_reset;
6977
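        /*
         * Try the lighter-weight message unit reset first; if it fails,
         * fall through to the diag ("big hammer") reset below.
         */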
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) {
                if (!(_base_send_ioc_reset(ioc,
                    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15)))
                        return 0;
        }
6983
6984 issue_diag_reset:
6985        rc = _base_diag_reset(ioc);
6986        return rc;
6987}
6988
6989/**
6990 * _base_make_ioc_operational - put controller in OPERATIONAL state
6991 * @ioc: per adapter object
6992 *
6993 * Return: 0 for success, non-zero for failure.
6994 */
6995static int
6996_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6997{
6998        int r, i, index, rc;
6999        unsigned long   flags;
7000        u32 reply_address;
7001        u16 smid;
7002        struct _tr_list *delayed_tr, *delayed_tr_next;
7003        struct _sc_list *delayed_sc, *delayed_sc_next;
7004        struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
7005        u8 hide_flag;
7006        struct adapter_reply_queue *reply_q;
7007        Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
7008
7009        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7010
7011        /* clean the delayed target reset list */
7012        list_for_each_entry_safe(delayed_tr, delayed_tr_next,
7013            &ioc->delayed_tr_list, list) {
7014                list_del(&delayed_tr->list);
7015                kfree(delayed_tr);
7016        }
7017
7019        list_for_each_entry_safe(delayed_tr, delayed_tr_next,
7020            &ioc->delayed_tr_volume_list, list) {
7021                list_del(&delayed_tr->list);
7022                kfree(delayed_tr);
7023        }
7024
7025        list_for_each_entry_safe(delayed_sc, delayed_sc_next,
7026            &ioc->delayed_sc_list, list) {
7027                list_del(&delayed_sc->list);
7028                kfree(delayed_sc);
7029        }
7030
7031        list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
7032            &ioc->delayed_event_ack_list, list) {
7033                list_del(&delayed_event_ack->list);
7034                kfree(delayed_event_ack);
7035        }
7036
7037        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7038
7039        /* hi-priority queue */
7040        INIT_LIST_HEAD(&ioc->hpr_free_list);
7041        smid = ioc->hi_priority_smid;
7042        for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
7043                ioc->hpr_lookup[i].cb_idx = 0xFF;
7044                ioc->hpr_lookup[i].smid = smid;
7045                list_add_tail(&ioc->hpr_lookup[i].tracker_list,
7046                    &ioc->hpr_free_list);
7047        }
7048
7049        /* internal queue */
7050        INIT_LIST_HEAD(&ioc->internal_free_list);
7051        smid = ioc->internal_smid;
7052        for (i = 0; i < ioc->internal_depth; i++, smid++) {
7053                ioc->internal_lookup[i].cb_idx = 0xFF;
7054                ioc->internal_lookup[i].smid = smid;
7055                list_add_tail(&ioc->internal_lookup[i].tracker_list,
7056                    &ioc->internal_free_list);
7057        }
7058
7059        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7060
7061        /* initialize Reply Free Queue */
7062        for (i = 0, reply_address = (u32)ioc->reply_dma ;
7063            i < ioc->reply_free_queue_depth ; i++, reply_address +=
7064            ioc->reply_sz) {
7065                ioc->reply_free[i] = cpu_to_le32(reply_address);
7066                if (ioc->is_mcpu_endpoint)
7067                        _base_clone_reply_to_sys_mem(ioc,
7068                                        reply_address, i);
7069        }
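
        /*
         * The loop above seeds the Reply Free Queue: each entry holds
         * the DMA address of one reply frame, which the firmware pops
         * whenever it posts a reply back to the host.
         */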
7070
7071        /* initialize reply queues */
7072        if (ioc->is_driver_loading)
7073                _base_assign_reply_queues(ioc);
7074
7075        /* initialize Reply Post Free Queue */
7076        index = 0;
7077        reply_post_free_contig = ioc->reply_post[0].reply_post_free;
7078        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
7079                /*
7080                 * If RDPQ is enabled, switch to the next allocation.
7081                 * Otherwise advance within the contiguous region.
7082                 */
7083                if (ioc->rdpq_array_enable) {
7084                        reply_q->reply_post_free =
7085                                ioc->reply_post[index++].reply_post_free;
7086                } else {
7087                        reply_q->reply_post_free = reply_post_free_contig;
7088                        reply_post_free_contig += ioc->reply_post_queue_depth;
7089                }
7090
7091                reply_q->reply_post_host_index = 0;
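                /*
                 * A descriptor of all 1s (ULLONG_MAX) marks a Reply
                 * Post slot as unused; the firmware overwrites it when
                 * it posts a reply descriptor.
                 */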
7092                for (i = 0; i < ioc->reply_post_queue_depth; i++)
7093                        reply_q->reply_post_free[i].Words =
7094                            cpu_to_le64(ULLONG_MAX);
7095                if (!_base_is_controller_msix_enabled(ioc))
7096                        goto skip_init_reply_post_free_queue;
7097        }
7098 skip_init_reply_post_free_queue:
7099
7100        r = _base_send_ioc_init(ioc);
7101        if (r) {
                /*
                 * There is no need to check the IOC for the fault state
                 * and issue a diag reset during host reset; this check
                 * is needed only at driver load time.
                 */
7107                if (!ioc->is_driver_loading)
7108                        return r;
7109
7110                rc = _base_check_for_fault_and_issue_reset(ioc);
7111                if (rc || (_base_send_ioc_init(ioc)))
7112                        return r;
7113        }
7114
7115        /* initialize reply free host index */
7116        ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
7117        writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
7118
7119        /* initialize reply post host index */
7120        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
7121                if (ioc->combined_reply_queue)
7122                        writel((reply_q->msix_index & 7)<<
7123                           MPI2_RPHI_MSIX_INDEX_SHIFT,
7124                           ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
7125                else
7126                        writel(reply_q->msix_index <<
7127                                MPI2_RPHI_MSIX_INDEX_SHIFT,
7128                                &ioc->chip->ReplyPostHostIndex);
7129
7130                if (!_base_is_controller_msix_enabled(ioc))
7131                        goto skip_init_reply_post_host_index;
7132        }
7133
7134 skip_init_reply_post_host_index:
7135
7136        mpt3sas_base_unmask_interrupts(ioc);
7137
7138        if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7139                r = _base_display_fwpkg_version(ioc);
7140                if (r)
7141                        return r;
7142        }
7143
7144        _base_static_config_pages(ioc);
7145        r = _base_event_notification(ioc);
7146        if (r)
7147                return r;
7148
7149        if (ioc->is_driver_loading) {
7150
7151                if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
7152                    == 0x80) {
7153                        hide_flag = (u8) (
7154                            le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
7155                            MFG_PAGE10_HIDE_SSDS_MASK);
7156                        if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
7157                                ioc->mfg_pg10_hide_flag = hide_flag;
7158                }
7159
7160                ioc->wait_for_discovery_to_complete =
7161                    _base_determine_wait_on_discovery(ioc);
7162
7163                return r; /* scan_start and scan_finished support */
7164        }
7165
        return _base_send_port_enable(ioc);
7171}
7172
7173/**
 * mpt3sas_base_free_resources - free controller resources
7175 * @ioc: per adapter object
7176 */
7177void
7178mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
7179{
7180        dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7181
        /* synchronize freeing resources with the pci_access_mutex lock */
7183        mutex_lock(&ioc->pci_access_mutex);
7184        if (ioc->chip_phys && ioc->chip) {
7185                mpt3sas_base_mask_interrupts(ioc);
7186                ioc->shost_recovery = 1;
7187                _base_make_ioc_ready(ioc, SOFT_RESET);
7188                ioc->shost_recovery = 0;
7189        }
7190
7191        mpt3sas_base_unmap_resources(ioc);
7192        mutex_unlock(&ioc->pci_access_mutex);
7194}
7195
7196/**
7197 * mpt3sas_base_attach - attach controller instance
7198 * @ioc: per adapter object
7199 *
7200 * Return: 0 for success, non-zero for failure.
7201 */
7202int
7203mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
7204{
7205        int r, i, rc;
7206        int cpu_id, last_cpu_id = 0;
7207
7208        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7209
7210        /* setup cpu_msix_table */
7211        ioc->cpu_count = num_online_cpus();
7212        for_each_online_cpu(cpu_id)
7213                last_cpu_id = cpu_id;
7214        ioc->cpu_msix_table_sz = last_cpu_id + 1;
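        /*
         * Size the table by the highest online CPU id plus one, not by
         * the online count, since CPU ids may be sparse.
         */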
7215        ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
7216        ioc->reply_queue_count = 1;
7217        if (!ioc->cpu_msix_table) {
7218                ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
7219                r = -ENOMEM;
7220                goto out_free_resources;
7221        }
7222
7223        if (ioc->is_warpdrive) {
7224                ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
7225                    sizeof(resource_size_t *), GFP_KERNEL);
7226                if (!ioc->reply_post_host_index) {
7227                        ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
7228                        r = -ENOMEM;
7229                        goto out_free_resources;
7230                }
7231        }
7232
7233        ioc->smp_affinity_enable = smp_affinity_enable;
7234
7235        ioc->rdpq_array_enable_assigned = 0;
7236        ioc->use_32bit_dma = false;
7237        if (ioc->is_aero_ioc)
7238                ioc->base_readl = &_base_readl_aero;
7239        else
7240                ioc->base_readl = &_base_readl;
7241        r = mpt3sas_base_map_resources(ioc);
7242        if (r)
7243                goto out_free_resources;
7244
7245        pci_set_drvdata(ioc->pdev, ioc->shost);
7246        r = _base_get_ioc_facts(ioc);
7247        if (r) {
7248                rc = _base_check_for_fault_and_issue_reset(ioc);
7249                if (rc || (_base_get_ioc_facts(ioc)))
7250                        goto out_free_resources;
7251        }
7252
7253        switch (ioc->hba_mpi_version_belonged) {
7254        case MPI2_VERSION:
7255                ioc->build_sg_scmd = &_base_build_sg_scmd;
7256                ioc->build_sg = &_base_build_sg;
7257                ioc->build_zero_len_sge = &_base_build_zero_len_sge;
7258                ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7259                break;
7260        case MPI25_VERSION:
7261        case MPI26_VERSION:
                /*
                 * In SAS3.0, SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU,
                 * Target Assist, and Target Status all require IEEE
                 * formatted scatter gather elements.
                 */
7268                ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
7269                ioc->build_sg = &_base_build_sg_ieee;
7270                ioc->build_nvme_prp = &_base_build_nvme_prp;
7271                ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
7272                ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
7273                if (ioc->high_iops_queues)
7274                        ioc->get_msix_index_for_smlio =
7275                                        &_base_get_high_iops_msix_index;
7276                else
7277                        ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7278                break;
7279        }
7280        if (ioc->atomic_desc_capable) {
7281                ioc->put_smid_default = &_base_put_smid_default_atomic;
7282                ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
7283                ioc->put_smid_fast_path =
7284                                &_base_put_smid_fast_path_atomic;
7285                ioc->put_smid_hi_priority =
7286                                &_base_put_smid_hi_priority_atomic;
7287        } else {
7288                ioc->put_smid_default = &_base_put_smid_default;
7289                ioc->put_smid_fast_path = &_base_put_smid_fast_path;
7290                ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
7291                if (ioc->is_mcpu_endpoint)
7292                        ioc->put_smid_scsi_io =
7293                                &_base_put_smid_mpi_ep_scsi_io;
7294                else
7295                        ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
7296        }
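        /*
         * Illustrative contrast (not part of the driver): an atomic
         * request descriptor is posted with a single 32-bit write,
         * whereas the default style posts a 64-bit descriptor as two
         * 32-bit writes (which the real helpers serialize with a lock
         * on 32-bit hosts).  The sketch below uses hypothetical local
         * variables; the register names follow the system interface
         * layout used elsewhere in this file.
         */
#if 0
        if (ioc->atomic_desc_capable)
                writel(atomic_desc, &ioc->chip->AtomicRequestDescriptorPost);
        else {
                writel(lower_32_bits(desc),
                    &ioc->chip->RequestDescriptorPostLow);
                writel(upper_32_bits(desc),
                    &ioc->chip->RequestDescriptorPostHigh);
        }
#endif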
        /*
         * These function pointers are for the other requests that don't
         * require IEEE scatter gather elements, for example
         * Configuration Pages and SAS IOUNIT Control.
         */
7303        ioc->build_sg_mpi = &_base_build_sg;
7304        ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
7305
7306        r = _base_make_ioc_ready(ioc, SOFT_RESET);
7307        if (r)
7308                goto out_free_resources;
7309
7310        ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
7311            sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
7312        if (!ioc->pfacts) {
7313                r = -ENOMEM;
7314                goto out_free_resources;
7315        }
7316
7317        for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
7318                r = _base_get_port_facts(ioc, i);
7319                if (r) {
7320                        rc = _base_check_for_fault_and_issue_reset(ioc);
7321                        if (rc || (_base_get_port_facts(ioc, i)))
7322                                goto out_free_resources;
7323                }
7324        }
7325
7326        r = _base_allocate_memory_pools(ioc);
7327        if (r)
7328                goto out_free_resources;
7329
7330        if (irqpoll_weight > 0)
7331                ioc->thresh_hold = irqpoll_weight;
7332        else
7333                ioc->thresh_hold = ioc->hba_queue_depth/4;
7334
7335        _base_init_irqpolls(ioc);
7336        init_waitqueue_head(&ioc->reset_wq);
7337
7338        /* allocate memory pd handle bitmask list */
7339        ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
7340        if (ioc->facts.MaxDevHandle % 8)
7341                ioc->pd_handles_sz++;
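        /*
         * Equivalent to DIV_ROUND_UP(ioc->facts.MaxDevHandle, 8): one
         * byte of bitmap per eight device handles.
         */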
7342        ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
7343            GFP_KERNEL);
7344        if (!ioc->pd_handles) {
7345                r = -ENOMEM;
7346                goto out_free_resources;
7347        }
7348        ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
7349            GFP_KERNEL);
7350        if (!ioc->blocking_handles) {
7351                r = -ENOMEM;
7352                goto out_free_resources;
7353        }
7354
7355        /* allocate memory for pending OS device add list */
7356        ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
7357        if (ioc->facts.MaxDevHandle % 8)
7358                ioc->pend_os_device_add_sz++;
7359        ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
7360            GFP_KERNEL);
        if (!ioc->pend_os_device_add) {
                r = -ENOMEM;
                goto out_free_resources;
        }
7363
7364        ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
7365        ioc->device_remove_in_progress =
7366                kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
        if (!ioc->device_remove_in_progress) {
                r = -ENOMEM;
                goto out_free_resources;
        }
7369
7370        ioc->fwfault_debug = mpt3sas_fwfault_debug;
7371
7372        /* base internal command bits */
7373        mutex_init(&ioc->base_cmds.mutex);
7374        ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7375        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7376
7377        /* port_enable command bits */
7378        ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7379        ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7380
7381        /* transport internal command bits */
7382        ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7383        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
7384        mutex_init(&ioc->transport_cmds.mutex);
7385
7386        /* scsih internal command bits */
7387        ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7388        ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7389        mutex_init(&ioc->scsih_cmds.mutex);
7390
7391        /* task management internal command bits */
7392        ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7393        ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
7394        mutex_init(&ioc->tm_cmds.mutex);
7395
7396        /* config page internal command bits */
7397        ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7398        ioc->config_cmds.status = MPT3_CMD_NOT_USED;
7399        mutex_init(&ioc->config_cmds.mutex);
7400
7401        /* ctl module internal command bits */
7402        ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7403        ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
7404        ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
7405        mutex_init(&ioc->ctl_cmds.mutex);
7406
7407        if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
7408            !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
7409            !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
7410            !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
7411                r = -ENOMEM;
7412                goto out_free_resources;
7413        }
7414
7415        for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
7416                ioc->event_masks[i] = -1;
7417
7418        /* here we enable the events we care about */
7419        _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
7420        _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
7421        _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
7422        _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
7423        _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
7424        _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
7425        _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
7426        _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
7427        _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
7428        _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
7429        _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
7430        _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
7431        _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
7432        if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
7433                if (ioc->is_gen35_ioc) {
7434                        _base_unmask_events(ioc,
7435                                MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
7436                        _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
7437                        _base_unmask_events(ioc,
7438                                MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
7439                }
7440        }
7441        r = _base_make_ioc_operational(ioc);
7442        if (r)
7443                goto out_free_resources;
7444
        /*
         * Save the current copy of the IOCFacts in prev_fw_facts; it is
         * used during online firmware upgrades.
         */
7449        memcpy(&ioc->prev_fw_facts, &ioc->facts,
7450            sizeof(struct mpt3sas_facts));
7451
7452        ioc->non_operational_loop = 0;
7453        ioc->ioc_coredump_loop = 0;
7454        ioc->got_task_abort_from_ioctl = 0;
7455        return 0;
7456
7457 out_free_resources:
7458
7459        ioc->remove_host = 1;
7460
7461        mpt3sas_base_free_resources(ioc);
7462        _base_release_memory_pools(ioc);
7463        pci_set_drvdata(ioc->pdev, NULL);
7464        kfree(ioc->cpu_msix_table);
7465        if (ioc->is_warpdrive)
7466                kfree(ioc->reply_post_host_index);
7467        kfree(ioc->pd_handles);
7468        kfree(ioc->blocking_handles);
7469        kfree(ioc->device_remove_in_progress);
7470        kfree(ioc->pend_os_device_add);
7471        kfree(ioc->tm_cmds.reply);
7472        kfree(ioc->transport_cmds.reply);
7473        kfree(ioc->scsih_cmds.reply);
7474        kfree(ioc->config_cmds.reply);
7475        kfree(ioc->base_cmds.reply);
7476        kfree(ioc->port_enable_cmds.reply);
7477        kfree(ioc->ctl_cmds.reply);
7478        kfree(ioc->ctl_cmds.sense);
7479        kfree(ioc->pfacts);
7480        ioc->ctl_cmds.reply = NULL;
7481        ioc->base_cmds.reply = NULL;
7482        ioc->tm_cmds.reply = NULL;
7483        ioc->scsih_cmds.reply = NULL;
7484        ioc->transport_cmds.reply = NULL;
7485        ioc->config_cmds.reply = NULL;
7486        ioc->pfacts = NULL;
7487        return r;
7488}
7489
7491/**
7492 * mpt3sas_base_detach - remove controller instance
7493 * @ioc: per adapter object
7494 */
7495void
7496mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
7497{
7498        dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7499
7500        mpt3sas_base_stop_watchdog(ioc);
7501        mpt3sas_base_free_resources(ioc);
7502        _base_release_memory_pools(ioc);
7503        mpt3sas_free_enclosure_list(ioc);
7504        pci_set_drvdata(ioc->pdev, NULL);
7505        kfree(ioc->cpu_msix_table);
7506        if (ioc->is_warpdrive)
7507                kfree(ioc->reply_post_host_index);
7508        kfree(ioc->pd_handles);
7509        kfree(ioc->blocking_handles);
7510        kfree(ioc->device_remove_in_progress);
7511        kfree(ioc->pend_os_device_add);
7512        kfree(ioc->pfacts);
7513        kfree(ioc->ctl_cmds.reply);
7514        kfree(ioc->ctl_cmds.sense);
7515        kfree(ioc->base_cmds.reply);
7516        kfree(ioc->port_enable_cmds.reply);
7517        kfree(ioc->tm_cmds.reply);
7518        kfree(ioc->transport_cmds.reply);
7519        kfree(ioc->scsih_cmds.reply);
7520        kfree(ioc->config_cmds.reply);
7521}
7522
7523/**
7524 * _base_pre_reset_handler - pre reset handler
7525 * @ioc: per adapter object
7526 */
7527static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
7528{
7529        mpt3sas_scsih_pre_reset_handler(ioc);
7530        mpt3sas_ctl_pre_reset_handler(ioc);
7531        dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
7532}
7533
7534/**
7535 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
7536 * @ioc: per adapter object
7537 */
7538static void
7539_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
7540{
7541        dtmprintk(ioc,
7542            ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
7543        if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
7544                ioc->transport_cmds.status |= MPT3_CMD_RESET;
7545                mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
7546                complete(&ioc->transport_cmds.done);
7547        }
7548        if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7549                ioc->base_cmds.status |= MPT3_CMD_RESET;
7550                mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
7551                complete(&ioc->base_cmds.done);
7552        }
7553        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7554                ioc->port_enable_failed = 1;
7555                ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
7556                mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
7557                if (ioc->is_driver_loading) {
7558                        ioc->start_scan_failed =
7559                                MPI2_IOCSTATUS_INTERNAL_ERROR;
7560                        ioc->start_scan = 0;
7561                        ioc->port_enable_cmds.status =
7562                                MPT3_CMD_NOT_USED;
7563                } else {
7564                        complete(&ioc->port_enable_cmds.done);
7565                }
7566        }
7567        if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
7568                ioc->config_cmds.status |= MPT3_CMD_RESET;
7569                mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
7570                ioc->config_cmds.smid = USHRT_MAX;
7571                complete(&ioc->config_cmds.done);
7572        }
7573}
7574
7575/**
7576 * _base_clear_outstanding_commands - clear all outstanding commands
7577 * @ioc: per adapter object
7578 */
7579static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
7580{
7581        mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
7582        mpt3sas_ctl_clear_outstanding_ioctls(ioc);
7583        _base_clear_outstanding_mpt_commands(ioc);
7584}
7585
7586/**
7587 * _base_reset_done_handler - reset done handler
7588 * @ioc: per adapter object
7589 */
7590static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
7591{
7592        mpt3sas_scsih_reset_done_handler(ioc);
7593        mpt3sas_ctl_reset_done_handler(ioc);
7594        dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
7595}
7596
7597/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits up to 10 seconds for all pending commands to
 * complete prior to putting the controller in reset.
7603 */
7604void
7605mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
7606{
7607        u32 ioc_state;
7608
7609        ioc->pending_io_count = 0;
7610
7611        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7612        if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
7613                return;
7614
7615        /* pending command count */
7616        ioc->pending_io_count = scsi_host_busy(ioc->shost);
7617
7618        if (!ioc->pending_io_count)
7619                return;
7620
7621        /* wait for pending commands to complete */
7622        wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
7623}
7624
7625/**
7626 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
7627 *     attributes during online firmware upgrade and update the corresponding
7628 *     IOC variables accordingly.
7629 *
7630 * @ioc: Pointer to MPT_ADAPTER structure
7631 */
7632static int
7633_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
7634{
7635        u16 pd_handles_sz;
7636        void *pd_handles = NULL, *blocking_handles = NULL;
7637        void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
7638        struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
7639
7640        if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
7641                pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
7642                if (ioc->facts.MaxDevHandle % 8)
7643                        pd_handles_sz++;
7644
7645                pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
7646                    GFP_KERNEL);
7647                if (!pd_handles) {
7648                        ioc_info(ioc,
7649                            "Unable to allocate the memory for pd_handles of sz: %d\n",
7650                            pd_handles_sz);
7651                        return -ENOMEM;
7652                }
7653                memset(pd_handles + ioc->pd_handles_sz, 0,
7654                    (pd_handles_sz - ioc->pd_handles_sz));
7655                ioc->pd_handles = pd_handles;
7656
7657                blocking_handles = krealloc(ioc->blocking_handles,
7658                    pd_handles_sz, GFP_KERNEL);
7659                if (!blocking_handles) {
                        ioc_info(ioc,
                            "Unable to allocate the memory for blocking_handles of sz: %d\n",
                            pd_handles_sz);
7664                        return -ENOMEM;
7665                }
7666                memset(blocking_handles + ioc->pd_handles_sz, 0,
7667                    (pd_handles_sz - ioc->pd_handles_sz));
7668                ioc->blocking_handles = blocking_handles;
7669                ioc->pd_handles_sz = pd_handles_sz;
7670
7671                pend_os_device_add = krealloc(ioc->pend_os_device_add,
7672                    pd_handles_sz, GFP_KERNEL);
7673                if (!pend_os_device_add) {
7674                        ioc_info(ioc,
7675                            "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
7676                            pd_handles_sz);
7677                        return -ENOMEM;
7678                }
7679                memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
7680                    (pd_handles_sz - ioc->pend_os_device_add_sz));
7681                ioc->pend_os_device_add = pend_os_device_add;
7682                ioc->pend_os_device_add_sz = pd_handles_sz;
7683
7684                device_remove_in_progress = krealloc(
7685                    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
7686                if (!device_remove_in_progress) {
                        ioc_info(ioc,
                            "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
                            pd_handles_sz);
7691                        return -ENOMEM;
7692                }
7693                memset(device_remove_in_progress +
7694                    ioc->device_remove_in_progress_sz, 0,
7695                    (pd_handles_sz - ioc->device_remove_in_progress_sz));
7696                ioc->device_remove_in_progress = device_remove_in_progress;
7697                ioc->device_remove_in_progress_sz = pd_handles_sz;
7698        }
7699
7700        memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
7701        return 0;
7702}
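
/*
 * Illustrative sketch (not part of the driver): the function above
 * repeats the same krealloc-then-zero-the-new-tail pattern four times;
 * a hypothetical helper capturing that pattern would look like this.
 */
#if 0
static void *_example_grow_bitmap(void *old, u16 old_sz, u16 new_sz)
{
        void *grown = krealloc(old, new_sz, GFP_KERNEL);

        if (!grown)
                return NULL;
        /* zero only the newly added tail; the existing bits stay live */
        memset(grown + old_sz, 0, new_sz - old_sz);
        return grown;
}
#endif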
7703
7704/**
7705 * mpt3sas_base_hard_reset_handler - reset controller
7706 * @ioc: Pointer to MPT_ADAPTER structure
7707 * @type: FORCE_BIG_HAMMER or SOFT_RESET
7708 *
7709 * Return: 0 for success, non-zero for failure.
7710 */
7711int
7712mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
7713        enum reset_type type)
7714{
7715        int r;
7716        unsigned long flags;
7717        u32 ioc_state;
7718        u8 is_fault = 0, is_trigger = 0;
7719
7720        dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
7721
7722        if (ioc->pci_error_recovery) {
7723                ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
7724                r = 0;
7725                goto out_unlocked;
7726        }
7727
7728        if (mpt3sas_fwfault_debug)
7729                mpt3sas_halt_firmware(ioc);
7730
7731        /* wait for an active reset in progress to complete */
7732        mutex_lock(&ioc->reset_in_progress_mutex);
7733
7734        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
7735        ioc->shost_recovery = 1;
7736        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
7737
7738        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
7739            MPT3_DIAG_BUFFER_IS_REGISTERED) &&
7740            (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
7741            MPT3_DIAG_BUFFER_IS_RELEASED))) {
7742                is_trigger = 1;
7743                ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7744                if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
7745                    (ioc_state & MPI2_IOC_STATE_MASK) ==
7746                    MPI2_IOC_STATE_COREDUMP)
7747                        is_fault = 1;
7748        }
7749        _base_pre_reset_handler(ioc);
7750        mpt3sas_wait_for_commands_to_complete(ioc);
7751        mpt3sas_base_mask_interrupts(ioc);
7752        r = _base_make_ioc_ready(ioc, type);
7753        if (r)
7754                goto out;
7755        _base_clear_outstanding_commands(ioc);
7756
7757        /* If this hard reset is called while port enable is active, then
7758         * there is no reason to call make_ioc_operational
7759         */
7760        if (ioc->is_driver_loading && ioc->port_enable_failed) {
7761                ioc->remove_host = 1;
7762                r = -EFAULT;
7763                goto out;
7764        }
7765        r = _base_get_ioc_facts(ioc);
7766        if (r)
7767                goto out;
7768
7769        r = _base_check_ioc_facts_changes(ioc);
7770        if (r) {
                ioc_info(ioc,
                    "Some of the parameters got changed in the new firmware image and a system reboot is required\n");
7774                goto out;
7775        }
7776        if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
                panic("%s: Issue occurred with flashing controller firmware. Please reboot the system and ensure that the correct firmware version is running\n",
                      ioc->name);
7780
7781        r = _base_make_ioc_operational(ioc);
7782        if (!r)
7783                _base_reset_done_handler(ioc);
7784
7785 out:
7786        ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
7787
7788        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
7789        ioc->shost_recovery = 0;
7790        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
7791        ioc->ioc_reset_count++;
7792        mutex_unlock(&ioc->reset_in_progress_mutex);
7793
7794 out_unlocked:
7795        if ((r == 0) && is_trigger) {
7796                if (is_fault)
7797                        mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
7798                else
7799                        mpt3sas_trigger_master(ioc,
7800                            MASTER_TRIGGER_ADAPTER_RESET);
7801        }
7802        dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
7803        return r;
7804}
7805