linux/drivers/scsi/mpt3sas/mpt3sas_base.c
   1/*
   2 * This is the Fusion MPT base driver providing common API layer interface
   3 * for access to MPT (Message Passing Technology) firmware.
   4 *
   5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
   6 * Copyright (C) 2012-2014  LSI Corporation
   7 * Copyright (C) 2013-2014 Avago Technologies
   8 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public License
  12 * as published by the Free Software Foundation; either version 2
  13 * of the License, or (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 *
  20 * NO WARRANTY
  21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  25 * solely responsible for determining the appropriateness of using and
  26 * distributing the Program and assumes all risks associated with its
  27 * exercise of rights under this Agreement, including but not limited to
  28 * the risks and costs of program errors, damage to or loss of data,
  29 * programs or equipment, and unavailability or interruption of operations.
   30 *
  31 * DISCLAIMER OF LIABILITY
  32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
   39 *
  40 * You should have received a copy of the GNU General Public License
  41 * along with this program; if not, write to the Free Software
  42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
  43 * USA.
  44 */
  45
  46#include <linux/kernel.h>
  47#include <linux/module.h>
  48#include <linux/errno.h>
  49#include <linux/init.h>
  50#include <linux/slab.h>
  51#include <linux/types.h>
  52#include <linux/pci.h>
  53#include <linux/kdev_t.h>
  54#include <linux/blkdev.h>
  55#include <linux/delay.h>
  56#include <linux/interrupt.h>
  57#include <linux/dma-mapping.h>
  58#include <linux/io.h>
  59#include <linux/time.h>
  60#include <linux/ktime.h>
  61#include <linux/kthread.h>
  62#include <linux/aer.h>
  63
  64
  65#include "mpt3sas_base.h"
  66
  67static MPT_CALLBACK     mpt_callbacks[MPT_MAX_CALLBACKS];
  68
  69
  70#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
  71
  72 /* maximum controller queue depth */
  73#define MAX_HBA_QUEUE_DEPTH     30000
  74#define MAX_CHAIN_DEPTH         100000
  75static int max_queue_depth = -1;
  76module_param(max_queue_depth, int, 0);
  77MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
  78
  79static int max_sgl_entries = -1;
  80module_param(max_sgl_entries, int, 0);
  81MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
  82
  83static int msix_disable = -1;
  84module_param(msix_disable, int, 0);
  85MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
  86
  87static int smp_affinity_enable = 1;
  88module_param(smp_affinity_enable, int, S_IRUGO);
   89MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
  90
  91static int max_msix_vectors = -1;
  92module_param(max_msix_vectors, int, 0);
  93MODULE_PARM_DESC(max_msix_vectors,
  94        " max msix vectors");
  95
  96static int mpt3sas_fwfault_debug;
  97MODULE_PARM_DESC(mpt3sas_fwfault_debug,
  98        " enable detection of firmware fault and halt firmware - (default=0)");
  99
 100static int
 101_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
 102
 103/**
 104 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 105 *
 106 */
 107static int
 108_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
 109{
 110        int ret = param_set_int(val, kp);
 111        struct MPT3SAS_ADAPTER *ioc;
 112
 113        if (ret)
 114                return ret;
 115
 116        /* global ioc spinlock to protect controller list on list operations */
 117        pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
 118        spin_lock(&gioc_lock);
 119        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
 120                ioc->fwfault_debug = mpt3sas_fwfault_debug;
 121        spin_unlock(&gioc_lock);
 122        return 0;
 123}
 124module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
 125        param_get_int, &mpt3sas_fwfault_debug, 0644);
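
/*
 * Runtime usage note: with the 0644 permissions above, fwfault_debug can
 * normally be toggled after load through the module parameter file, e.g.
 *
 *   echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * (path assumes the standard sysfs layout for module parameters), which runs
 * _scsih_set_fwfault_debug() and propagates the value to every adapter on
 * mpt3sas_ioc_list under gioc_lock.
 */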
 126
 127/**
 128 *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 129 * @arg: input argument, used to derive ioc
 130 *
  131 * Return 0 if the controller is removed from the pci subsystem.
  132 * Return -1 otherwise.
 133 */
 134static int mpt3sas_remove_dead_ioc_func(void *arg)
 135{
 136        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
 137        struct pci_dev *pdev;
 138
  139        if (!ioc)
  140                return -1;
  141
  142        pdev = ioc->pdev;
  143        if (!pdev)
  144                return -1;
 145        pci_stop_and_remove_bus_device_locked(pdev);
 146        return 0;
 147}
 148
 149/**
 150 * _base_fault_reset_work - workq handling ioc fault conditions
 151 * @work: input argument, used to derive ioc
 152 * Context: sleep.
 153 *
 154 * Return nothing.
 155 */
 156static void
 157_base_fault_reset_work(struct work_struct *work)
 158{
 159        struct MPT3SAS_ADAPTER *ioc =
 160            container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
 161        unsigned long    flags;
 162        u32 doorbell;
 163        int rc;
 164        struct task_struct *p;
 165
 166
 167        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 168        if (ioc->shost_recovery || ioc->pci_error_recovery)
 169                goto rearm_timer;
 170        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 171
 172        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
 173        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
 174                pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
 175                    ioc->name);
 176
  177                /* It may be possible that EEH recovery can resolve some
  178                 * pci bus failure issues rather than removing the dead ioc
  179                 * function outright on the grounds that the controller is
  180                 * in a non-operational state. So priority is given to EEH
  181                 * recovery here. If it does not resolve the issue, the
  182                 * mpt3sas driver will consider the controller
  183                 * non-operational and remove the dead ioc function.
  184                 */
 185                if (ioc->non_operational_loop++ < 5) {
 186                        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
 187                                                         flags);
 188                        goto rearm_timer;
 189                }
 190
  191                /*
  192                 * Call the _scsih_flush_pending_cmds callback so that we flush
  193                 * all pending commands back to the OS. This call is required to
  194                 * avoid a deadlock at the block layer. A dead IOC will fail the
  195                 * diag reset, and this call is safe since a dead ioc will never
  196                 * return any command back from HW.
  197                 */
 198                ioc->schedule_dead_ioc_flush_running_cmds(ioc);
 199                /*
 200                 * Set remove_host flag early since kernel thread will
 201                 * take some time to execute.
 202                 */
 203                ioc->remove_host = 1;
  204                /* Remove the dead host */
 205                p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
 206                    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
 207                if (IS_ERR(p))
 208                        pr_err(MPT3SAS_FMT
 209                        "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
 210                        ioc->name, __func__);
 211                else
 212                        pr_err(MPT3SAS_FMT
 213                        "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
 214                        ioc->name, __func__);
 215                return; /* don't rearm timer */
 216        }
 217
 218        ioc->non_operational_loop = 0;
 219
 220        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
 221                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 222                pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
 223                    __func__, (rc == 0) ? "success" : "failed");
 224                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
 225                if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
 226                        mpt3sas_base_fault_info(ioc, doorbell &
 227                            MPI2_DOORBELL_DATA_MASK);
 228                if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
 229                    MPI2_IOC_STATE_OPERATIONAL)
 230                        return; /* don't rearm timer */
 231        }
 232
 233        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 234 rearm_timer:
 235        if (ioc->fault_reset_work_q)
 236                queue_delayed_work(ioc->fault_reset_work_q,
 237                    &ioc->fault_reset_work,
 238                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
 239        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 240}
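
/*
 * Worked example of the doorbell decoding above (register values are
 * illustrative; the masks are the MPI2_* definitions from the MPI headers):
 *
 *   doorbell = 0xFFFFFFFF -> (doorbell & MPI2_IOC_STATE_MASK) ==
 *                            MPI2_IOC_STATE_MASK, the typical all-ones read
 *                            from an unreachable PCI function, so the
 *                            dead-ioc removal path is taken.
 *   doorbell = 0x40001234 -> state is MPI2_IOC_STATE_FAULT; the low 16 bits
 *                            (MPI2_DOORBELL_DATA_MASK) carry fault code
 *                            0x1234, the value mpt3sas_base_fault_info()
 *                            reports if the IOC is still faulted after the
 *                            hard reset attempt.
 */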
 241
 242/**
 243 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 244 * @ioc: per adapter object
 245 * Context: sleep.
 246 *
 247 * Return nothing.
 248 */
 249void
 250mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
 251{
 252        unsigned long    flags;
 253
 254        if (ioc->fault_reset_work_q)
 255                return;
 256
 257        /* initialize fault polling */
 258
 259        INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
 260        snprintf(ioc->fault_reset_work_q_name,
 261            sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
 262            ioc->driver_name, ioc->id);
 263        ioc->fault_reset_work_q =
 264                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
 265        if (!ioc->fault_reset_work_q) {
 266                pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
 267                    ioc->name, __func__, __LINE__);
  268                return;
 269        }
 270        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 271        if (ioc->fault_reset_work_q)
 272                queue_delayed_work(ioc->fault_reset_work_q,
 273                    &ioc->fault_reset_work,
 274                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
 275        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 276}
 277
 278/**
 279 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 280 * @ioc: per adapter object
 281 * Context: sleep.
 282 *
 283 * Return nothing.
 284 */
 285void
 286mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
 287{
 288        unsigned long flags;
 289        struct workqueue_struct *wq;
 290
 291        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 292        wq = ioc->fault_reset_work_q;
 293        ioc->fault_reset_work_q = NULL;
 294        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 295        if (wq) {
 296                if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
 297                        flush_workqueue(wq);
 298                destroy_workqueue(wq);
 299        }
 300}
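
/*
 * Teardown ordering note for the watchdog above: fault_reset_work_q is
 * cleared under ioc_reset_in_progress_lock first, so a concurrently running
 * _base_fault_reset_work() fails its rearm check and does not requeue
 * itself; only then is the delayed work cancelled (or the queue flushed if
 * the work was already executing) and the workqueue destroyed.
 */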
 301
 302/**
 303 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 304 * @ioc: per adapter object
 305 * @fault_code: fault code
 306 *
 307 * Return nothing.
 308 */
 309void
 310mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
 311{
 312        pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
 313            ioc->name, fault_code);
 314}
 315
 316/**
  317 * mpt3sas_halt_firmware - halts the mpt controller firmware
  318 * @ioc: per adapter object
  319 *
  320 * For debugging timeout related issues.  Writing 0xC0FFEE00
  321 * to the doorbell register will halt the controller firmware. The
  322 * intent is to stop both the driver and the firmware so the end
  323 * user can obtain a ring buffer dump from the controller UART.
 324 */
 325void
 326mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
 327{
 328        u32 doorbell;
 329
 330        if (!ioc->fwfault_debug)
 331                return;
 332
 333        dump_stack();
 334
 335        doorbell = readl(&ioc->chip->Doorbell);
 336        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
 337                mpt3sas_base_fault_info(ioc , doorbell);
 338        else {
 339                writel(0xC0FFEE00, &ioc->chip->Doorbell);
 340                pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
 341                        ioc->name);
 342        }
 343
 344        if (ioc->fwfault_debug == 2)
 345                for (;;)
 346                        ;
 347        else
 348                panic("panic in %s\n", __func__);
 349}
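
/*
 * fwfault_debug values as handled above:
 *   0 - do nothing (default)
 *   1 - write 0xC0FFEE00 to the doorbell (unless the IOC already reports a
 *       FAULT state) and panic the host
 *   2 - halt the firmware the same way but spin in place instead of
 *       panicking, so the controller UART ring buffer can still be collected
 */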
 350
 351/**
 352 * _base_sas_ioc_info - verbose translation of the ioc status
 353 * @ioc: per adapter object
 354 * @mpi_reply: reply mf payload returned from firmware
 355 * @request_hdr: request mf
 356 *
 357 * Return nothing.
 358 */
 359static void
 360_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
 361        MPI2RequestHeader_t *request_hdr)
 362{
 363        u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
 364            MPI2_IOCSTATUS_MASK;
 365        char *desc = NULL;
 366        u16 frame_sz;
 367        char *func_str = NULL;
 368
 369        /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
 370        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 371            request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
 372            request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
 373                return;
 374
 375        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 376                return;
 377
 378        switch (ioc_status) {
 379
 380/****************************************************************************
 381*  Common IOCStatus values for all replies
 382****************************************************************************/
 383
 384        case MPI2_IOCSTATUS_INVALID_FUNCTION:
 385                desc = "invalid function";
 386                break;
 387        case MPI2_IOCSTATUS_BUSY:
 388                desc = "busy";
 389                break;
 390        case MPI2_IOCSTATUS_INVALID_SGL:
 391                desc = "invalid sgl";
 392                break;
 393        case MPI2_IOCSTATUS_INTERNAL_ERROR:
 394                desc = "internal error";
 395                break;
 396        case MPI2_IOCSTATUS_INVALID_VPID:
 397                desc = "invalid vpid";
 398                break;
 399        case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
 400                desc = "insufficient resources";
 401                break;
 402        case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
 403                desc = "insufficient power";
 404                break;
 405        case MPI2_IOCSTATUS_INVALID_FIELD:
 406                desc = "invalid field";
 407                break;
 408        case MPI2_IOCSTATUS_INVALID_STATE:
 409                desc = "invalid state";
 410                break;
 411        case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
 412                desc = "op state not supported";
 413                break;
 414
 415/****************************************************************************
 416*  Config IOCStatus values
 417****************************************************************************/
 418
 419        case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
 420                desc = "config invalid action";
 421                break;
 422        case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
 423                desc = "config invalid type";
 424                break;
 425        case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
 426                desc = "config invalid page";
 427                break;
 428        case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
 429                desc = "config invalid data";
 430                break;
 431        case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
 432                desc = "config no defaults";
 433                break;
 434        case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
 435                desc = "config cant commit";
 436                break;
 437
 438/****************************************************************************
 439*  SCSI IO Reply
 440****************************************************************************/
 441
 442        case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
 443        case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
 444        case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
 445        case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
 446        case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
 447        case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
 448        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
 449        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
 450        case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
 451        case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
 452        case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
 453        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 454                break;
 455
 456/****************************************************************************
 457*  For use by SCSI Initiator and SCSI Target end-to-end data protection
 458****************************************************************************/
 459
 460        case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
 461                desc = "eedp guard error";
 462                break;
 463        case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
 464                desc = "eedp ref tag error";
 465                break;
 466        case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
 467                desc = "eedp app tag error";
 468                break;
 469
 470/****************************************************************************
 471*  SCSI Target values
 472****************************************************************************/
 473
 474        case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
 475                desc = "target invalid io index";
 476                break;
 477        case MPI2_IOCSTATUS_TARGET_ABORTED:
 478                desc = "target aborted";
 479                break;
 480        case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
 481                desc = "target no conn retryable";
 482                break;
 483        case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
 484                desc = "target no connection";
 485                break;
 486        case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
 487                desc = "target xfer count mismatch";
 488                break;
 489        case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
 490                desc = "target data offset error";
 491                break;
 492        case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
 493                desc = "target too much write data";
 494                break;
 495        case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
 496                desc = "target iu too short";
 497                break;
 498        case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
 499                desc = "target ack nak timeout";
 500                break;
 501        case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
 502                desc = "target nak received";
 503                break;
 504
 505/****************************************************************************
 506*  Serial Attached SCSI values
 507****************************************************************************/
 508
 509        case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
 510                desc = "smp request failed";
 511                break;
 512        case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
 513                desc = "smp data overrun";
 514                break;
 515
 516/****************************************************************************
 517*  Diagnostic Buffer Post / Diagnostic Release values
 518****************************************************************************/
 519
 520        case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
 521                desc = "diagnostic released";
 522                break;
 523        default:
 524                break;
 525        }
 526
 527        if (!desc)
 528                return;
 529
 530        switch (request_hdr->Function) {
 531        case MPI2_FUNCTION_CONFIG:
 532                frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
 533                func_str = "config_page";
 534                break;
 535        case MPI2_FUNCTION_SCSI_TASK_MGMT:
 536                frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
 537                func_str = "task_mgmt";
 538                break;
 539        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
 540                frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
 541                func_str = "sas_iounit_ctl";
 542                break;
 543        case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
 544                frame_sz = sizeof(Mpi2SepRequest_t);
 545                func_str = "enclosure";
 546                break;
 547        case MPI2_FUNCTION_IOC_INIT:
 548                frame_sz = sizeof(Mpi2IOCInitRequest_t);
 549                func_str = "ioc_init";
 550                break;
 551        case MPI2_FUNCTION_PORT_ENABLE:
 552                frame_sz = sizeof(Mpi2PortEnableRequest_t);
 553                func_str = "port_enable";
 554                break;
 555        case MPI2_FUNCTION_SMP_PASSTHROUGH:
 556                frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
 557                func_str = "smp_passthru";
 558                break;
 559        default:
 560                frame_sz = 32;
 561                func_str = "unknown";
 562                break;
 563        }
 564
 565        pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
 566                ioc->name, desc, ioc_status, request_hdr, func_str);
 567
 568        _debug_dump_mf(request_hdr, frame_sz/4);
 569}
 570
 571/**
  572 * _base_display_event_data - verbose translation of firmware async events
 573 * @ioc: per adapter object
 574 * @mpi_reply: reply mf payload returned from firmware
 575 *
 576 * Return nothing.
 577 */
 578static void
 579_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
 580        Mpi2EventNotificationReply_t *mpi_reply)
 581{
 582        char *desc = NULL;
 583        u16 event;
 584
 585        if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
 586                return;
 587
 588        event = le16_to_cpu(mpi_reply->Event);
 589
 590        switch (event) {
 591        case MPI2_EVENT_LOG_DATA:
 592                desc = "Log Data";
 593                break;
 594        case MPI2_EVENT_STATE_CHANGE:
 595                desc = "Status Change";
 596                break;
 597        case MPI2_EVENT_HARD_RESET_RECEIVED:
 598                desc = "Hard Reset Received";
 599                break;
 600        case MPI2_EVENT_EVENT_CHANGE:
 601                desc = "Event Change";
 602                break;
 603        case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
 604                desc = "Device Status Change";
 605                break;
 606        case MPI2_EVENT_IR_OPERATION_STATUS:
 607                if (!ioc->hide_ir_msg)
 608                        desc = "IR Operation Status";
 609                break;
 610        case MPI2_EVENT_SAS_DISCOVERY:
 611        {
 612                Mpi2EventDataSasDiscovery_t *event_data =
 613                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
 614                pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
 615                    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
 616                    "start" : "stop");
 617                if (event_data->DiscoveryStatus)
 618                        pr_info("discovery_status(0x%08x)",
 619                            le32_to_cpu(event_data->DiscoveryStatus));
  620                pr_info("\n");
 621                return;
 622        }
 623        case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
 624                desc = "SAS Broadcast Primitive";
 625                break;
 626        case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
 627                desc = "SAS Init Device Status Change";
 628                break;
 629        case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
 630                desc = "SAS Init Table Overflow";
 631                break;
 632        case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
 633                desc = "SAS Topology Change List";
 634                break;
 635        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
 636                desc = "SAS Enclosure Device Status Change";
 637                break;
 638        case MPI2_EVENT_IR_VOLUME:
 639                if (!ioc->hide_ir_msg)
 640                        desc = "IR Volume";
 641                break;
 642        case MPI2_EVENT_IR_PHYSICAL_DISK:
 643                if (!ioc->hide_ir_msg)
 644                        desc = "IR Physical Disk";
 645                break;
 646        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
 647                if (!ioc->hide_ir_msg)
 648                        desc = "IR Configuration Change List";
 649                break;
 650        case MPI2_EVENT_LOG_ENTRY_ADDED:
 651                if (!ioc->hide_ir_msg)
 652                        desc = "Log Entry Added";
 653                break;
 654        case MPI2_EVENT_TEMP_THRESHOLD:
 655                desc = "Temperature Threshold";
 656                break;
 657        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
 658                desc = "Active cable exception";
 659                break;
 660        }
 661
 662        if (!desc)
 663                return;
 664
 665        pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
 666}
 667
 668/**
 669 * _base_sas_log_info - verbose translation of firmware log info
 670 * @ioc: per adapter object
 671 * @log_info: log info
 672 *
 673 * Return nothing.
 674 */
 675static void
 676_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
 677{
 678        union loginfo_type {
 679                u32     loginfo;
 680                struct {
 681                        u32     subcode:16;
 682                        u32     code:8;
 683                        u32     originator:4;
 684                        u32     bus_type:4;
 685                } dw;
 686        };
 687        union loginfo_type sas_loginfo;
 688        char *originator_str = NULL;
 689
 690        sas_loginfo.loginfo = log_info;
 691        if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
 692                return;
 693
 694        /* each nexus loss loginfo */
 695        if (log_info == 0x31170000)
 696                return;
 697
 698        /* eat the loginfos associated with task aborts */
 699        if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
 700            0x31140000 || log_info == 0x31130000))
 701                return;
 702
 703        switch (sas_loginfo.dw.originator) {
 704        case 0:
 705                originator_str = "IOP";
 706                break;
 707        case 1:
 708                originator_str = "PL";
 709                break;
 710        case 2:
 711                if (!ioc->hide_ir_msg)
 712                        originator_str = "IR";
 713                else
 714                        originator_str = "WarpDrive";
 715                break;
 716        }
 717
 718        pr_warn(MPT3SAS_FMT
 719                "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
 720                ioc->name, log_info,
 721             originator_str, sas_loginfo.dw.code,
 722             sas_loginfo.dw.subcode);
 723}
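
/*
 * Example decode using the loginfo_type bit-fields above, assuming the usual
 * little-endian bit-field layout and taking one of the task-abort values
 * filtered a few lines up (so it is only printed when ignore_loginfos is
 * clear):
 *
 *   log_info = 0x31140000
 *     bus_type   = 0x3     (SAS, so the message is not skipped)
 *     originator = 0x1     ("PL")
 *     code       = 0x14
 *     subcode    = 0x0000
 *
 *   log_info(0x31140000): originator(PL), code(0x14), sub_code(0x0000)
 */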
 724
 725/**
  726 * _base_display_reply_info - verbose translation of a reply's IOCStatus and LogInfo
 727 * @ioc: per adapter object
 728 * @smid: system request message index
 729 * @msix_index: MSIX table index supplied by the OS
 730 * @reply: reply message frame(lower 32bit addr)
 731 *
 732 * Return nothing.
 733 */
 734static void
 735_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 736        u32 reply)
 737{
 738        MPI2DefaultReply_t *mpi_reply;
 739        u16 ioc_status;
 740        u32 loginfo = 0;
 741
 742        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 743        if (unlikely(!mpi_reply)) {
 744                pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
 745                    ioc->name, __FILE__, __LINE__, __func__);
 746                return;
 747        }
 748        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
 749
 750        if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
 751            (ioc->logging_level & MPT_DEBUG_REPLY)) {
 752                _base_sas_ioc_info(ioc , mpi_reply,
 753                   mpt3sas_base_get_msg_frame(ioc, smid));
 754        }
 755
 756        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
 757                loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
 758                _base_sas_log_info(ioc, loginfo);
 759        }
 760
 761        if (ioc_status || loginfo) {
 762                ioc_status &= MPI2_IOCSTATUS_MASK;
 763                mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
 764        }
 765}
 766
 767/**
 768 * mpt3sas_base_done - base internal command completion routine
 769 * @ioc: per adapter object
 770 * @smid: system request message index
 771 * @msix_index: MSIX table index supplied by the OS
 772 * @reply: reply message frame(lower 32bit addr)
 773 *
 774 * Return 1 meaning mf should be freed from _base_interrupt
 775 *        0 means the mf is freed from this function.
 776 */
 777u8
 778mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 779        u32 reply)
 780{
 781        MPI2DefaultReply_t *mpi_reply;
 782
 783        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 784        if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
 785                return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
 786
 787        if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
 788                return 1;
 789
 790        ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
 791        if (mpi_reply) {
 792                ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
 793                memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
 794        }
 795        ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
 796
 797        complete(&ioc->base_cmds.done);
 798        return 1;
 799}
 800
 801/**
  802 * _base_async_event - main callback handler for firmware async events
 803 * @ioc: per adapter object
 804 * @msix_index: MSIX table index supplied by the OS
 805 * @reply: reply message frame(lower 32bit addr)
 806 *
 807 * Return 1 meaning mf should be freed from _base_interrupt
 808 *        0 means the mf is freed from this function.
 809 */
 810static u8
 811_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
 812{
 813        Mpi2EventNotificationReply_t *mpi_reply;
 814        Mpi2EventAckRequest_t *ack_request;
 815        u16 smid;
 816        struct _event_ack_list *delayed_event_ack;
 817
 818        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 819        if (!mpi_reply)
 820                return 1;
 821        if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
 822                return 1;
 823
 824        _base_display_event_data(ioc, mpi_reply);
 825
 826        if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
 827                goto out;
 828        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 829        if (!smid) {
 830                delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
 831                                        GFP_ATOMIC);
 832                if (!delayed_event_ack)
 833                        goto out;
 834                INIT_LIST_HEAD(&delayed_event_ack->list);
 835                delayed_event_ack->Event = mpi_reply->Event;
 836                delayed_event_ack->EventContext = mpi_reply->EventContext;
 837                list_add_tail(&delayed_event_ack->list,
 838                                &ioc->delayed_event_ack_list);
 839                dewtprintk(ioc, pr_info(MPT3SAS_FMT
 840                                "DELAYED: EVENT ACK: event (0x%04x)\n",
 841                                ioc->name, le16_to_cpu(mpi_reply->Event)));
 842                goto out;
 843        }
 844
 845        ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
 846        memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
 847        ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
 848        ack_request->Event = mpi_reply->Event;
 849        ack_request->EventContext = mpi_reply->EventContext;
 850        ack_request->VF_ID = 0;  /* TODO */
 851        ack_request->VP_ID = 0;
 852        mpt3sas_base_put_smid_default(ioc, smid);
 853
 854 out:
 855
 856        /* scsih callback handler */
 857        mpt3sas_scsih_event_callback(ioc, msix_index, reply);
 858
 859        /* ctl callback handler */
 860        mpt3sas_ctl_event_callback(ioc, msix_index, reply);
 861
 862        return 1;
 863}
 864
 865/**
 866 * _base_get_cb_idx - obtain the callback index
 867 * @ioc: per adapter object
 868 * @smid: system request message index
 869 *
 870 * Return callback index.
 871 */
 872static u8
 873_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 874{
 875        int i;
 876        u8 cb_idx;
 877
 878        if (smid < ioc->hi_priority_smid) {
 879                i = smid - 1;
 880                cb_idx = ioc->scsi_lookup[i].cb_idx;
 881        } else if (smid < ioc->internal_smid) {
 882                i = smid - ioc->hi_priority_smid;
 883                cb_idx = ioc->hpr_lookup[i].cb_idx;
 884        } else if (smid <= ioc->hba_queue_depth) {
 885                i = smid - ioc->internal_smid;
 886                cb_idx = ioc->internal_lookup[i].cb_idx;
 887        } else
 888                cb_idx = 0xFF;
 889        return cb_idx;
 890}
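
/*
 * Illustration of the SMID ranges above using hypothetical boundaries (the
 * real values are set when the request pools are sized):
 *
 *   hi_priority_smid = 1001, internal_smid = 1009, hba_queue_depth = 1013
 *
 *   smid  500 -> scsi_lookup[499]      (regular SCSI I/O requests)
 *   smid 1003 -> hpr_lookup[2]         (high priority, e.g. task management)
 *   smid 1010 -> internal_lookup[1]    (driver internal commands)
 *   smid 2000 -> out of range, cb_idx = 0xFF
 */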
 891
 892/**
 893 * _base_mask_interrupts - disable interrupts
 894 * @ioc: per adapter object
 895 *
 896 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 897 *
 898 * Return nothing.
 899 */
 900static void
 901_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
 902{
 903        u32 him_register;
 904
 905        ioc->mask_interrupts = 1;
 906        him_register = readl(&ioc->chip->HostInterruptMask);
  907        him_register |= MPI2_HIM_DIM | MPI2_HIM_RIM | MPI2_HIM_RESET_IRQ_MASK;
 908        writel(him_register, &ioc->chip->HostInterruptMask);
 909        readl(&ioc->chip->HostInterruptMask);
 910}
 911
 912/**
 913 * _base_unmask_interrupts - enable interrupts
 914 * @ioc: per adapter object
 915 *
 916 * Enabling only Reply Interrupts
 917 *
 918 * Return nothing.
 919 */
 920static void
 921_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
 922{
 923        u32 him_register;
 924
 925        him_register = readl(&ioc->chip->HostInterruptMask);
 926        him_register &= ~MPI2_HIM_RIM;
 927        writel(him_register, &ioc->chip->HostInterruptMask);
 928        ioc->mask_interrupts = 0;
 929}
 930
 931union reply_descriptor {
 932        u64 word;
 933        struct {
 934                u32 low;
 935                u32 high;
 936        } u;
 937};
 938
 939/**
 940 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
  941 * @irq: irq number (not used)
  942 * @bus_id: bus identifier cookie == pointer to the per reply queue
  943 *  (struct adapter_reply_queue) object
  944 *
  945 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 946 */
 947static irqreturn_t
 948_base_interrupt(int irq, void *bus_id)
 949{
 950        struct adapter_reply_queue *reply_q = bus_id;
 951        union reply_descriptor rd;
 952        u32 completed_cmds;
 953        u8 request_desript_type;
 954        u16 smid;
 955        u8 cb_idx;
 956        u32 reply;
 957        u8 msix_index = reply_q->msix_index;
 958        struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
 959        Mpi2ReplyDescriptorsUnion_t *rpf;
 960        u8 rc;
 961
 962        if (ioc->mask_interrupts)
 963                return IRQ_NONE;
 964
 965        if (!atomic_add_unless(&reply_q->busy, 1, 1))
 966                return IRQ_NONE;
 967
 968        rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
 969        request_desript_type = rpf->Default.ReplyFlags
 970             & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 971        if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
 972                atomic_dec(&reply_q->busy);
 973                return IRQ_NONE;
 974        }
 975
 976        completed_cmds = 0;
 977        cb_idx = 0xFF;
 978        do {
 979                rd.word = le64_to_cpu(rpf->Words);
 980                if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
 981                        goto out;
 982                reply = 0;
 983                smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
 984                if (request_desript_type ==
 985                    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
 986                    request_desript_type ==
 987                    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
 988                        cb_idx = _base_get_cb_idx(ioc, smid);
 989                        if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
 990                            (likely(mpt_callbacks[cb_idx] != NULL))) {
 991                                rc = mpt_callbacks[cb_idx](ioc, smid,
 992                                    msix_index, 0);
 993                                if (rc)
 994                                        mpt3sas_base_free_smid(ioc, smid);
 995                        }
 996                } else if (request_desript_type ==
 997                    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
 998                        reply = le32_to_cpu(
 999                            rpf->AddressReply.ReplyFrameAddress);
1000                        if (reply > ioc->reply_dma_max_address ||
1001                            reply < ioc->reply_dma_min_address)
1002                                reply = 0;
1003                        if (smid) {
1004                                cb_idx = _base_get_cb_idx(ioc, smid);
1005                                if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1006                                    (likely(mpt_callbacks[cb_idx] != NULL))) {
1007                                        rc = mpt_callbacks[cb_idx](ioc, smid,
1008                                            msix_index, reply);
1009                                        if (reply)
1010                                                _base_display_reply_info(ioc,
1011                                                    smid, msix_index, reply);
1012                                        if (rc)
1013                                                mpt3sas_base_free_smid(ioc,
1014                                                    smid);
1015                                }
1016                        } else {
1017                                _base_async_event(ioc, msix_index, reply);
1018                        }
1019
1020                        /* reply free queue handling */
1021                        if (reply) {
1022                                ioc->reply_free_host_index =
1023                                    (ioc->reply_free_host_index ==
1024                                    (ioc->reply_free_queue_depth - 1)) ?
1025                                    0 : ioc->reply_free_host_index + 1;
1026                                ioc->reply_free[ioc->reply_free_host_index] =
1027                                    cpu_to_le32(reply);
1028                                wmb();
1029                                writel(ioc->reply_free_host_index,
1030                                    &ioc->chip->ReplyFreeHostIndex);
1031                        }
1032                }
1033
1034                rpf->Words = cpu_to_le64(ULLONG_MAX);
1035                reply_q->reply_post_host_index =
1036                    (reply_q->reply_post_host_index ==
1037                    (ioc->reply_post_queue_depth - 1)) ? 0 :
1038                    reply_q->reply_post_host_index + 1;
1039                request_desript_type =
1040                    reply_q->reply_post_free[reply_q->reply_post_host_index].
1041                    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1042                completed_cmds++;
1043                if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1044                        goto out;
1045                if (!reply_q->reply_post_host_index)
1046                        rpf = reply_q->reply_post_free;
1047                else
1048                        rpf++;
1049        } while (1);
1050
1051 out:
1052
1053        if (!completed_cmds) {
1054                atomic_dec(&reply_q->busy);
1055                return IRQ_NONE;
1056        }
1057
1058        wmb();
1059        if (ioc->is_warpdrive) {
1060                writel(reply_q->reply_post_host_index,
1061                ioc->reply_post_host_index[msix_index]);
1062                atomic_dec(&reply_q->busy);
1063                return IRQ_HANDLED;
1064        }
1065
1066        /* Update Reply Post Host Index.
 1067         * For those HBAs which support the combined reply queue feature:
 1068         * 1. Get the correct Supplemental Reply Post Host Index Register,
 1069         *    i.e. the (msix_index / 8)th entry from the Supplemental Reply
 1070         *    Post Host Index Register address bank replyPostRegisterIndex[].
 1071         * 2. Then update this register with the new reply host index value
 1072         *    in the ReplyPostIndex field and the MSIxIndex field set to
 1073         *    msix_index reduced to a value between 0 and 7 using a modulo 8
 1074         *    operation, since each Supplemental Reply Post Host Index
 1075         *    Register supports 8 MSI-X vectors.
1076         *
1077         * For other HBA's just update the Reply Post Host Index register with
1078         * new reply host index value in ReplyPostIndex Field and msix_index
1079         * value in MSIxIndex field.
1080         */
1081        if (ioc->msix96_vector)
1082                writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1083                        MPI2_RPHI_MSIX_INDEX_SHIFT),
1084                        ioc->replyPostRegisterIndex[msix_index/8]);
1085        else
1086                writel(reply_q->reply_post_host_index | (msix_index <<
1087                        MPI2_RPHI_MSIX_INDEX_SHIFT),
1088                        &ioc->chip->ReplyPostHostIndex);
1089        atomic_dec(&reply_q->busy);
1090        return IRQ_HANDLED;
1091}
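
/*
 * Example of the combined reply queue update above with a hypothetical
 * msix_index of 13 and reply_post_host_index of 42:
 *
 *   register  = ioc->replyPostRegisterIndex[13 / 8]  -> bank entry 1
 *   MSIxIndex = 13 & 7                               -> 5
 *   value     = 42 | (5 << MPI2_RPHI_MSIX_INDEX_SHIFT)
 *
 * HBAs without the combined reply queue feature instead take the full
 * msix_index (13) in the MSIxIndex field of the single ReplyPostHostIndex
 * register.
 */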
1092
1093/**
 1094 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1095 * @ioc: per adapter object
1096 *
1097 */
1098static inline int
1099_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1100{
1101        return (ioc->facts.IOCCapabilities &
1102            MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1103}
1104
1105/**
1106 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1107 * @ioc: per adapter object
 1108 * Context: non-ISR context
1109 *
1110 * Called when a Task Management request has completed.
1111 *
1112 * Return nothing.
1113 */
1114void
1115mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
1116{
1117        struct adapter_reply_queue *reply_q;
1118
1119        /* If MSIX capability is turned off
1120         * then multi-queues are not enabled
1121         */
1122        if (!_base_is_controller_msix_enabled(ioc))
1123                return;
1124
1125        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1126                if (ioc->shost_recovery || ioc->remove_host ||
1127                                ioc->pci_error_recovery)
1128                        return;
1129                /* TMs are on msix_index == 0 */
1130                if (reply_q->msix_index == 0)
1131                        continue;
1132                synchronize_irq(reply_q->vector);
1133        }
1134}
1135
1136/**
1137 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1138 * @cb_idx: callback index
1139 *
1140 * Return nothing.
1141 */
1142void
1143mpt3sas_base_release_callback_handler(u8 cb_idx)
1144{
1145        mpt_callbacks[cb_idx] = NULL;
1146}
1147
1148/**
1149 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1150 * @cb_func: callback function
1151 *
 1152 * Returns the assigned cb_idx.
1153 */
1154u8
1155mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1156{
1157        u8 cb_idx;
1158
1159        for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1160                if (mpt_callbacks[cb_idx] == NULL)
1161                        break;
1162
1163        mpt_callbacks[cb_idx] = cb_func;
1164        return cb_idx;
1165}
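
/*
 * Note: the search above runs from MPT_MAX_CALLBACKS-1 down to 1 and never
 * reaches slot 0, so mpt_callbacks[0] stays NULL (as set by
 * mpt3sas_base_initialize_callback_handler) and index 0 is never handed out
 * to a caller.
 */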
1166
1167/**
1168 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1169 *
1170 * Return nothing.
1171 */
1172void
1173mpt3sas_base_initialize_callback_handler(void)
1174{
1175        u8 cb_idx;
1176
1177        for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1178                mpt3sas_base_release_callback_handler(cb_idx);
1179}
1180
1181
1182/**
1183 * _base_build_zero_len_sge - build zero length sg entry
1184 * @ioc: per adapter object
1185 * @paddr: virtual address for SGE
1186 *
 1187 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1188 * something to use if the target device goes brain dead and tries
1189 * to send data even when none is asked for.
1190 *
1191 * Return nothing.
1192 */
1193static void
1194_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1195{
1196        u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1197            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1198            MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1199            MPI2_SGE_FLAGS_SHIFT);
1200        ioc->base_add_sg_single(paddr, flags_length, -1);
1201}
1202
1203/**
 1204 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address paddr.
1205 * @paddr: virtual address for SGE
1206 * @flags_length: SGE flags and data transfer length
1207 * @dma_addr: Physical address
1208 *
1209 * Return nothing.
1210 */
1211static void
1212_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1213{
1214        Mpi2SGESimple32_t *sgel = paddr;
1215
1216        flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1217            MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1218        sgel->FlagsLength = cpu_to_le32(flags_length);
1219        sgel->Address = cpu_to_le32(dma_addr);
1220}
1221
1222
1223/**
 1224 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address paddr.
1225 * @paddr: virtual address for SGE
1226 * @flags_length: SGE flags and data transfer length
1227 * @dma_addr: Physical address
1228 *
1229 * Return nothing.
1230 */
1231static void
1232_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1233{
1234        Mpi2SGESimple64_t *sgel = paddr;
1235
1236        flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1237            MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1238        sgel->FlagsLength = cpu_to_le32(flags_length);
1239        sgel->Address = cpu_to_le64(dma_addr);
1240}
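
/*
 * Shape of the SGEs produced by the two helpers above: FlagsLength packs the
 * SGE flags into the bits at MPI2_SGE_FLAGS_SHIFT and above, with the
 * transfer length in the remaining low bits, so a 64-bit simple SGE for a
 * 4 KB buffer would look roughly like (illustrative values only):
 *
 *   sgel->FlagsLength = cpu_to_le32((flags << MPI2_SGE_FLAGS_SHIFT) | 0x1000);
 *   sgel->Address     = cpu_to_le64(dma_addr);
 */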
1241
1242/**
1243 * _base_get_chain_buffer_tracker - obtain chain tracker
1244 * @ioc: per adapter object
1245 * @smid: smid associated to an IO request
1246 *
1247 * Returns chain tracker(from ioc->free_chain_list)
1248 */
1249static struct chain_tracker *
1250_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1251{
1252        struct chain_tracker *chain_req;
1253        unsigned long flags;
1254
1255        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1256        if (list_empty(&ioc->free_chain_list)) {
1257                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1258                dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1259                        "chain buffers not available\n", ioc->name));
1260                return NULL;
1261        }
1262        chain_req = list_entry(ioc->free_chain_list.next,
1263            struct chain_tracker, tracker_list);
1264        list_del_init(&chain_req->tracker_list);
1265        list_add_tail(&chain_req->tracker_list,
1266            &ioc->scsi_lookup[smid - 1].chain_list);
1267        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1268        return chain_req;
1269}
1270
1271
1272/**
1273 * _base_build_sg - build generic sg
1274 * @ioc: per adapter object
1275 * @psge: virtual address for SGE
1276 * @data_out_dma: physical address for WRITES
1277 * @data_out_sz: data xfer size for WRITES
1278 * @data_in_dma: physical address for READS
1279 * @data_in_sz: data xfer size for READS
1280 *
1281 * Return nothing.
1282 */
1283static void
1284_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1285        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1286        size_t data_in_sz)
1287{
1288        u32 sgl_flags;
1289
1290        if (!data_out_sz && !data_in_sz) {
1291                _base_build_zero_len_sge(ioc, psge);
1292                return;
1293        }
1294
1295        if (data_out_sz && data_in_sz) {
1296                /* WRITE sgel first */
1297                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1298                    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1299                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1300                ioc->base_add_sg_single(psge, sgl_flags |
1301                    data_out_sz, data_out_dma);
1302
1303                /* incr sgel */
1304                psge += ioc->sge_size;
1305
1306                /* READ sgel last */
1307                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1308                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1309                    MPI2_SGE_FLAGS_END_OF_LIST);
1310                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1311                ioc->base_add_sg_single(psge, sgl_flags |
1312                    data_in_sz, data_in_dma);
1313        } else if (data_out_sz) /* WRITE */ {
1314                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1315                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1316                    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1317                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1318                ioc->base_add_sg_single(psge, sgl_flags |
1319                    data_out_sz, data_out_dma);
1320        } else if (data_in_sz) /* READ */ {
1321                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1322                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1323                    MPI2_SGE_FLAGS_END_OF_LIST);
1324                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1325                ioc->base_add_sg_single(psge, sgl_flags |
1326                    data_in_sz, data_in_dma);
1327        }
1328}
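
/*
 * SGL layouts produced above (flags described by intent rather than value):
 *
 *   bidirectional: [ simple SGE: HOST_TO_IOC, end-of-buffer -> data_out ]
 *                  [ simple SGE: last element, end-of-buffer,
 *                    end-of-list -> data_in ]
 *   write only:    one simple SGE with the last/end flags plus HOST_TO_IOC
 *                  for data_out
 *   read only:     one simple SGE with the last/end flags for data_in
 *   no data:       a zero length SGE from _base_build_zero_len_sge()
 */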
1329
1330/* IEEE format sgls */
1331
1332/**
1333 * _base_add_sg_single_ieee - add sg element for IEEE format
1334 * @paddr: virtual address for SGE
1335 * @flags: SGE flags
1336 * @chain_offset: number of 128 byte elements from start of segment
1337 * @length: data transfer length
1338 * @dma_addr: Physical address
1339 *
1340 * Return nothing.
1341 */
1342static void
1343_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
1344        dma_addr_t dma_addr)
1345{
1346        Mpi25IeeeSgeChain64_t *sgel = paddr;
1347
1348        sgel->Flags = flags;
1349        sgel->NextChainOffset = chain_offset;
1350        sgel->Length = cpu_to_le32(length);
1351        sgel->Address = cpu_to_le64(dma_addr);
1352}
1353
1354/**
1355 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1356 * @ioc: per adapter object
1357 * @paddr: virtual address for SGE
1358 *
 1359 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1360 * something to use if the target device goes brain dead and tries
1361 * to send data even when none is asked for.
1362 *
1363 * Return nothing.
1364 */
1365static void
1366_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1367{
1368        u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1369                MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1370                MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
1371
1372        _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
1373}
1374
1375/**
1376 * _base_build_sg_scmd - main sg creation routine
1377 * @ioc: per adapter object
1378 * @scmd: scsi command
1379 * @smid: system request message index
1380 * Context: none.
1381 *
1382 * The main routine that builds scatter gather table from a given
1383 * scsi request sent via the .queuecommand main handler.
1384 *
1385 * Returns 0 success, anything else error
1386 */
1387static int
1388_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1389                struct scsi_cmnd *scmd, u16 smid)
1390{
1391        Mpi2SCSIIORequest_t *mpi_request;
1392        dma_addr_t chain_dma;
1393        struct scatterlist *sg_scmd;
1394        void *sg_local, *chain;
1395        u32 chain_offset;
1396        u32 chain_length;
1397        u32 chain_flags;
1398        int sges_left;
1399        u32 sges_in_segment;
1400        u32 sgl_flags;
1401        u32 sgl_flags_last_element;
1402        u32 sgl_flags_end_buffer;
1403        struct chain_tracker *chain_req;
1404
1405        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1406
1407        /* init scatter gather flags */
1408        sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
1409        if (scmd->sc_data_direction == DMA_TO_DEVICE)
1410                sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
1411        sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
1412            << MPI2_SGE_FLAGS_SHIFT;
1413        sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
1414            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
1415            << MPI2_SGE_FLAGS_SHIFT;
1416        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1417
1418        sg_scmd = scsi_sglist(scmd);
1419        sges_left = scsi_dma_map(scmd);
1420        if (sges_left < 0) {
1421                sdev_printk(KERN_ERR, scmd->device,
 1422                 "scsi_dma_map failed: request for %d bytes!\n",
1423                 scsi_bufflen(scmd));
1424                return -ENOMEM;
1425        }
1426
1427        sg_local = &mpi_request->SGL;
1428        sges_in_segment = ioc->max_sges_in_main_message;
1429        if (sges_left <= sges_in_segment)
1430                goto fill_in_last_segment;
1431
1432        mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
1433            (sges_in_segment * ioc->sge_size))/4;
1434
1435        /* fill in main message segment when there is a chain following */
1436        while (sges_in_segment) {
1437                if (sges_in_segment == 1)
1438                        ioc->base_add_sg_single(sg_local,
1439                            sgl_flags_last_element | sg_dma_len(sg_scmd),
1440                            sg_dma_address(sg_scmd));
1441                else
1442                        ioc->base_add_sg_single(sg_local, sgl_flags |
1443                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1444                sg_scmd = sg_next(sg_scmd);
1445                sg_local += ioc->sge_size;
1446                sges_left--;
1447                sges_in_segment--;
1448        }
1449
1450        /* initializing the chain flags and pointers */
1451        chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
1452        chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1453        if (!chain_req)
1454                return -1;
1455        chain = chain_req->chain_buffer;
1456        chain_dma = chain_req->chain_buffer_dma;
1457        do {
1458                sges_in_segment = (sges_left <=
1459                    ioc->max_sges_in_chain_message) ? sges_left :
1460                    ioc->max_sges_in_chain_message;
1461                chain_offset = (sges_left == sges_in_segment) ?
1462                    0 : (sges_in_segment * ioc->sge_size)/4;
1463                chain_length = sges_in_segment * ioc->sge_size;
1464                if (chain_offset) {
1465                        chain_offset = chain_offset <<
1466                            MPI2_SGE_CHAIN_OFFSET_SHIFT;
1467                        chain_length += ioc->sge_size;
1468                }
1469                ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
1470                    chain_length, chain_dma);
1471                sg_local = chain;
1472                if (!chain_offset)
1473                        goto fill_in_last_segment;
1474
1475                /* fill in chain segments */
1476                while (sges_in_segment) {
1477                        if (sges_in_segment == 1)
1478                                ioc->base_add_sg_single(sg_local,
1479                                    sgl_flags_last_element |
1480                                    sg_dma_len(sg_scmd),
1481                                    sg_dma_address(sg_scmd));
1482                        else
1483                                ioc->base_add_sg_single(sg_local, sgl_flags |
1484                                    sg_dma_len(sg_scmd),
1485                                    sg_dma_address(sg_scmd));
1486                        sg_scmd = sg_next(sg_scmd);
1487                        sg_local += ioc->sge_size;
1488                        sges_left--;
1489                        sges_in_segment--;
1490                }
1491
1492                chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1493                if (!chain_req)
1494                        return -1;
1495                chain = chain_req->chain_buffer;
1496                chain_dma = chain_req->chain_buffer_dma;
1497        } while (1);
1498
1499
1500 fill_in_last_segment:
1501
1502        /* fill the last segment */
1503        while (sges_left) {
1504                if (sges_left == 1)
1505                        ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
1506                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1507                else
1508                        ioc->base_add_sg_single(sg_local, sgl_flags |
1509                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1510                sg_scmd = sg_next(sg_scmd);
1511                sg_local += ioc->sge_size;
1512                sges_left--;
1513        }
1514
1515        return 0;
1516}
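
/*
 * Illustrative note (not part of the driver): the MPI2-format SGEs built by
 * _base_build_sg_scmd() pack their flags and byte count into one 32-bit
 * FlagsLength word, which is why sgl_flags, sgl_flags_last_element and
 * sgl_flags_end_buffer are pre-shifted by MPI2_SGE_FLAGS_SHIFT and later
 * OR'd with sg_dma_len().  A minimal sketch for one simple element:
 *
 *	u32 flags_length = sgl_flags | sg_dma_len(sg_scmd);
 *
 *	ioc->base_add_sg_single(sg_local, flags_length,
 *	    sg_dma_address(sg_scmd));
 *
 * base_add_sg_single points at either the 32-bit or the 64-bit SGE writer
 * selected in _base_config_dma_addressing() below, so the same builder works
 * for both DMA address widths.
 */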
1517
1518/**
1519 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
1520 * @ioc: per adapter object
1521 * @scmd: scsi command
1522 * @smid: system request message index
1523 * Context: none.
1524 *
1525 * The main routine that builds a scatter gather table from a given
1526 * scsi request sent via the .queuecommand main handler.
1527 *
1528 * Returns 0 on success, anything else is an error
1529 */
1530static int
1531_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1532        struct scsi_cmnd *scmd, u16 smid)
1533{
1534        Mpi2SCSIIORequest_t *mpi_request;
1535        dma_addr_t chain_dma;
1536        struct scatterlist *sg_scmd;
1537        void *sg_local, *chain;
1538        u32 chain_offset;
1539        u32 chain_length;
1540        int sges_left;
1541        u32 sges_in_segment;
1542        u8 simple_sgl_flags;
1543        u8 simple_sgl_flags_last;
1544        u8 chain_sgl_flags;
1545        struct chain_tracker *chain_req;
1546
1547        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1548
1549        /* init scatter gather flags */
1550        simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1551            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1552        simple_sgl_flags_last = simple_sgl_flags |
1553            MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1554        chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1555            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1556
1557        sg_scmd = scsi_sglist(scmd);
1558        sges_left = scsi_dma_map(scmd);
1559        if (sges_left < 0) {
1560                sdev_printk(KERN_ERR, scmd->device,
1561                        "scsi_dma_map failed: request for %d bytes!\n",
1562                        scsi_bufflen(scmd));
1563                return -ENOMEM;
1564        }
1565
1566        sg_local = &mpi_request->SGL;
1567        sges_in_segment = (ioc->request_sz -
1568            offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1569        if (sges_left <= sges_in_segment)
1570                goto fill_in_last_segment;
1571
1572        mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1573            (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1574
1575        /* fill in main message segment when there is a chain following */
1576        while (sges_in_segment > 1) {
1577                _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1578                    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1579                sg_scmd = sg_next(sg_scmd);
1580                sg_local += ioc->sge_size_ieee;
1581                sges_left--;
1582                sges_in_segment--;
1583        }
1584
1585        /* initializing the pointers */
1586        chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1587        if (!chain_req)
1588                return -1;
1589        chain = chain_req->chain_buffer;
1590        chain_dma = chain_req->chain_buffer_dma;
1591        do {
1592                sges_in_segment = (sges_left <=
1593                    ioc->max_sges_in_chain_message) ? sges_left :
1594                    ioc->max_sges_in_chain_message;
1595                chain_offset = (sges_left == sges_in_segment) ?
1596                    0 : sges_in_segment;
1597                chain_length = sges_in_segment * ioc->sge_size_ieee;
1598                if (chain_offset)
1599                        chain_length += ioc->sge_size_ieee;
1600                _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
1601                    chain_offset, chain_length, chain_dma);
1602
1603                sg_local = chain;
1604                if (!chain_offset)
1605                        goto fill_in_last_segment;
1606
1607                /* fill in chain segments */
1608                while (sges_in_segment) {
1609                        _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1610                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1611                        sg_scmd = sg_next(sg_scmd);
1612                        sg_local += ioc->sge_size_ieee;
1613                        sges_left--;
1614                        sges_in_segment--;
1615                }
1616
1617                chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1618                if (!chain_req)
1619                        return -1;
1620                chain = chain_req->chain_buffer;
1621                chain_dma = chain_req->chain_buffer_dma;
1622        } while (1);
1623
1624
1625 fill_in_last_segment:
1626
1627        /* fill the last segment */
1628        while (sges_left > 0) {
1629                if (sges_left == 1)
1630                        _base_add_sg_single_ieee(sg_local,
1631                            simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
1632                            sg_dma_address(sg_scmd));
1633                else
1634                        _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1635                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1636                sg_scmd = sg_next(sg_scmd);
1637                sg_local += ioc->sge_size_ieee;
1638                sges_left--;
1639        }
1640
1641        return 0;
1642}
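
/*
 * Illustrative note (not part of the driver): the two builders above encode
 * ChainOffset in different units.  The MPI2-format builder expresses it in
 * 32-bit words (hence the divide by 4), while the IEEE builder counts whole
 * SGEs.  For a chain segment holding N simple SGEs with more data still to
 * map:
 *
 *	MPI2:  chain_offset = (N * ioc->sge_size) / 4;
 *	IEEE:  chain_offset = N;
 *
 * In both cases a non-zero chain_offset means another chain element follows
 * the simple SGEs of this segment, which is why one extra SGE worth of space
 * is added to chain_length.
 */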
1643
1644/**
1645 * _base_build_sg_ieee - build generic sg for IEEE format
1646 * @ioc: per adapter object
1647 * @psge: virtual address for SGE
1648 * @data_out_dma: physical address for WRITES
1649 * @data_out_sz: data xfer size for WRITES
1650 * @data_in_dma: physical address for READS
1651 * @data_in_sz: data xfer size for READS
1652 *
1653 * Return nothing.
1654 */
1655static void
1656_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
1657        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1658        size_t data_in_sz)
1659{
1660        u8 sgl_flags;
1661
1662        if (!data_out_sz && !data_in_sz) {
1663                _base_build_zero_len_sge_ieee(ioc, psge);
1664                return;
1665        }
1666
1667        if (data_out_sz && data_in_sz) {
1668                /* WRITE sgel first */
1669                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1670                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1671                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1672                    data_out_dma);
1673
1674                /* incr sgel */
1675                psge += ioc->sge_size_ieee;
1676
1677                /* READ sgel last */
1678                sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1679                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1680                    data_in_dma);
1681        } else if (data_out_sz) /* WRITE */ {
1682                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1683                    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1684                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1685                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1686                    data_out_dma);
1687        } else if (data_in_sz) /* READ */ {
1688                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1689                    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1690                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1691                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1692                    data_in_dma);
1693        }
1694}
1695
1696#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
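
/*
 * convert_to_kb() turns a page count into kilobytes: a page is
 * (1 << PAGE_SHIFT) bytes and a kB is (1 << 10) bytes, so shifting left by
 * (PAGE_SHIFT - 10) multiplies the page count by the page size in kB.  With
 * 4 KiB pages (PAGE_SHIFT == 12) this is "x << 2", i.e. x * 4 kB.
 */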
1697
1698/**
1699 * _base_config_dma_addressing - set dma addressing
1700 * @ioc: per adapter object
1701 * @pdev: PCI device struct
1702 *
1703 * Returns 0 for success, non-zero for failure.
1704 */
1705static int
1706_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1707{
1708        struct sysinfo s;
1709        u64 consistent_dma_mask;
1710
1711        if (ioc->dma_mask)
1712                consistent_dma_mask = DMA_BIT_MASK(64);
1713        else
1714                consistent_dma_mask = DMA_BIT_MASK(32);
1715
1716        if (sizeof(dma_addr_t) > 4) {
1717                const uint64_t required_mask =
1718                    dma_get_required_mask(&pdev->dev);
1719                if ((required_mask > DMA_BIT_MASK(32)) &&
1720                    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1721                    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
1722                        ioc->base_add_sg_single = &_base_add_sg_single_64;
1723                        ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1724                        ioc->dma_mask = 64;
1725                        goto out;
1726                }
1727        }
1728
1729        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1730            && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1731                ioc->base_add_sg_single = &_base_add_sg_single_32;
1732                ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1733                ioc->dma_mask = 32;
1734        } else
1735                return -ENODEV;
1736
1737 out:
1738        si_meminfo(&s);
1739        pr_info(MPT3SAS_FMT
1740                "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1741                ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
1742
1743        return 0;
1744}
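
/*
 * For reference only -- a minimal sketch of the same mask negotiation using
 * the consolidated DMA API (dma_set_mask_and_coherent()), which is not what
 * this version of the driver uses:
 *
 *	if (sizeof(dma_addr_t) > 4 &&
 *	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 *		// 64-bit streaming and coherent DMA accepted
 *	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 *		return -ENODEV;
 *	}
 *
 * Note the driver also has to pick the matching SGE writer and size
 * (ioc->base_add_sg_single, ioc->sge_size) based on whichever mask was
 * accepted, as done above.
 */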
1745
1746static int
1747_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
1748                                      struct pci_dev *pdev)
1749{
1750        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1751                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1752                        return -ENODEV;
1753        }
1754        return 0;
1755}
1756
1757/**
1758 * _base_check_enable_msix - checks whether the controller is MSI-X capable
1759 * @ioc: per adapter object
1760 *
1761 * Check to see if card is capable of MSIX, and set number
1762 * of available msix vectors
1763 */
1764static int
1765_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1766{
1767        int base;
1768        u16 message_control;
1769
1770        /* Check whether the controller is a SAS2008 B0; if so, use
1771         * IO-APIC instead of MSI-X.
1772         */
1773        if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1774            ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
1775                return -EINVAL;
1776        }
1777
1778        base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1779        if (!base) {
1780                dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
1781                        ioc->name));
1782                return -EINVAL;
1783        }
1784
1785        /* get msix vector count */
1786        /* NUMA_IO not supported for older controllers */
1787        if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1788            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1789            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1790            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1791            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1792            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1793            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1794                ioc->msix_vector_count = 1;
1795        else {
1796                pci_read_config_word(ioc->pdev, base + 2, &message_control);
1797                ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1798        }
1799        dinitprintk(ioc, pr_info(MPT3SAS_FMT
1800                "msix is supported, vector_count(%d)\n",
1801                ioc->name, ioc->msix_vector_count));
1802        return 0;
1803}
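
/*
 * Note: the word read at "base + 2" above is the MSI-X Message Control
 * register; its Table Size field encodes the number of table entries minus
 * one, which is why the computed vector count is the masked value plus 1.
 */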
1804
1805/**
1806 * _base_free_irq - free irq
1807 * @ioc: per adapter object
1808 *
1809 * Frees each reply_queue (and its IRQ) after removing it from the list.
1810 */
1811static void
1812_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
1813{
1814        struct adapter_reply_queue *reply_q, *next;
1815
1816        if (list_empty(&ioc->reply_queue_list))
1817                return;
1818
1819        list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1820                list_del(&reply_q->list);
1821                if (smp_affinity_enable) {
1822                        irq_set_affinity_hint(reply_q->vector, NULL);
1823                        free_cpumask_var(reply_q->affinity_hint);
1824                }
1825                free_irq(reply_q->vector, reply_q);
1826                kfree(reply_q);
1827        }
1828}
1829
1830/**
1831 * _base_request_irq - request irq
1832 * @ioc: per adapter object
1833 * @index: msix index into vector table
1834 * @vector: irq vector
1835 *
1836 * Allocates a reply_queue, requests its IRQ, and inserts it into the list.
1837 */
1838static int
1839_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
1840{
1841        struct adapter_reply_queue *reply_q;
1842        int r;
1843
1844        reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1845        if (!reply_q) {
1846                pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
1847                    ioc->name, (int)sizeof(struct adapter_reply_queue));
1848                return -ENOMEM;
1849        }
1850        reply_q->ioc = ioc;
1851        reply_q->msix_index = index;
1852        reply_q->vector = vector;
1853
1854        if (smp_affinity_enable) {
1855                if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
1856                        kfree(reply_q);
1857                        return -ENOMEM;
1858                }
1859        }
1860
1861        atomic_set(&reply_q->busy, 0);
1862        if (ioc->msix_enable)
1863                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1864                    ioc->driver_name, ioc->id, index);
1865        else
1866                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1867                    ioc->driver_name, ioc->id);
1868        r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1869            reply_q);
1870        if (r) {
1871                pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
1872                    reply_q->name, vector);
1873                free_cpumask_var(reply_q->affinity_hint);
1874                kfree(reply_q);
1875                return -EBUSY;
1876        }
1877
1878        INIT_LIST_HEAD(&reply_q->list);
1879        list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1880        return 0;
1881}
1882
1883/**
1884 * _base_assign_reply_queues - assigning msix index for each cpu
1885 * @ioc: per adapter object
1886 *
1887 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
1888 *
1889 * It would be nice if we could call irq_set_affinity; however, it is not
1890 * an exported symbol
1891 */
1892static void
1893_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1894{
1895        unsigned int cpu, nr_cpus, nr_msix, index = 0;
1896        struct adapter_reply_queue *reply_q;
1897
1898        if (!_base_is_controller_msix_enabled(ioc))
1899                return;
1900
1901        memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1902
1903        nr_cpus = num_online_cpus();
1904        nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
1905                                               ioc->facts.MaxMSIxVectors);
1906        if (!nr_msix)
1907                return;
1908
1909        cpu = cpumask_first(cpu_online_mask);
1910
1911        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1912
1913                unsigned int i, group = nr_cpus / nr_msix;
1914
1915                if (cpu >= nr_cpus)
1916                        break;
1917
1918                if (index < nr_cpus % nr_msix)
1919                        group++;
1920
1921                for (i = 0 ; i < group ; i++) {
1922                        ioc->cpu_msix_table[cpu] = index;
1923                        if (smp_affinity_enable)
1924                                cpumask_or(reply_q->affinity_hint,
1925                                   reply_q->affinity_hint, get_cpu_mask(cpu));
1926                        cpu = cpumask_next(cpu, cpu_online_mask);
1927                }
1928                if (smp_affinity_enable)
1929                        if (irq_set_affinity_hint(reply_q->vector,
1930                                           reply_q->affinity_hint))
1931                                dinitprintk(ioc, pr_info(MPT3SAS_FMT
1932                                 "Err setting affinity hint to irq vector %d\n",
1933                                 ioc->name, reply_q->vector));
1934                index++;
1935        }
1936}
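
/*
 * Distribution example (illustrative): with nr_cpus = 8 online CPUs and
 * nr_msix = 3 reply queues, group starts at 8 / 3 = 2 and the first
 * 8 % 3 = 2 queues get one extra CPU, so msix index 0 and 1 each serve
 * 3 CPUs and msix index 2 serves the remaining 2, filling cpu_msix_table
 * for every online CPU.
 */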
1937
1938/**
1939 * _base_disable_msix - disables msix
1940 * @ioc: per adapter object
1941 *
1942 */
1943static void
1944_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1945{
1946        if (!ioc->msix_enable)
1947                return;
1948        pci_disable_msix(ioc->pdev);
1949        ioc->msix_enable = 0;
1950}
1951
1952/**
1953 * _base_enable_msix - enables msix, falling back to io_apic on failure
1954 * @ioc: per adapter object
1955 *
1956 */
1957static int
1958_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1959{
1960        struct msix_entry *entries, *a;
1961        int r;
1962        int i;
1963        u8 try_msix = 0;
1964
1965        if (msix_disable == -1 || msix_disable == 0)
1966                try_msix = 1;
1967
1968        if (!try_msix)
1969                goto try_ioapic;
1970
1971        if (_base_check_enable_msix(ioc) != 0)
1972                goto try_ioapic;
1973
1974        ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1975            ioc->msix_vector_count);
1976
1977        printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
1978          ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
1979          ioc->cpu_count, max_msix_vectors);
1980
1981        if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1982                max_msix_vectors = 8;
1983
1984        if (max_msix_vectors > 0) {
1985                ioc->reply_queue_count = min_t(int, max_msix_vectors,
1986                        ioc->reply_queue_count);
1987                ioc->msix_vector_count = ioc->reply_queue_count;
1988        } else if (max_msix_vectors == 0)
1989                goto try_ioapic;
1990
1991        if (ioc->msix_vector_count < ioc->cpu_count)
1992                smp_affinity_enable = 0;
1993
1994        entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1995            GFP_KERNEL);
1996        if (!entries) {
1997                dfailprintk(ioc, pr_info(MPT3SAS_FMT
1998                        "kcalloc failed @ at %s:%d/%s() !!!\n",
1999                        ioc->name, __FILE__, __LINE__, __func__));
2000                goto try_ioapic;
2001        }
2002
2003        for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
2004                a->entry = i;
2005
2006        r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
2007        if (r) {
2008                dfailprintk(ioc, pr_info(MPT3SAS_FMT
2009                        "pci_enable_msix_exact failed (r=%d) !!!\n",
2010                        ioc->name, r));
2011                kfree(entries);
2012                goto try_ioapic;
2013        }
2014
2015        ioc->msix_enable = 1;
2016        for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
2017                r = _base_request_irq(ioc, i, a->vector);
2018                if (r) {
2019                        _base_free_irq(ioc);
2020                        _base_disable_msix(ioc);
2021                        kfree(entries);
2022                        goto try_ioapic;
2023                }
2024        }
2025
2026        kfree(entries);
2027        return 0;
2028
2029/* fall back to io_apic interrupt routing */
2030 try_ioapic:
2031
2032        ioc->reply_queue_count = 1;
2033        r = _base_request_irq(ioc, 0, ioc->pdev->irq);
2034
2035        return r;
2036}
2037
2038/**
2039 * mpt3sas_base_unmap_resources - free controller resources
2040 * @ioc: per adapter object
2041 */
2042static void
2043mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2044{
2045        struct pci_dev *pdev = ioc->pdev;
2046
2047        dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
2048                ioc->name, __func__));
2049
2050        _base_free_irq(ioc);
2051        _base_disable_msix(ioc);
2052
2053        if (ioc->msix96_vector) {
2054                kfree(ioc->replyPostRegisterIndex);
2055                ioc->replyPostRegisterIndex = NULL;
2056        }
2057
2058        if (ioc->chip_phys) {
2059                iounmap(ioc->chip);
2060                ioc->chip_phys = 0;
2061        }
2062
2063        if (pci_is_enabled(pdev)) {
2064                pci_release_selected_regions(ioc->pdev, ioc->bars);
2065                pci_disable_pcie_error_reporting(pdev);
2066                pci_disable_device(pdev);
2067        }
2068}
2069
2070/**
2071 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
2072 * @ioc: per adapter object
2073 *
2074 * Returns 0 for success, non-zero for failure.
2075 */
2076int
2077mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2078{
2079        struct pci_dev *pdev = ioc->pdev;
2080        u32 memap_sz;
2081        u32 pio_sz;
2082        int i, r = 0;
2083        u64 pio_chip = 0;
2084        u64 chip_phys = 0;
2085        struct adapter_reply_queue *reply_q;
2086
2087        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
2088            ioc->name, __func__));
2089
2090        ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2091        if (pci_enable_device_mem(pdev)) {
2092                pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
2093                        ioc->name);
2094                ioc->bars = 0;
2095                return -ENODEV;
2096        }
2097
2098
2099        if (pci_request_selected_regions(pdev, ioc->bars,
2100            ioc->driver_name)) {
2101                pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
2102                        ioc->name);
2103                ioc->bars = 0;
2104                r = -ENODEV;
2105                goto out_fail;
2106        }
2107
2108/* AER (Advanced Error Reporting) hooks */
2109        pci_enable_pcie_error_reporting(pdev);
2110
2111        pci_set_master(pdev);
2112
2113
2114        if (_base_config_dma_addressing(ioc, pdev) != 0) {
2115                pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
2116                    ioc->name, pci_name(pdev));
2117                r = -ENODEV;
2118                goto out_fail;
2119        }
2120
2121        for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2122             (!memap_sz || !pio_sz); i++) {
2123                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2124                        if (pio_sz)
2125                                continue;
2126                        pio_chip = (u64)pci_resource_start(pdev, i);
2127                        pio_sz = pci_resource_len(pdev, i);
2128                } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2129                        if (memap_sz)
2130                                continue;
2131                        ioc->chip_phys = pci_resource_start(pdev, i);
2132                        chip_phys = (u64)ioc->chip_phys;
2133                        memap_sz = pci_resource_len(pdev, i);
2134                        ioc->chip = ioremap(ioc->chip_phys, memap_sz);
2135                }
2136        }
2137
2138        if (ioc->chip == NULL) {
2139                pr_err(MPT3SAS_FMT "unable to map adapter memory "
2140                        "or resource not found\n", ioc->name);
2141                r = -EINVAL;
2142                goto out_fail;
2143        }
2144
2145        _base_mask_interrupts(ioc);
2146
2147        r = _base_get_ioc_facts(ioc);
2148        if (r)
2149                goto out_fail;
2150
2151        if (!ioc->rdpq_array_enable_assigned) {
2152                ioc->rdpq_array_enable = ioc->rdpq_array_capable;
2153                ioc->rdpq_array_enable_assigned = 1;
2154        }
2155
2156        r = _base_enable_msix(ioc);
2157        if (r)
2158                goto out_fail;
2159
2160        /* Use the Combined reply queue feature only for SAS3 C0 & higher
2161         * revision HBAs and also only when reply queue count is greater than 8
2162         */
2163        if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
2164                /* Determine the Supplemental Reply Post Host Index Register
2165                 * addresses. They start at offset
2166                 * MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and each register
2167                 * is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes after
2168                 * the previous one.
2169                 */
2170                ioc->replyPostRegisterIndex = kcalloc(
2171                     MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
2172                     sizeof(resource_size_t *), GFP_KERNEL);
2173                if (!ioc->replyPostRegisterIndex) {
2174                        dfailprintk(ioc, printk(MPT3SAS_FMT
2175                        "allocation for reply Post Register Index failed!!!\n",
2176                                                                   ioc->name));
2177                        r = -ENOMEM;
2178                        goto out_fail;
2179                }
2180
2181                for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
2182                        ioc->replyPostRegisterIndex[i] = (resource_size_t *)
2183                             ((u8 *)&ioc->chip->Doorbell +
2184                             MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2185                             (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
2186                }
2187        } else
2188                ioc->msix96_vector = 0;
2189
2190        if (ioc->is_warpdrive) {
2191                ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
2192                    &ioc->chip->ReplyPostHostIndex;
2193
2194                for (i = 1; i < ioc->cpu_msix_table_sz; i++)
2195                        ioc->reply_post_host_index[i] =
2196                        (resource_size_t __iomem *)
2197                        ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
2198                        * 4)));
2199        }
2200
2201        list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
2202                pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
2203                    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
2204                    "IO-APIC enabled"), reply_q->vector);
2205
2206        pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
2207            ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
2208        pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
2209            ioc->name, (unsigned long long)pio_chip, pio_sz);
2210
2211        /* Save PCI configuration state for recovery from PCI AER/EEH errors */
2212        pci_save_state(pdev);
2213        return 0;
2214
2215 out_fail:
2216        mpt3sas_base_unmap_resources(ioc);
2217        return r;
2218}
2219
2220/**
2221 * mpt3sas_base_get_msg_frame - obtain request mf pointer
2222 * @ioc: per adapter object
2223 * @smid: system request message index (smid zero is invalid)
2224 *
2225 * Returns virt pointer to message frame.
2226 */
2227void *
2228mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2229{
2230        return (void *)(ioc->request + (smid * ioc->request_sz));
2231}
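
/*
 * The request pool is carved into fixed-size slots of ioc->request_sz bytes,
 * so the frame for a given smid is simply base + smid * request_sz.  Since
 * smid zero is invalid, offset 0 of the pool is never returned by this
 * helper.
 */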
2232
2233/**
2234 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
2235 * @ioc: per adapter object
2236 * @smid: system request message index
2237 *
2238 * Returns virt pointer to sense buffer.
2239 */
2240void *
2241mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2242{
2243        return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
2244}
2245
2246/**
2247 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
2248 * @ioc: per adapter object
2249 * @smid: system request message index
2250 *
2251 * Returns phys pointer to the low 32bit address of the sense buffer.
2252 */
2253__le32
2254mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2255{
2256        return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
2257            SCSI_SENSE_BUFFERSIZE));
2258}
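
/*
 * Sense buffers, unlike message frames, are indexed by (smid - 1), so smid 1
 * maps to the first SCSI_SENSE_BUFFERSIZE-byte slot.  Only the low 32 bits of
 * the DMA address are returned here because the SCSI_IO request carries a
 * 32-bit SenseBufferLowAddress; the upper bits are supplied to the IOC
 * separately at init time.
 */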
2259
2260/**
2261 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
2262 * @ioc: per adapter object
2263 * @phys_addr: lower 32 bits of the physical addr of the reply
2264 *
2265 * Converts the lower 32 bit physical addr into a virt address.
2266 */
2267void *
2268mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
2269{
2270        if (!phys_addr)
2271                return NULL;
2272        return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
2273}
2274
2275static inline u8
2276_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
2277{
2278        return ioc->cpu_msix_table[raw_smp_processor_id()];
2279}
2280
2281/**
2282 * mpt3sas_base_get_smid - obtain a free smid from internal queue
2283 * @ioc: per adapter object
2284 * @cb_idx: callback index
2285 *
2286 * Returns smid (zero is invalid)
2287 */
2288u16
2289mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2290{
2291        unsigned long flags;
2292        struct request_tracker *request;
2293        u16 smid;
2294
2295        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2296        if (list_empty(&ioc->internal_free_list)) {
2297                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2298                pr_err(MPT3SAS_FMT "%s: smid not available\n",
2299                    ioc->name, __func__);
2300                return 0;
2301        }
2302
2303        request = list_entry(ioc->internal_free_list.next,
2304            struct request_tracker, tracker_list);
2305        request->cb_idx = cb_idx;
2306        smid = request->smid;
2307        list_del(&request->tracker_list);
2308        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2309        return smid;
2310}
2311
2312/**
2313 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
2314 * @ioc: per adapter object
2315 * @cb_idx: callback index
2316 * @scmd: pointer to scsi command object
2317 *
2318 * Returns smid (zero is invalid)
2319 */
2320u16
2321mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
2322        struct scsi_cmnd *scmd)
2323{
2324        unsigned long flags;
2325        struct scsiio_tracker *request;
2326        u16 smid;
2327
2328        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2329        if (list_empty(&ioc->free_list)) {
2330                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2331                pr_err(MPT3SAS_FMT "%s: smid not available\n",
2332                    ioc->name, __func__);
2333                return 0;
2334        }
2335
2336        request = list_entry(ioc->free_list.next,
2337            struct scsiio_tracker, tracker_list);
2338        request->scmd = scmd;
2339        request->cb_idx = cb_idx;
2340        smid = request->smid;
2341        request->msix_io = _base_get_msix_index(ioc);
2342        list_del(&request->tracker_list);
2343        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2344        return smid;
2345}
2346
2347/**
2348 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
2349 * @ioc: per adapter object
2350 * @cb_idx: callback index
2351 *
2352 * Returns smid (zero is invalid)
2353 */
2354u16
2355mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2356{
2357        unsigned long flags;
2358        struct request_tracker *request;
2359        u16 smid;
2360
2361        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2362        if (list_empty(&ioc->hpr_free_list)) {
2363                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2364                return 0;
2365        }
2366
2367        request = list_entry(ioc->hpr_free_list.next,
2368            struct request_tracker, tracker_list);
2369        request->cb_idx = cb_idx;
2370        smid = request->smid;
2371        list_del(&request->tracker_list);
2372        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2373        return smid;
2374}
2375
2376/**
2377 * mpt3sas_base_free_smid - put smid back on free_list
2378 * @ioc: per adapter object
2379 * @smid: system request message index
2380 *
2381 * Return nothing.
2382 */
2383void
2384mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2385{
2386        unsigned long flags;
2387        int i;
2388        struct chain_tracker *chain_req, *next;
2389
2390        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2391        if (smid < ioc->hi_priority_smid) {
2392                /* scsiio queue */
2393                i = smid - 1;
2394                if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
2395                        list_for_each_entry_safe(chain_req, next,
2396                            &ioc->scsi_lookup[i].chain_list, tracker_list) {
2397                                list_del_init(&chain_req->tracker_list);
2398                                list_add(&chain_req->tracker_list,
2399                                    &ioc->free_chain_list);
2400                        }
2401                }
2402                ioc->scsi_lookup[i].cb_idx = 0xFF;
2403                ioc->scsi_lookup[i].scmd = NULL;
2404                ioc->scsi_lookup[i].direct_io = 0;
2405                list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
2406                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2407
2408                /*
2409                 * See _wait_for_commands_to_complete() call with regards
2410                 * to this code.
2411                 */
2412                if (ioc->shost_recovery && ioc->pending_io_count) {
2413                        if (ioc->pending_io_count == 1)
2414                                wake_up(&ioc->reset_wq);
2415                        ioc->pending_io_count--;
2416                }
2417                return;
2418        } else if (smid < ioc->internal_smid) {
2419                /* hi-priority */
2420                i = smid - ioc->hi_priority_smid;
2421                ioc->hpr_lookup[i].cb_idx = 0xFF;
2422                list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
2423        } else if (smid <= ioc->hba_queue_depth) {
2424                /* internal queue */
2425                i = smid - ioc->internal_smid;
2426                ioc->internal_lookup[i].cb_idx = 0xFF;
2427                list_add(&ioc->internal_lookup[i].tracker_list,
2428                    &ioc->internal_free_list);
2429        }
2430        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2431}
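
/*
 * Illustrative lifecycle of a SCSI I/O smid using only the helpers in this
 * file (error handling trimmed; the real caller lives in mpt3sas_scsih.c,
 * and "handle" and "cb_idx" are assumed to come from it):
 *
 *	u16 smid = mpt3sas_base_get_smid_scsiio(ioc, cb_idx, scmd);
 *	Mpi2SCSIIORequest_t *mpi_request =
 *		mpt3sas_base_get_msg_frame(ioc, smid);
 *
 *	memset(mpi_request, 0, ioc->request_sz);
 *	// ... fill in the SCSI_IO request and build its SGL ...
 *	mpt3sas_base_put_smid_scsi_io(ioc, smid, handle);
 *
 * On completion the smid is handed back via mpt3sas_base_free_smid(ioc, smid),
 * which moves the tracker (and any chain buffers) back to the free lists.
 */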
2432
2433/**
2434 * _base_writeq - 64 bit write to MMIO
2435 * @ioc: per adapter object
2436 * @b: data payload
2437 * @addr: address in MMIO space
2438 * @writeq_lock: spin lock
2439 *
2440 * Glue for handling an atomic 64 bit word write to MMIO. This special handling
2441 * takes care of 32 bit environments, where it is not guaranteed that the entire
2442 * word is sent in one transfer.
2443 */
2444#if defined(writeq) && defined(CONFIG_64BIT)
2445static inline void
2446_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2447{
2448        writeq(cpu_to_le64(b), addr);
2449}
2450#else
2451static inline void
2452_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2453{
2454        unsigned long flags;
2455        __u64 data_out = cpu_to_le64(b);
2456
2457        spin_lock_irqsave(writeq_lock, flags);
2458        writel((u32)(data_out), addr);
2459        writel((u32)(data_out >> 32), (addr + 4));
2460        spin_unlock_irqrestore(writeq_lock, flags);
2461}
2462#endif
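
/*
 * Note on the 32-bit fallback above: the two halves of one descriptor must
 * not be interleaved with the halves of a descriptor posted by another CPU,
 * so the low and high dwords are written back to back under writeq_lock.
 * The callers below all pass &ioc->scsi_lookup_lock as that lock.
 */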
2463
2464/**
2465 * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
2466 * @ioc: per adapter object
2467 * @smid: system request message index
2468 * @handle: device handle
2469 *
2470 * Return nothing.
2471 */
2472void
2473mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2474{
2475        Mpi2RequestDescriptorUnion_t descriptor;
2476        u64 *request = (u64 *)&descriptor;
2477
2478
2479        descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2480        descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
2481        descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2482        descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2483        descriptor.SCSIIO.LMID = 0;
2484        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2485            &ioc->scsi_lookup_lock);
2486}
2487
2488/**
2489 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
2490 * @ioc: per adapter object
2491 * @smid: system request message index
2492 * @handle: device handle
2493 *
2494 * Return nothing.
2495 */
2496void
2497mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2498        u16 handle)
2499{
2500        Mpi2RequestDescriptorUnion_t descriptor;
2501        u64 *request = (u64 *)&descriptor;
2502
2503        descriptor.SCSIIO.RequestFlags =
2504            MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2505        descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2506        descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2507        descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2508        descriptor.SCSIIO.LMID = 0;
2509        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2510            &ioc->scsi_lookup_lock);
2511}
2512
2513/**
2514 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
2515 * @ioc: per adapter object
2516 * @smid: system request message index
2517 * @msix_task: same as the MSI-X index of the I/O in case of task abort, else 0.
2518 * Return nothing.
2519 */
2520void
2521mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2522        u16 msix_task)
2523{
2524        Mpi2RequestDescriptorUnion_t descriptor;
2525        u64 *request = (u64 *)&descriptor;
2526
2527        descriptor.HighPriority.RequestFlags =
2528            MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2529        descriptor.HighPriority.MSIxIndex =  msix_task;
2530        descriptor.HighPriority.SMID = cpu_to_le16(smid);
2531        descriptor.HighPriority.LMID = 0;
2532        descriptor.HighPriority.Reserved1 = 0;
2533        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2534            &ioc->scsi_lookup_lock);
2535}
2536
2537/**
2538 * mpt3sas_base_put_smid_default - Default, primarily used for config pages
2539 * @ioc: per adapter object
2540 * @smid: system request message index
2541 *
2542 * Return nothing.
2543 */
2544void
2545mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2546{
2547        Mpi2RequestDescriptorUnion_t descriptor;
2548        u64 *request = (u64 *)&descriptor;
2549
2550        descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2551        descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
2552        descriptor.Default.SMID = cpu_to_le16(smid);
2553        descriptor.Default.LMID = 0;
2554        descriptor.Default.DescriptorTypeDependent = 0;
2555        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2556            &ioc->scsi_lookup_lock);
2557}
2558
2559/**
2560 * _base_display_OEMs_branding - Display branding string
2561 * @ioc: per adapter object
2562 *
2563 * Return nothing.
2564 */
2565static void
2566_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
2567{
2570
2571        switch (ioc->pdev->subsystem_vendor) {
2572        case PCI_VENDOR_ID_INTEL:
2573                switch (ioc->pdev->device) {
2574                case MPI2_MFGPAGE_DEVID_SAS2008:
2575                        switch (ioc->pdev->subsystem_device) {
2576                        case MPT2SAS_INTEL_RMS2LL080_SSDID:
2577                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2578                                    MPT2SAS_INTEL_RMS2LL080_BRANDING);
2579                                break;
2580                        case MPT2SAS_INTEL_RMS2LL040_SSDID:
2581                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2582                                    MPT2SAS_INTEL_RMS2LL040_BRANDING);
2583                                break;
2584                        case MPT2SAS_INTEL_SSD910_SSDID:
2585                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2586                                    MPT2SAS_INTEL_SSD910_BRANDING);
2587                                break;
2588                        default:
2589                                pr_info(MPT3SAS_FMT
2590                                 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2591                                 ioc->name, ioc->pdev->subsystem_device);
2592                                break;
2593                        }
                        break;
2594                case MPI2_MFGPAGE_DEVID_SAS2308_2:
2595                        switch (ioc->pdev->subsystem_device) {
2596                        case MPT2SAS_INTEL_RS25GB008_SSDID:
2597                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2598                                    MPT2SAS_INTEL_RS25GB008_BRANDING);
2599                                break;
2600                        case MPT2SAS_INTEL_RMS25JB080_SSDID:
2601                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2602                                    MPT2SAS_INTEL_RMS25JB080_BRANDING);
2603                                break;
2604                        case MPT2SAS_INTEL_RMS25JB040_SSDID:
2605                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2606                                    MPT2SAS_INTEL_RMS25JB040_BRANDING);
2607                                break;
2608                        case MPT2SAS_INTEL_RMS25KB080_SSDID:
2609                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2610                                    MPT2SAS_INTEL_RMS25KB080_BRANDING);
2611                                break;
2612                        case MPT2SAS_INTEL_RMS25KB040_SSDID:
2613                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2614                                    MPT2SAS_INTEL_RMS25KB040_BRANDING);
2615                                break;
2616                        case MPT2SAS_INTEL_RMS25LB040_SSDID:
2617                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2618                                    MPT2SAS_INTEL_RMS25LB040_BRANDING);
2619                                break;
2620                        case MPT2SAS_INTEL_RMS25LB080_SSDID:
2621                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2622                                    MPT2SAS_INTEL_RMS25LB080_BRANDING);
2623                                break;
2624                        default:
2625                                pr_info(MPT3SAS_FMT
2626                                 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2627                                 ioc->name, ioc->pdev->subsystem_device);
2628                                break;
2629                        }
                        break;
2630                case MPI25_MFGPAGE_DEVID_SAS3008:
2631                        switch (ioc->pdev->subsystem_device) {
2632                        case MPT3SAS_INTEL_RMS3JC080_SSDID:
2633                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2634                                        MPT3SAS_INTEL_RMS3JC080_BRANDING);
2635                                break;
2636
2637                        case MPT3SAS_INTEL_RS3GC008_SSDID:
2638                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2639                                        MPT3SAS_INTEL_RS3GC008_BRANDING);
2640                                break;
2641                        case MPT3SAS_INTEL_RS3FC044_SSDID:
2642                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2643                                        MPT3SAS_INTEL_RS3FC044_BRANDING);
2644                                break;
2645                        case MPT3SAS_INTEL_RS3UC080_SSDID:
2646                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2647                                        MPT3SAS_INTEL_RS3UC080_BRANDING);
2648                                break;
2649                        default:
2650                                pr_info(MPT3SAS_FMT
2651                                 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2652                                 ioc->name, ioc->pdev->subsystem_device);
2653                                break;
2654                        }
2655                        break;
2656                default:
2657                        pr_info(MPT3SAS_FMT
2658                         "Intel(R) Controller: Subsystem ID: 0x%X\n",
2659                         ioc->name, ioc->pdev->subsystem_device);
2660                        break;
2661                }
2662                break;
2663        case PCI_VENDOR_ID_DELL:
2664                switch (ioc->pdev->device) {
2665                case MPI2_MFGPAGE_DEVID_SAS2008:
2666                        switch (ioc->pdev->subsystem_device) {
2667                        case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
2668                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2669                                 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
2670                                break;
2671                        case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
2672                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2673                                 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
2674                                break;
2675                        case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
2676                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2677                                 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
2678                                break;
2679                        case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
2680                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2681                                 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
2682                                break;
2683                        case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
2684                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2685                                 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
2686                                break;
2687                        case MPT2SAS_DELL_PERC_H200_SSDID:
2688                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2689                                 MPT2SAS_DELL_PERC_H200_BRANDING);
2690                                break;
2691                        case MPT2SAS_DELL_6GBPS_SAS_SSDID:
2692                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2693                                 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
2694                                break;
2695                        default:
2696                                pr_info(MPT3SAS_FMT
2697                                   "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
2698                                   ioc->name, ioc->pdev->subsystem_device);
2699                                break;
2700                        }
2701                        break;
2702                case MPI25_MFGPAGE_DEVID_SAS3008:
2703                        switch (ioc->pdev->subsystem_device) {
2704                        case MPT3SAS_DELL_12G_HBA_SSDID:
2705                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2706                                        MPT3SAS_DELL_12G_HBA_BRANDING);
2707                                break;
2708                        default:
2709                                pr_info(MPT3SAS_FMT
2710                                   "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
2711                                   ioc->name, ioc->pdev->subsystem_device);
2712                                break;
2713                        }
2714                        break;
2715                default:
2716                        pr_info(MPT3SAS_FMT
2717                           "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
2718                           ioc->pdev->subsystem_device);
2719                        break;
2720                }
2721                break;
2722        case PCI_VENDOR_ID_CISCO:
2723                switch (ioc->pdev->device) {
2724                case MPI25_MFGPAGE_DEVID_SAS3008:
2725                        switch (ioc->pdev->subsystem_device) {
2726                        case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
2727                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2728                                        MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
2729                                break;
2730                        case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
2731                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2732                                        MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
2733                                break;
2734                        case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2735                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2736                                        MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2737                                break;
2738                        default:
2739                                pr_info(MPT3SAS_FMT
2740                                  "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2741                                  ioc->name, ioc->pdev->subsystem_device);
2742                                break;
2743                        }
2744                        break;
2745                case MPI25_MFGPAGE_DEVID_SAS3108_1:
2746                        switch (ioc->pdev->subsystem_device) {
2747                        case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2748                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2749                                MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2750                                break;
2751                        case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
2752                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2753                                MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
2754                                );
2755                                break;
2756                        default:
2757                                pr_info(MPT3SAS_FMT
2758                                 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2759                                 ioc->name, ioc->pdev->subsystem_device);
2760                                break;
2761                        }
2762                        break;
2763                default:
2764                        pr_info(MPT3SAS_FMT
2765                           "Cisco SAS HBA: Subsystem ID: 0x%X\n",
2766                           ioc->name, ioc->pdev->subsystem_device);
2767                        break;
2768                }
2769                break;
2770        case MPT2SAS_HP_3PAR_SSVID:
2771                switch (ioc->pdev->device) {
2772                case MPI2_MFGPAGE_DEVID_SAS2004:
2773                        switch (ioc->pdev->subsystem_device) {
2774                        case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2775                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2776                                    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2777                                break;
2778                        default:
2779                                pr_info(MPT3SAS_FMT
2780                                   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2781                                   ioc->name, ioc->pdev->subsystem_device);
2782                                break;
2783                        }
                        break;
2784                case MPI2_MFGPAGE_DEVID_SAS2308_2:
2785                        switch (ioc->pdev->subsystem_device) {
2786                        case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2787                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2788                                    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2789                                break;
2790                        case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2791                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2792                                    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2793                                break;
2794                        case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2795                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2796                                 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2797                                break;
2798                        case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2799                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2800                                    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2801                                break;
2802                        default:
2803                                pr_info(MPT3SAS_FMT
2804                                   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2805                                   ioc->name, ioc->pdev->subsystem_device);
2806                                break;
2807                        }
                        break;
2808                default:
2809                        pr_info(MPT3SAS_FMT
2810                           "HP SAS HBA: Subsystem ID: 0x%X\n",
2811                           ioc->name, ioc->pdev->subsystem_device);
2812                        break;
2813        }
            break;
2814        default:
2815                break;
2816        }
2817}
2818
2819/**
2820 * _base_display_ioc_capabilities - Display IOC's capabilities.
2821 * @ioc: per adapter object
2822 *
2823 * Return nothing.
2824 */
2825static void
2826_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2827{
2828        int i = 0;
2829        char desc[17] = {0};
2830        u32 iounit_pg1_flags;
2831        u32 bios_version;
2832
2833        bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2834        strncpy(desc, ioc->manu_pg0.ChipName, 16);
2835        pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2836           "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2837            ioc->name, desc,
2838           (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2839           (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2840           (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2841           ioc->facts.FWVersion.Word & 0x000000FF,
2842           ioc->pdev->revision,
2843           (bios_version & 0xFF000000) >> 24,
2844           (bios_version & 0x00FF0000) >> 16,
2845           (bios_version & 0x0000FF00) >> 8,
2846            bios_version & 0x000000FF);
2847
2848        _base_display_OEMs_branding(ioc);
2849
2850        pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2851
2852        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2853                pr_info("Initiator");
2854                i++;
2855        }
2856
2857        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2858                pr_info("%sTarget", i ? "," : "");
2859                i++;
2860        }
2861
2862        i = 0;
2863        pr_info("), ");
2864        pr_info("Capabilities=(");
2865
2866        if (!ioc->hide_ir_msg) {
2867                if (ioc->facts.IOCCapabilities &
2868                    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2869                        pr_info("Raid");
2870                        i++;
2871                }
2872        }
2873
2874        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2875                pr_info("%sTLR", i ? "," : "");
2876                i++;
2877        }
2878
2879        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2880                pr_info("%sMulticast", i ? "," : "");
2881                i++;
2882        }
2883
2884        if (ioc->facts.IOCCapabilities &
2885            MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2886                pr_info("%sBIDI Target", i ? "," : "");
2887                i++;
2888        }
2889
2890        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2891                pr_info("%sEEDP", i ? "," : "");
2892                i++;
2893        }
2894
2895        if (ioc->facts.IOCCapabilities &
2896            MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2897                pr_info("%sSnapshot Buffer", i ? "," : "");
2898                i++;
2899        }
2900
2901        if (ioc->facts.IOCCapabilities &
2902            MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2903                pr_info("%sDiag Trace Buffer", i ? "," : "");
2904                i++;
2905        }
2906
2907        if (ioc->facts.IOCCapabilities &
2908            MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2909                pr_info("%sDiag Extended Buffer", i ? "," : "");
2910                i++;
2911        }
2912
2913        if (ioc->facts.IOCCapabilities &
2914            MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2915                pr_info("%sTask Set Full", i ? "," : "");
2916                i++;
2917        }
2918
2919        iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2920        if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2921                pr_info("%sNCQ", i ? "," : "");
2922                i++;
2923        }
2924
2925        pr_info(")\n");
2926}
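
    /*
     * Illustrative only: on a typical SAS3 HBA the pr_info calls above
     * combine into console output along the lines of
     *
     *   mpt3sas_cm0: Protocol=(Initiator), Capabilities=(Raid,TLR,EEDP,
     *   Snapshot Buffer,Diag Trace Buffer,Task Set Full,NCQ)
     *
     * The exact list depends on the IOCFacts.IOCCapabilities bits reported
     * by the firmware.
     */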
2927
2928/**
2929 * mpt3sas_base_update_missing_delay - change the missing delay timers
2930 * @ioc: per adapter object
2931 * @device_missing_delay: time in seconds before a device is reported missing
2932 * @io_missing_delay: time in seconds that outstanding IO to a missing
 *	device is held before being returned
2933 *
2934 * Return nothing.
2935 *
2936 * Using the delays passed on the command line, this function modifies the
2937 * device missing delay as well as the IO missing delay. It should be called
2938 * at driver load time.
2939 */
2940void
2941mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
2942        u16 device_missing_delay, u8 io_missing_delay)
2943{
2944        u16 dmd, dmd_new, dmd_original;
2945        u8 io_missing_delay_original;
2946        u16 sz;
2947        Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2948        Mpi2ConfigReply_t mpi_reply;
2949        u8 num_phys = 0;
2950        u16 ioc_status;
2951
2952        mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
2953        if (!num_phys)
2954                return;
2955
2956        sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2957            sizeof(Mpi2SasIOUnit1PhyData_t));
2958        sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2959        if (!sas_iounit_pg1) {
2960                pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2961                    ioc->name, __FILE__, __LINE__, __func__);
2962                goto out;
2963        }
2964        if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2965            sas_iounit_pg1, sz))) {
2966                pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2967                    ioc->name, __FILE__, __LINE__, __func__);
2968                goto out;
2969        }
2970        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2971            MPI2_IOCSTATUS_MASK;
2972        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2973                pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2974                    ioc->name, __FILE__, __LINE__, __func__);
2975                goto out;
2976        }
2977
2978        /* device missing delay */
2979        dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2980        if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2981                dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2982        else
2983                dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2984        dmd_original = dmd;
2985        if (device_missing_delay > 0x7F) {
2986                dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2987                    device_missing_delay;
2988                dmd = dmd / 16;
2989                dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2990        } else
2991                dmd = device_missing_delay;
2992        sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
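            /*
             * Worked example (hypothetical value): device_missing_delay = 300.
             * Since 300 > 0x7F it is stored in 16-second units: 300 / 16 = 18
             * with MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set, giving an
             * effective delay of 18 * 16 = 288 seconds. Values above 0x7F0
             * (2032) are first clamped to 0x7F0.
             */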
2993
2994        /* io missing delay */
2995        io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2996        sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2997
2998        if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2999            sz)) {
3000                if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
3001                        dmd_new = (dmd &
3002                            MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
3003                else
3004                        dmd_new =
3005                    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
3006                pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
3007                        ioc->name, dmd_original, dmd_new);
3008                pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
3009                        ioc->name, io_missing_delay_original,
3010                    io_missing_delay);
3011                ioc->device_missing_delay = dmd_new;
3012                ioc->io_missing_delay = io_missing_delay;
3013        }
3014
3015out:
3016        kfree(sas_iounit_pg1);
3017}
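
    /*
     * Illustrative call sequence (a sketch, not a verbatim excerpt from this
     * driver): the SCSI host module is expected to invoke the routine above
     * once at load time with its command-line delays, e.g.
     *
     *	if (missing_delay[0] != -1 && missing_delay[1] != -1)
     *		mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
     *		    missing_delay[1]);
     *
     * where missing_delay[] is assumed to hold the module-parameter values.
     */
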
3018/**
3019 * _base_static_config_pages - static start of day config pages
3020 * @ioc: per adapter object
3021 *
3022 * Return nothing.
3023 */
3024static void
3025_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
3026{
3027        Mpi2ConfigReply_t mpi_reply;
3028        u32 iounit_pg1_flags;
3029
3030        mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
3031        if (ioc->ir_firmware)
3032                mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
3033                    &ioc->manu_pg10);
3034
3035        /*
3036         * Ensure correct T10 PI operation if vendor left EEDPTagMode
3037         * flag unset in NVDATA.
3038         */
3039        mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
3040        if (ioc->manu_pg11.EEDPTagMode == 0) {
3041                pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
3042                    ioc->name);
3043                ioc->manu_pg11.EEDPTagMode &= ~0x3;
3044                ioc->manu_pg11.EEDPTagMode |= 0x1;
3045                mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
3046                    &ioc->manu_pg11);
3047        }
3048
3049        mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
3050        mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
3051        mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
3052        mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
3053        mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
3054        mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
3055        _base_display_ioc_capabilities(ioc);
3056
3057        /*
3058         * Enable task_set_full handling in iounit_pg1 when the
3059         * facts capabilities indicate that it is supported.
3060         */
3061        iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
3062        if ((ioc->facts.IOCCapabilities &
3063            MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
3064                iounit_pg1_flags &=
3065                    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
3066        else
3067                iounit_pg1_flags |=
3068                    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
3069        ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
3070        mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
3071
3072        if (ioc->iounit_pg8.NumSensors)
3073                ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
3074}
3075
3076/**
3077 * _base_release_memory_pools - release memory
3078 * @ioc: per adapter object
3079 *
3080 * Free memory allocated from _base_allocate_memory_pools.
3081 *
3082 * Return nothing.
3083 */
3084static void
3085_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3086{
3087        int i = 0;
3088        struct reply_post_struct *rps;
3089
3090        dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3091            __func__));
3092
3093        if (ioc->request) {
3094                pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
3095                    ioc->request,  ioc->request_dma);
3096                dexitprintk(ioc, pr_info(MPT3SAS_FMT
3097                        "request_pool(0x%p): free\n",
3098                        ioc->name, ioc->request));
3099                ioc->request = NULL;
3100        }
3101
3102        if (ioc->sense) {
3103                pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
3104                if (ioc->sense_dma_pool)
3105                        pci_pool_destroy(ioc->sense_dma_pool);
3106                dexitprintk(ioc, pr_info(MPT3SAS_FMT
3107                        "sense_pool(0x%p): free\n",
3108                        ioc->name, ioc->sense));
3109                ioc->sense = NULL;
3110        }
3111
3112        if (ioc->reply) {
3113                pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
3114                if (ioc->reply_dma_pool)
3115                        pci_pool_destroy(ioc->reply_dma_pool);
3116                dexitprintk(ioc, pr_info(MPT3SAS_FMT
3117                        "reply_pool(0x%p): free\n",
3118                        ioc->name, ioc->reply));
3119                ioc->reply = NULL;
3120        }
3121
3122        if (ioc->reply_free) {
3123                pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
3124                    ioc->reply_free_dma);
3125                if (ioc->reply_free_dma_pool)
3126                        pci_pool_destroy(ioc->reply_free_dma_pool);
3127                dexitprintk(ioc, pr_info(MPT3SAS_FMT
3128                        "reply_free_pool(0x%p): free\n",
3129                        ioc->name, ioc->reply_free));
3130                ioc->reply_free = NULL;
3131        }
3132
3133        if (ioc->reply_post) {
3134                do {
3135                        rps = &ioc->reply_post[i];
3136                        if (rps->reply_post_free) {
3137                                pci_pool_free(
3138                                    ioc->reply_post_free_dma_pool,
3139                                    rps->reply_post_free,
3140                                    rps->reply_post_free_dma);
3141                                dexitprintk(ioc, pr_info(MPT3SAS_FMT
3142                                    "reply_post_free_pool(0x%p): free\n",
3143                                    ioc->name, rps->reply_post_free));
3144                                rps->reply_post_free = NULL;
3145                        }
3146                } while (ioc->rdpq_array_enable &&
3147                           (++i < ioc->reply_queue_count));
3148
3149                if (ioc->reply_post_free_dma_pool)
3150                        pci_pool_destroy(ioc->reply_post_free_dma_pool);
3151                kfree(ioc->reply_post);
3152        }
3153
3154        if (ioc->config_page) {
3155                dexitprintk(ioc, pr_info(MPT3SAS_FMT
3156                    "config_page(0x%p): free\n", ioc->name,
3157                    ioc->config_page));
3158                pci_free_consistent(ioc->pdev, ioc->config_page_sz,
3159                    ioc->config_page, ioc->config_page_dma);
3160        }
3161
3162        if (ioc->scsi_lookup) {
3163                free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
3164                ioc->scsi_lookup = NULL;
3165        }
3166        kfree(ioc->hpr_lookup);
3167        kfree(ioc->internal_lookup);
3168        if (ioc->chain_lookup) {
3169                for (i = 0; i < ioc->chain_depth; i++) {
3170                        if (ioc->chain_lookup[i].chain_buffer)
3171                                pci_pool_free(ioc->chain_dma_pool,
3172                                    ioc->chain_lookup[i].chain_buffer,
3173                                    ioc->chain_lookup[i].chain_buffer_dma);
3174                }
3175                if (ioc->chain_dma_pool)
3176                        pci_pool_destroy(ioc->chain_dma_pool);
3177                free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
3178                ioc->chain_lookup = NULL;
3179        }
3180}
3181
3182/**
3183 * _base_allocate_memory_pools - allocate start of day memory pools
3184 * @ioc: per adapter object
3185 *
3186 * Returns 0 success, anything else error
3187 */
3188static int
3189_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3190{
3191        struct mpt3sas_facts *facts;
3192        u16 max_sge_elements;
3193        u16 chains_needed_per_io;
3194        u32 sz, total_sz, reply_post_free_sz;
3195        u32 retry_sz;
3196        u16 max_request_credit;
3197        unsigned short sg_tablesize;
3198        u16 sge_size;
3199        int i;
3200
3201        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3202            __func__));
3203
3204
3205        retry_sz = 0;
3206        facts = &ioc->facts;
3207
3208        /* command line tunables for max sgl entries */
3209        if (max_sgl_entries != -1)
3210                sg_tablesize = max_sgl_entries;
3211        else {
3212                if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
3213                        sg_tablesize = MPT2SAS_SG_DEPTH;
3214                else
3215                        sg_tablesize = MPT3SAS_SG_DEPTH;
3216        }
3217
3218        if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
3219                sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
3220        else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
3221                sg_tablesize = min_t(unsigned short, sg_tablesize,
3222                                      SG_MAX_SEGMENTS);
3223                pr_warn(MPT3SAS_FMT
3224                 "sg_tablesize(%u) is bigger than kernel"
3225                 " defined SG_CHUNK_SIZE(%u)\n", ioc->name,
3226                 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
3227        }
3228        ioc->shost->sg_tablesize = sg_tablesize;
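            /*
             * Example (hypothetical numbers): max_sgl_entries=256 with
             * MPT_MAX_PHYS_SEGMENTS assumed to be 128 exceeds the upper
             * bound, so the value is only capped at SG_MAX_SEGMENTS and the
             * warning above is printed; sg_tablesize ends up as 256.
             */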
3229
3230        ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
3231                (facts->RequestCredit / 4));
3232        if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
3233                if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
3234                                INTERNAL_SCSIIO_CMDS_COUNT)) {
3235                        pr_err(MPT3SAS_FMT
3236                            "IOC doesn't have enough Request Credits, it has only %d credits\n",
3237                            ioc->name, facts->RequestCredit);
3238                        return -ENOMEM;
3239                }
3240                ioc->internal_depth = 10;
3241        }
3242
3243        ioc->hi_priority_depth = ioc->internal_depth - (5);
3244        /* command line tunables  for max controller queue depth */
3245        if (max_queue_depth != -1 && max_queue_depth != 0) {
3246                max_request_credit = min_t(u16, max_queue_depth +
3247                        ioc->internal_depth, facts->RequestCredit);
3248                if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
3249                        max_request_credit =  MAX_HBA_QUEUE_DEPTH;
3250        } else
3251                max_request_credit = min_t(u16, facts->RequestCredit,
3252                    MAX_HBA_QUEUE_DEPTH);
3253
3254        /* Firmware maintains additional facts->HighPriorityCredit number of
3255         * credits for High Priority Request messages, so hba queue depth will be
3256         * sum of max_request_credit and high priority queue depth.
3257         */
3258        ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
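            /*
             * Worked example (hypothetical IOCFacts values):
             * RequestCredit = 3440, HighPriorityCredit = 104.
             * internal_depth = min(104 + 5, 3440 / 4) = 109,
             * hi_priority_depth = 109 - 5 = 104, max_request_credit =
             * min(3440, MAX_HBA_QUEUE_DEPTH) = 3440, so hba_queue_depth =
             * 3440 + 104 = 3544.
             */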
3259
3260        /* request frame size */
3261        ioc->request_sz = facts->IOCRequestFrameSize * 4;
3262
3263        /* reply frame size */
3264        ioc->reply_sz = facts->ReplyFrameSize * 4;
3265
3266        /* chain segment size */
3267        if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3268                if (facts->IOCMaxChainSegmentSize)
3269                        ioc->chain_segment_sz =
3270                                        facts->IOCMaxChainSegmentSize *
3271                                        MAX_CHAIN_ELEMT_SZ;
3272                else
3273                /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
3274                        ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
3275                                                    MAX_CHAIN_ELEMT_SZ;
3276        } else
3277                ioc->chain_segment_sz = ioc->request_sz;
3278
3279        /* calculate the max scatter element size */
3280        sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
3281
3282 retry_allocation:
3283        total_sz = 0;
3284        /* calculate number of sg elements left over in the 1st frame */
3285        max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
3286            sizeof(Mpi2SGEIOUnion_t)) + sge_size);
3287        ioc->max_sges_in_main_message = max_sge_elements/sge_size;
3288
3289        /* now do the same for a chain buffer */
3290        max_sge_elements = ioc->chain_segment_sz - sge_size;
3291        ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
3292
3293        /*
3294         *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
3295         */
3296        chains_needed_per_io = ((ioc->shost->sg_tablesize -
3297           ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
3298            + 1;
3299        if (chains_needed_per_io > facts->MaxChainDepth) {
3300                chains_needed_per_io = facts->MaxChainDepth;
3301                ioc->shost->sg_tablesize = min_t(u16,
3302                ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
3303                * chains_needed_per_io), ioc->shost->sg_tablesize);
3304        }
3305        ioc->chains_needed_per_io = chains_needed_per_io;
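            /*
             * Worked example (hypothetical sizes): with sg_tablesize = 128,
             * max_sges_in_main_message = 3 and max_sges_in_chain_message = 7,
             * chains_needed_per_io = (128 - 3) / 7 + 1 = 18, provided this
             * does not exceed facts->MaxChainDepth.
             */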
3306
3307        /* reply free queue sizing - taking into account for 64 FW events */
3308        ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3309
3310        /* calculate reply descriptor post queue depth */
3311        ioc->reply_post_queue_depth = ioc->hba_queue_depth +
3312                                ioc->reply_free_queue_depth + 1;
3313        /* align the reply post queue on the next 16 count boundary */
3314        if (ioc->reply_post_queue_depth % 16)
3315                ioc->reply_post_queue_depth += 16 -
3316                (ioc->reply_post_queue_depth % 16);
3317
3318        if (ioc->reply_post_queue_depth >
3319            facts->MaxReplyDescriptorPostQueueDepth) {
3320                ioc->reply_post_queue_depth =
3321                                facts->MaxReplyDescriptorPostQueueDepth -
3322                    (facts->MaxReplyDescriptorPostQueueDepth % 16);
3323                ioc->hba_queue_depth =
3324                                ((ioc->reply_post_queue_depth - 64) / 2) - 1;
3325                ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3326        }
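            /*
             * Continuing the hypothetical example: hba_queue_depth = 3544
             * gives reply_free_queue_depth = 3608 and a raw post queue depth
             * of 3544 + 3608 + 1 = 7153, rounded up to the next multiple of
             * 16, i.e. 7168, and then clamped against
             * facts->MaxReplyDescriptorPostQueueDepth if necessary.
             */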
3327
3328        dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
3329            "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
3330            "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
3331            ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
3332            ioc->chains_needed_per_io));
3333
3334        /* reply post queue, 16 byte align */
3335        reply_post_free_sz = ioc->reply_post_queue_depth *
3336            sizeof(Mpi2DefaultReplyDescriptor_t);
3337
3338        sz = reply_post_free_sz;
3339        if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
3340                sz *= ioc->reply_queue_count;
3341
3342        ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
3343            (ioc->reply_queue_count):1,
3344            sizeof(struct reply_post_struct), GFP_KERNEL);
3345
3346        if (!ioc->reply_post) {
3347                pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
3348                        ioc->name);
3349                goto out;
3350        }
3351        ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
3352            ioc->pdev, sz, 16, 0);
3353        if (!ioc->reply_post_free_dma_pool) {
3354                pr_err(MPT3SAS_FMT
3355                 "reply_post_free pool: pci_pool_create failed\n",
3356                 ioc->name);
3357                goto out;
3358        }
3359        i = 0;
3360        do {
3361                ioc->reply_post[i].reply_post_free =
3362                    pci_pool_alloc(ioc->reply_post_free_dma_pool,
3363                    GFP_KERNEL,
3364                    &ioc->reply_post[i].reply_post_free_dma);
3365                if (!ioc->reply_post[i].reply_post_free) {
3366                        pr_err(MPT3SAS_FMT
3367                        "reply_post_free pool: pci_pool_alloc failed\n",
3368                        ioc->name);
3369                        goto out;
3370                }
3371                memset(ioc->reply_post[i].reply_post_free, 0, sz);
3372                dinitprintk(ioc, pr_info(MPT3SAS_FMT
3373                    "reply post free pool (0x%p): depth(%d), "
3374                    "element_size(%d), pool_size(%d kB)\n", ioc->name,
3375                    ioc->reply_post[i].reply_post_free,
3376                    ioc->reply_post_queue_depth, 8, sz/1024));
3377                dinitprintk(ioc, pr_info(MPT3SAS_FMT
3378                    "reply_post_free_dma = (0x%llx)\n", ioc->name,
3379                    (unsigned long long)
3380                    ioc->reply_post[i].reply_post_free_dma));
3381                total_sz += sz;
3382        } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
3383
3384        if (ioc->dma_mask == 64) {
3385                if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
3386                        pr_warn(MPT3SAS_FMT
3387                            "no suitable consistent DMA mask for %s\n",
3388                            ioc->name, pci_name(ioc->pdev));
3389                        goto out;
3390                }
3391        }
3392
3393        ioc->scsiio_depth = ioc->hba_queue_depth -
3394            ioc->hi_priority_depth - ioc->internal_depth;
3395
3396        /* set the scsi host can_queue depth
3397         * with some internal commands that could be outstanding
3398         */
3399        ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
3400        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3401                "scsi host: can_queue depth (%d)\n",
3402                ioc->name, ioc->shost->can_queue));
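            /*
             * Continuing the hypothetical example: scsiio_depth =
             * 3544 - 104 - 109 = 3331, and can_queue is that value minus the
             * handful of internal SCSI IO commands reserved by the driver
             * (INTERNAL_SCSIIO_CMDS_COUNT).
             */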
3403
3404
3405        /* contiguous pool for request and chains, 16 byte align, one extra
3406         * frame for smid=0
3407         */
3408        ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
3409        sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
3410
3411        /* hi-priority queue */
3412        sz += (ioc->hi_priority_depth * ioc->request_sz);
3413
3414        /* internal queue */
3415        sz += (ioc->internal_depth * ioc->request_sz);
3416
3417        ioc->request_dma_sz = sz;
3418        ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
3419        if (!ioc->request) {
3420                pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3421                    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3422                    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
3423                    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3424                if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
3425                        goto out;
3426                retry_sz = 64;
3427                ioc->hba_queue_depth -= retry_sz;
3428                _base_release_memory_pools(ioc);
3429                goto retry_allocation;
3430        }
3431
3432        if (retry_sz)
3433                pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3434                    "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3435                    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
3436                    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3437
3438        /* hi-priority queue */
3439        ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
3440            ioc->request_sz);
3441        ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
3442            ioc->request_sz);
3443
3444        /* internal queue */
3445        ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
3446            ioc->request_sz);
3447        ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
3448            ioc->request_sz);
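            /*
             * Layout of the single contiguous "request" allocation
             * (descriptive summary of the pointer math above), each frame
             * being ioc->request_sz bytes; the extra first frame is reserved
             * for smid 0:
             *
             *   [ scsiio_depth + 1 ][ hi_priority_depth ][ internal_depth ]
             *   ^ioc->request        ^ioc->hi_priority    ^ioc->internal
             */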
3449
3450        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3451                "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3452                ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
3453            (ioc->hba_queue_depth * ioc->request_sz)/1024));
3454
3455        dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
3456            ioc->name, (unsigned long long) ioc->request_dma));
3457        total_sz += sz;
3458
3459        sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
3460        ioc->scsi_lookup_pages = get_order(sz);
3461        ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
3462            GFP_KERNEL, ioc->scsi_lookup_pages);
3463        if (!ioc->scsi_lookup) {
3464                pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
3465                        ioc->name, (int)sz);
3466                goto out;
3467        }
3468
3469        dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
3470                ioc->name, ioc->request, ioc->scsiio_depth));
3471
3472        ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
3473        sz = ioc->chain_depth * sizeof(struct chain_tracker);
3474        ioc->chain_pages = get_order(sz);
3475        ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
3476            GFP_KERNEL, ioc->chain_pages);
3477        if (!ioc->chain_lookup) {
3478                pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
3479                        ioc->name);
3480                goto out;
3481        }
3482        ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
3483            ioc->chain_segment_sz, 16, 0);
3484        if (!ioc->chain_dma_pool) {
3485                pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
3486                        ioc->name);
3487                goto out;
3488        }
3489        for (i = 0; i < ioc->chain_depth; i++) {
3490                ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
3491                    ioc->chain_dma_pool , GFP_KERNEL,
3492                    &ioc->chain_lookup[i].chain_buffer_dma);
3493                if (!ioc->chain_lookup[i].chain_buffer) {
3494                        ioc->chain_depth = i;
3495                        goto chain_done;
3496                }
3497                total_sz += ioc->chain_segment_sz;
3498        }
3499 chain_done:
3500        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3501                "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
3502                ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
3503                ((ioc->chain_depth *  ioc->chain_segment_sz))/1024));
3504
3505        /* initialize hi-priority queue smid's */
3506        ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
3507            sizeof(struct request_tracker), GFP_KERNEL);
3508        if (!ioc->hpr_lookup) {
3509                pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
3510                    ioc->name);
3511                goto out;
3512        }
3513        ioc->hi_priority_smid = ioc->scsiio_depth + 1;
3514        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3515                "hi_priority(0x%p): depth(%d), start smid(%d)\n",
3516                ioc->name, ioc->hi_priority,
3517            ioc->hi_priority_depth, ioc->hi_priority_smid));
3518
3519        /* initialize internal queue smid's */
3520        ioc->internal_lookup = kcalloc(ioc->internal_depth,
3521            sizeof(struct request_tracker), GFP_KERNEL);
3522        if (!ioc->internal_lookup) {
3523                pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
3524                    ioc->name);
3525                goto out;
3526        }
3527        ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
3528        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3529                "internal(0x%p): depth(%d), start smid(%d)\n",
3530                ioc->name, ioc->internal,
3531            ioc->internal_depth, ioc->internal_smid));
3532
3533        /* sense buffers, 4 byte align */
3534        sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
3535        ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
3536            0);
3537        if (!ioc->sense_dma_pool) {
3538                pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
3539                    ioc->name);
3540                goto out;
3541        }
3542        ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
3543            &ioc->sense_dma);
3544        if (!ioc->sense) {
3545                pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
3546                    ioc->name);
3547                goto out;
3548        }
3549        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3550            "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
3551            "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
3552            SCSI_SENSE_BUFFERSIZE, sz/1024));
3553        dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
3554            ioc->name, (unsigned long long)ioc->sense_dma));
3555        total_sz += sz;
3556
3557        /* reply pool, 4 byte align */
3558        sz = ioc->reply_free_queue_depth * ioc->reply_sz;
3559        ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
3560            0);
3561        if (!ioc->reply_dma_pool) {
3562                pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
3563                    ioc->name);
3564                goto out;
3565        }
3566        ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
3567            &ioc->reply_dma);
3568        if (!ioc->reply) {
3569                pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
3570                    ioc->name);
3571                goto out;
3572        }
3573        ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
3574        ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
3575        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3576                "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3577                ioc->name, ioc->reply,
3578            ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
3579        dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
3580            ioc->name, (unsigned long long)ioc->reply_dma));
3581        total_sz += sz;
3582
3583        /* reply free queue, 16 byte align */
3584        sz = ioc->reply_free_queue_depth * 4;
3585        ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
3586            ioc->pdev, sz, 16, 0);
3587        if (!ioc->reply_free_dma_pool) {
3588                pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
3589                        ioc->name);
3590                goto out;
3591        }
3592        ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
3593            &ioc->reply_free_dma);
3594        if (!ioc->reply_free) {
3595                pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
3596                        ioc->name);
3597                goto out;
3598        }
3599        memset(ioc->reply_free, 0, sz);
3600        dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
3601            "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
3602            ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
3603        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3604                "reply_free_dma (0x%llx)\n",
3605                ioc->name, (unsigned long long)ioc->reply_free_dma));
3606        total_sz += sz;
3607
3608        ioc->config_page_sz = 512;
3609        ioc->config_page = pci_alloc_consistent(ioc->pdev,
3610            ioc->config_page_sz, &ioc->config_page_dma);
3611        if (!ioc->config_page) {
3612                pr_err(MPT3SAS_FMT
3613                        "config page: pci_alloc_consistent failed\n",
3614                        ioc->name);
3615                goto out;
3616        }
3617        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3618                "config page(0x%p): size(%d)\n",
3619                ioc->name, ioc->config_page, ioc->config_page_sz));
3620        dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
3621                ioc->name, (unsigned long long)ioc->config_page_dma));
3622        total_sz += ioc->config_page_sz;
3623
3624        pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
3625            ioc->name, total_sz/1024);
3626        pr_info(MPT3SAS_FMT
3627                "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
3628            ioc->name, ioc->shost->can_queue, facts->RequestCredit);
3629        pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
3630            ioc->name, ioc->shost->sg_tablesize);
3631        return 0;
3632
3633 out:
3634        return -ENOMEM;
3635}
3636
3637/**
3638 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
3639 * @ioc: Pointer to MPT3SAS_ADAPTER structure
3640 * @cooked: Request raw or cooked IOC state
3641 *
3642 * Returns all IOC Doorbell register bits if cooked==0, else just the
3643 * Doorbell bits in MPI2_IOC_STATE_MASK.
3644 */
3645u32
3646mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
3647{
3648        u32 s, sc;
3649
3650        s = readl(&ioc->chip->Doorbell);
3651        sc = s & MPI2_IOC_STATE_MASK;
3652        return cooked ? sc : s;
3653}
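
    /*
     * Illustrative use (a sketch): callers typically poll the cooked state,
     * e.g.
     *
     *	if (mpt3sas_base_get_iocstate(ioc, 1) == MPI2_IOC_STATE_OPERATIONAL)
     *		return 0;
     */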
3654
3655/**
3656 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
3657 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
3658 * @timeout: timeout in seconds
3659 *
3660 * Returns 0 for success, non-zero for failure.
3661 */
3662static int
3663_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
3664{
3665        u32 count, cntdn;
3666        u32 current_state;
3667
3668        count = 0;
3669        cntdn = 1000 * timeout;
3670        do {
3671                current_state = mpt3sas_base_get_iocstate(ioc, 1);
3672                if (current_state == ioc_state)
3673                        return 0;
3674                if (count && current_state == MPI2_IOC_STATE_FAULT)
3675                        break;
3676
3677                usleep_range(1000, 1500);
3678                count++;
3679        } while (--cntdn);
3680
3681        return current_state;
3682}
3683
3684/**
3685 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
3686 * a write to the doorbell)
3687 * @ioc: per adapter object
3688 * @timeout: timeout in seconds
3689 *
3690 * Returns 0 for success, non-zero for failure.
3691 *
3692 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3693 */
3694static int
3695_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3696
3697static int
3698_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
3699{
3700        u32 cntdn, count;
3701        u32 int_status;
3702
3703        count = 0;
3704        cntdn = 1000 * timeout;
3705        do {
3706                int_status = readl(&ioc->chip->HostInterruptStatus);
3707                if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3708                        dhsprintk(ioc, pr_info(MPT3SAS_FMT
3709                                "%s: successful count(%d), timeout(%d)\n",
3710                                ioc->name, __func__, count, timeout));
3711                        return 0;
3712                }
3713
3714                usleep_range(1000, 1500);
3715                count++;
3716        } while (--cntdn);
3717
3718        pr_err(MPT3SAS_FMT
3719                "%s: failed due to timeout count(%d), int_status(%x)!\n",
3720                ioc->name, __func__, count, int_status);
3721        return -EFAULT;
3722}
3723
3724static int
3725_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
3726{
3727        u32 cntdn, count;
3728        u32 int_status;
3729
3730        count = 0;
3731        cntdn = 2000 * timeout;
3732        do {
3733                int_status = readl(&ioc->chip->HostInterruptStatus);
3734                if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3735                        dhsprintk(ioc, pr_info(MPT3SAS_FMT
3736                                "%s: successful count(%d), timeout(%d)\n",
3737                                ioc->name, __func__, count, timeout));
3738                        return 0;
3739                }
3740
3741                udelay(500);
3742                count++;
3743        } while (--cntdn);
3744
3745        pr_err(MPT3SAS_FMT
3746                "%s: failed due to timeout count(%d), int_status(%x)!\n",
3747                ioc->name, __func__, count, int_status);
3748        return -EFAULT;
3749
3750}
3751
3752/**
3753 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3754 * @ioc: per adapter object
3755 * @timeout: timeout in seconds
3756 *
3757 * Returns 0 for success, non-zero for failure.
3758 *
3759 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3760 * doorbell.
3761 */
3762static int
3763_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
3764{
3765        u32 cntdn, count;
3766        u32 int_status;
3767        u32 doorbell;
3768
3769        count = 0;
3770        cntdn = 1000 * timeout;
3771        do {
3772                int_status = readl(&ioc->chip->HostInterruptStatus);
3773                if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3774                        dhsprintk(ioc, pr_info(MPT3SAS_FMT
3775                                "%s: successful count(%d), timeout(%d)\n",
3776                                ioc->name, __func__, count, timeout));
3777                        return 0;
3778                } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3779                        doorbell = readl(&ioc->chip->Doorbell);
3780                        if ((doorbell & MPI2_IOC_STATE_MASK) ==
3781                            MPI2_IOC_STATE_FAULT) {
3782                                mpt3sas_base_fault_info(ioc , doorbell);
3783                                return -EFAULT;
3784                        }
3785                } else if (int_status == 0xFFFFFFFF)
3786                        goto out;
3787
3788                usleep_range(1000, 1500);
3789                count++;
3790        } while (--cntdn);
3791
3792 out:
3793        pr_err(MPT3SAS_FMT
3794         "%s: failed due to timeout count(%d), int_status(%x)!\n",
3795         ioc->name, __func__, count, int_status);
3796        return -EFAULT;
3797}
3798
3799/**
3800 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3801 * @ioc: per adapter object
3802 * @timeout: timeout in seconds
3803 *
3804 * Returns 0 for success, non-zero for failure.
3805 *
3806 */
3807static int
3808_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
3809{
3810        u32 cntdn, count;
3811        u32 doorbell_reg;
3812
3813        count = 0;
3814        cntdn = 1000 * timeout;
3815        do {
3816                doorbell_reg = readl(&ioc->chip->Doorbell);
3817                if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3818                        dhsprintk(ioc, pr_info(MPT3SAS_FMT
3819                                "%s: successful count(%d), timeout(%d)\n",
3820                                ioc->name, __func__, count, timeout));
3821                        return 0;
3822                }
3823
3824                usleep_range(1000, 1500);
3825                count++;
3826        } while (--cntdn);
3827
3828        pr_err(MPT3SAS_FMT
3829                "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3830                ioc->name, __func__, count, doorbell_reg);
3831        return -EFAULT;
3832}
3833
3834/**
3835 * _base_send_ioc_reset - send doorbell reset
3836 * @ioc: per adapter object
3837 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3838 * @timeout: timeout in seconds
3839 *
3840 * Returns 0 for success, non-zero for failure.
3841 */
3842static int
3843_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
3844{
3845        u32 ioc_state;
3846        int r = 0;
3847
3848        if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3849                pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
3850                    ioc->name, __func__);
3851                return -EFAULT;
3852        }
3853
3854        if (!(ioc->facts.IOCCapabilities &
3855           MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3856                return -EFAULT;
3857
3858        pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
3859
3860        writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3861            &ioc->chip->Doorbell);
3862        if ((_base_wait_for_doorbell_ack(ioc, 15))) {
3863                r = -EFAULT;
3864                goto out;
3865        }
3866        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
3867        if (ioc_state) {
3868                pr_err(MPT3SAS_FMT
3869                        "%s: failed going to ready state (ioc_state=0x%x)\n",
3870                        ioc->name, __func__, ioc_state);
3871                r = -EFAULT;
3872                goto out;
3873        }
3874 out:
3875        pr_info(MPT3SAS_FMT "message unit reset: %s\n",
3876            ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3877        return r;
3878}
3879
3880/**
3881 * _base_handshake_req_reply_wait - send request thru doorbell interface
3882 * @ioc: per adapter object
3883 * @request_bytes: request length
3884 * @request: pointer having request payload
3885 * @reply_bytes: reply length
3886 * @reply: pointer to reply payload
3887 * @timeout: timeout in seconds
3888 *
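     * The handshake proceeds roughly as follows: the host writes the
     * HANDSHAKE function and request dword count to the Doorbell, waits for
     * the IOC to acknowledge, writes the request one dword at a time
     * (waiting for an ack after each), then reads the reply back from the
     * Doorbell one 16-bit word at a time, clearing HostInterruptStatus after
     * every word.
     *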
3889 * Returns 0 for success, non-zero for failure.
3890 */
3891static int
3892_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3893        u32 *request, int reply_bytes, u16 *reply, int timeout)
3894{
3895        MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3896        int i;
3897        u8 failed;
3898        __le32 *mfp;
3899
3900        /* make sure doorbell is not in use */
3901        if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3902                pr_err(MPT3SAS_FMT
3903                        "doorbell is in use (line=%d)\n",
3904                        ioc->name, __LINE__);
3905                return -EFAULT;
3906        }
3907
3908        /* clear pending doorbell interrupts from previous state changes */
3909        if (readl(&ioc->chip->HostInterruptStatus) &
3910            MPI2_HIS_IOC2SYS_DB_STATUS)
3911                writel(0, &ioc->chip->HostInterruptStatus);
3912
3913        /* send message to ioc */
3914        writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3915            ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3916            &ioc->chip->Doorbell);
3917
3918        if ((_base_spin_on_doorbell_int(ioc, 5))) {
3919                pr_err(MPT3SAS_FMT
3920                        "doorbell handshake int failed (line=%d)\n",
3921                        ioc->name, __LINE__);
3922                return -EFAULT;
3923        }
3924        writel(0, &ioc->chip->HostInterruptStatus);
3925
3926        if ((_base_wait_for_doorbell_ack(ioc, 5))) {
3927                pr_err(MPT3SAS_FMT
3928                        "doorbell handshake ack failed (line=%d)\n",
3929                        ioc->name, __LINE__);
3930                return -EFAULT;
3931        }
3932
3933        /* send message 32-bits at a time */
3934        for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3935                writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3936                if ((_base_wait_for_doorbell_ack(ioc, 5)))
3937                        failed = 1;
3938        }
3939
3940        if (failed) {
3941                pr_err(MPT3SAS_FMT
3942                        "doorbell handshake sending request failed (line=%d)\n",
3943                        ioc->name, __LINE__);
3944                return -EFAULT;
3945        }
3946
3947        /* now wait for the reply */
3948        if ((_base_wait_for_doorbell_int(ioc, timeout))) {
3949                pr_err(MPT3SAS_FMT
3950                        "doorbell handshake int failed (line=%d)\n",
3951                        ioc->name, __LINE__);
3952                return -EFAULT;
3953        }
3954
3955        /* read the first two 16-bit words of the reply; MsgLength in the
             * second word gives the total reply length */
3956        reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3957            & MPI2_DOORBELL_DATA_MASK);
3958        writel(0, &ioc->chip->HostInterruptStatus);
3959        if ((_base_wait_for_doorbell_int(ioc, 5))) {
3960                pr_err(MPT3SAS_FMT
3961                        "doorbell handshake int failed (line=%d)\n",
3962                        ioc->name, __LINE__);
3963                return -EFAULT;
3964        }
3965        reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3966            & MPI2_DOORBELL_DATA_MASK);
3967        writel(0, &ioc->chip->HostInterruptStatus);
3968
3969        for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3970                if ((_base_wait_for_doorbell_int(ioc, 5))) {
3971                        pr_err(MPT3SAS_FMT
3972                                "doorbell handshake int failed (line=%d)\n",
3973                                ioc->name, __LINE__);
3974                        return -EFAULT;
3975                }
3976                if (i >=  reply_bytes/2) /* overflow case */
3977                        readl(&ioc->chip->Doorbell);
3978                else
3979                        reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3980                            & MPI2_DOORBELL_DATA_MASK);
3981                writel(0, &ioc->chip->HostInterruptStatus);
3982        }
3983
3984        _base_wait_for_doorbell_int(ioc, 5);
3985        if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
3986                dhsprintk(ioc, pr_info(MPT3SAS_FMT
3987                        "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
3988        }
3989        writel(0, &ioc->chip->HostInterruptStatus);
3990
3991        if (ioc->logging_level & MPT_DEBUG_INIT) {
3992                mfp = (__le32 *)reply;
3993                pr_info("\toffset:data\n");
3994                for (i = 0; i < reply_bytes/4; i++)
3995                        pr_info("\t[0x%02x]:%08x\n", i*4,
3996                            le32_to_cpu(mfp[i]));
3997        }
3998        return 0;
3999}
4000
4001/**
4002 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
4003 * @ioc: per adapter object
4004 * @mpi_reply: the reply payload from FW
4005 * @mpi_request: the request payload sent to FW
4006 *
4007 * The SAS IO Unit Control Request message allows the host to perform
4008 * low-level operations, such as resets on the PHYs of the IO Unit. It also
4009 * allows the host to obtain the IOC-assigned device handle for a device,
4010 * given other identifying information about the device, and to remove IOC
4011 * resources associated with the device.
4012 *
4013 * Returns 0 for success, non-zero for failure.
4014 */
4015int
4016mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
4017        Mpi2SasIoUnitControlReply_t *mpi_reply,
4018        Mpi2SasIoUnitControlRequest_t *mpi_request)
4019{
4020        u16 smid;
4021        u32 ioc_state;
4022        bool issue_reset = false;
4023        int rc;
4024        void *request;
4025        u16 wait_state_count;
4026
4027        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4028            __func__));
4029
4030        mutex_lock(&ioc->base_cmds.mutex);
4031
4032        if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
4033                pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
4034                    ioc->name, __func__);
4035                rc = -EAGAIN;
4036                goto out;
4037        }
4038
4039        wait_state_count = 0;
4040        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4041        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4042                if (wait_state_count++ == 10) {
4043                        pr_err(MPT3SAS_FMT
4044                            "%s: failed due to ioc not operational\n",
4045                            ioc->name, __func__);
4046                        rc = -EFAULT;
4047                        goto out;
4048                }
4049                ssleep(1);
4050                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4051                pr_info(MPT3SAS_FMT
4052                        "%s: waiting for operational state(count=%d)\n",
4053                        ioc->name, __func__, wait_state_count);
4054        }
4055
4056        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4057        if (!smid) {
4058                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4059                    ioc->name, __func__);
4060                rc = -EAGAIN;
4061                goto out;
4062        }
4063
4064        rc = 0;
4065        ioc->base_cmds.status = MPT3_CMD_PENDING;
4066        request = mpt3sas_base_get_msg_frame(ioc, smid);
4067        ioc->base_cmds.smid = smid;
4068        memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
4069        if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4070            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
4071                ioc->ioc_link_reset_in_progress = 1;
4072        init_completion(&ioc->base_cmds.done);
4073        mpt3sas_base_put_smid_default(ioc, smid);
4074        wait_for_completion_timeout(&ioc->base_cmds.done,
4075            msecs_to_jiffies(10000));
4076        if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4077            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
4078            ioc->ioc_link_reset_in_progress)
4079                ioc->ioc_link_reset_in_progress = 0;
4080        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4081                pr_err(MPT3SAS_FMT "%s: timeout\n",
4082                    ioc->name, __func__);
4083                _debug_dump_mf(mpi_request,
4084                    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
4085                if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
4086                        issue_reset = true;
4087                goto issue_host_reset;
4088        }
4089        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4090                memcpy(mpi_reply, ioc->base_cmds.reply,
4091                    sizeof(Mpi2SasIoUnitControlReply_t));
4092        else
4093                memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
4094        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4095        goto out;
4096
4097 issue_host_reset:
4098        if (issue_reset)
4099                mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
4100        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4101        rc = -EFAULT;
4102 out:
4103        mutex_unlock(&ioc->base_cmds.mutex);
4104        return rc;
4105}
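
    /*
     * Illustrative caller sketch (field and constant names per the MPI2
     * headers; not a verbatim excerpt from this driver): issuing a link
     * reset on a phy would look roughly like
     *
     *	Mpi2SasIoUnitControlRequest_t req;
     *	Mpi2SasIoUnitControlReply_t reply;
     *
     *	memset(&req, 0, sizeof(req));
     *	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
     *	req.Operation = MPI2_SAS_OP_PHY_LINK_RESET;
     *	req.PhyNum = phy_number;
     *	if (mpt3sas_base_sas_iounit_control(ioc, &reply, &req))
     *		pr_err("phy link reset failed\n");
     */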
4106
4107/**
4108 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
4109 * @ioc: per adapter object
4110 * @mpi_reply: the reply payload from FW
4111 * @mpi_request: the request payload sent to FW
4112 *
4113 * The SCSI Enclosure Processor request message causes the IOC to
4114 * communicate with SES devices to control LED status signals.
4115 *
4116 * Returns 0 for success, non-zero for failure.
4117 */
4118int
4119mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4120        Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
4121{
4122        u16 smid;
4123        u32 ioc_state;
4124        bool issue_reset = false;
4125        int rc;
4126        void *request;
4127        u16 wait_state_count;
4128
4129        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4130            __func__));
4131
4132        mutex_lock(&ioc->base_cmds.mutex);
4133
4134        if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
4135                pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
4136                    ioc->name, __func__);
4137                rc = -EAGAIN;
4138                goto out;
4139        }
4140
4141        wait_state_count = 0;
4142        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4143        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4144                if (wait_state_count++ == 10) {
4145                        pr_err(MPT3SAS_FMT
4146                            "%s: failed due to ioc not operational\n",
4147                            ioc->name, __func__);
4148                        rc = -EFAULT;
4149                        goto out;
4150                }
4151                ssleep(1);
4152                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4153                pr_info(MPT3SAS_FMT
4154                        "%s: waiting for operational state(count=%d)\n",
4155                        ioc->name,
4156                    __func__, wait_state_count);
4157        }
4158
4159        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4160        if (!smid) {
4161                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4162                    ioc->name, __func__);
4163                rc = -EAGAIN;
4164                goto out;
4165        }
4166
4167        rc = 0;
4168        ioc->base_cmds.status = MPT3_CMD_PENDING;
4169        request = mpt3sas_base_get_msg_frame(ioc, smid);
4170        ioc->base_cmds.smid = smid;
4171        memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
4172        init_completion(&ioc->base_cmds.done);
4173        mpt3sas_base_put_smid_default(ioc, smid);
4174        wait_for_completion_timeout(&ioc->base_cmds.done,
4175            msecs_to_jiffies(10000));
4176        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4177                pr_err(MPT3SAS_FMT "%s: timeout\n",
4178                    ioc->name, __func__);
4179                _debug_dump_mf(mpi_request,
4180                    sizeof(Mpi2SepRequest_t)/4);
4181                if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
4182                        issue_reset = true;
4183                goto issue_host_reset;
4184        }
4185        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4186                memcpy(mpi_reply, ioc->base_cmds.reply,
4187                    sizeof(Mpi2SepReply_t));
4188        else
4189                memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
4190        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4191        goto out;
4192
4193 issue_host_reset:
4194        if (issue_reset)
4195                mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
4196        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4197        rc = -EFAULT;
4198 out:
4199        mutex_unlock(&ioc->base_cmds.mutex);
4200        return rc;
4201}
4202
4203/**
4204 * _base_get_port_facts - obtain port facts reply and save in ioc
4205 * @ioc: per adapter object
     * @port: port number
4206 *
4207 * Returns 0 for success, non-zero for failure.
4208 */
4209static int
4210_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
4211{
4212        Mpi2PortFactsRequest_t mpi_request;
4213        Mpi2PortFactsReply_t mpi_reply;
4214        struct mpt3sas_port_facts *pfacts;
4215        int mpi_reply_sz, mpi_request_sz, r;
4216
4217        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4218            __func__));
4219
4220        mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
4221        mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
4222        memset(&mpi_request, 0, mpi_request_sz);
4223        mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
4224        mpi_request.PortNumber = port;
4225        r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4226            (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
4227
4228        if (r != 0) {
4229                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4230                    ioc->name, __func__, r);
4231                return r;
4232        }
4233
4234        pfacts = &ioc->pfacts[port];
4235        memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
4236        pfacts->PortNumber = mpi_reply.PortNumber;
4237        pfacts->VP_ID = mpi_reply.VP_ID;
4238        pfacts->VF_ID = mpi_reply.VF_ID;
4239        pfacts->MaxPostedCmdBuffers =
4240            le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
4241
4242        return 0;
4243}
4244
4245/**
4246 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
4247 * @ioc: per adapter object
4248 * @timeout: timeout in seconds
4249 *
4250 * Returns 0 for success, non-zero for failure.
4251 */
4252static int
4253_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
4254{
4255        u32 ioc_state;
4256        int rc;
4257
4258        dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
4259            __func__));
4260
4261        if (ioc->pci_error_recovery) {
4262                dfailprintk(ioc, printk(MPT3SAS_FMT
4263                    "%s: host in pci error recovery\n", ioc->name, __func__));
4264                return -EFAULT;
4265        }
4266
4267        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4268        dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4269            ioc->name, __func__, ioc_state));
4270
4271        if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
4272            (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4273                return 0;
4274
4275        if (ioc_state & MPI2_DOORBELL_USED) {
4276                dhsprintk(ioc, printk(MPT3SAS_FMT
4277                    "unexpected doorbell active!\n", ioc->name));
4278                goto issue_diag_reset;
4279        }
4280
4281        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4282                mpt3sas_base_fault_info(ioc, ioc_state &
4283                    MPI2_DOORBELL_DATA_MASK);
4284                goto issue_diag_reset;
4285        }
4286
4287        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
4288        if (ioc_state) {
4289                dfailprintk(ioc, printk(MPT3SAS_FMT
4290                    "%s: failed going to ready state (ioc_state=0x%x)\n",
4291                    ioc->name, __func__, ioc_state));
4292                return -EFAULT;
4293        }
4294
4295 issue_diag_reset:
4296        rc = _base_diag_reset(ioc);
4297        return rc;
4298}
4299
4300/**
4301 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
4302 * @ioc: per adapter object
4303 *
4304 * Returns 0 for success, non-zero for failure.
4305 */
4306static int
4307_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
4308{
4309        Mpi2IOCFactsRequest_t mpi_request;
4310        Mpi2IOCFactsReply_t mpi_reply;
4311        struct mpt3sas_facts *facts;
4312        int mpi_reply_sz, mpi_request_sz, r;
4313
4314        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4315            __func__));
4316
4317        r = _base_wait_for_iocstate(ioc, 10);
4318        if (r) {
4319                dfailprintk(ioc, printk(MPT3SAS_FMT
4320                    "%s: failed getting to correct state\n",
4321                    ioc->name, __func__));
4322                return r;
4323        }
4324        mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
4325        mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
4326        memset(&mpi_request, 0, mpi_request_sz);
4327        mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
4328        r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4329            (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
4330
4331        if (r != 0) {
4332                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4333                    ioc->name, __func__, r);
4334                return r;
4335        }
4336
4337        facts = &ioc->facts;
4338        memset(facts, 0, sizeof(struct mpt3sas_facts));
4339        facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
4340        facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
4341        facts->VP_ID = mpi_reply.VP_ID;
4342        facts->VF_ID = mpi_reply.VF_ID;
4343        facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
4344        facts->MaxChainDepth = mpi_reply.MaxChainDepth;
4345        facts->WhoInit = mpi_reply.WhoInit;
4346        facts->NumberOfPorts = mpi_reply.NumberOfPorts;
4347        facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
4348        facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
4349        facts->MaxReplyDescriptorPostQueueDepth =
4350            le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
4351        facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
4352        facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
4353        if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
4354                ioc->ir_firmware = 1;
4355        if ((facts->IOCCapabilities &
4356              MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
4357                ioc->rdpq_array_capable = 1;
4358        facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
4359        facts->IOCRequestFrameSize =
4360            le16_to_cpu(mpi_reply.IOCRequestFrameSize);
4361        if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4362                facts->IOCMaxChainSegmentSize =
4363                        le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
4364        }
4365        facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
4366        facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
4367        ioc->shost->max_id = -1;
4368        facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
4369        facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
4370        facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
4371        facts->HighPriorityCredit =
4372            le16_to_cpu(mpi_reply.HighPriorityCredit);
4373        facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
4374        facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
4375
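            /* IOCRequestFrameSize and ReplyFrameSize are reported by the
             * IOC in units of 32-bit dwords, hence the multiply by 4 when
             * logging the sizes in bytes below.
             */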
4376        dinitprintk(ioc, pr_info(MPT3SAS_FMT
4377                "hba queue depth(%d), max chains per io(%d)\n",
4378                ioc->name, facts->RequestCredit,
4379            facts->MaxChainDepth));
4380        dinitprintk(ioc, pr_info(MPT3SAS_FMT
4381                "request frame size(%d), reply frame size(%d)\n", ioc->name,
4382            facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
4383        return 0;
4384}
4385
4386/**
4387 * _base_send_ioc_init - send ioc_init to firmware
4388 * @ioc: per adapter object
4389 *
4390 * Returns 0 for success, non-zero for failure.
4391 */
4392static int
4393_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
4394{
4395        Mpi2IOCInitRequest_t mpi_request;
4396        Mpi2IOCInitReply_t mpi_reply;
4397        int i, r = 0;
4398        ktime_t current_time;
4399        u16 ioc_status;
4400        u32 reply_post_free_array_sz = 0;
4401        Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
4402        dma_addr_t reply_post_free_array_dma;
4403
4404        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4405            __func__));
4406
4407        memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
4408        mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
4409        mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
4410        mpi_request.VF_ID = 0; /* TODO */
4411        mpi_request.VP_ID = 0;
4412        mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
4413        mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
4414
4415        if (_base_is_controller_msix_enabled(ioc))
4416                mpi_request.HostMSIxVectors = ioc->reply_queue_count;
4417        mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
4418        mpi_request.ReplyDescriptorPostQueueDepth =
4419            cpu_to_le16(ioc->reply_post_queue_depth);
4420        mpi_request.ReplyFreeQueueDepth =
4421            cpu_to_le16(ioc->reply_free_queue_depth);
4422
4423        mpi_request.SenseBufferAddressHigh =
4424            cpu_to_le32((u64)ioc->sense_dma >> 32);
4425        mpi_request.SystemReplyAddressHigh =
4426            cpu_to_le32((u64)ioc->reply_dma >> 32);
4427        mpi_request.SystemRequestFrameBaseAddress =
4428            cpu_to_le64((u64)ioc->request_dma);
4429        mpi_request.ReplyFreeQueueAddress =
4430            cpu_to_le64((u64)ioc->reply_free_dma);
4431
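            /* Reply Descriptor Post Queue addresses: in RDPQ array mode each
             * queue has its own DMA allocation, so the IOC is handed an array
             * of per-queue base addresses; otherwise all queues live in one
             * contiguous allocation and only the base of queue 0 is passed.
             */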
4432        if (ioc->rdpq_array_enable) {
4433                reply_post_free_array_sz = ioc->reply_queue_count *
4434                    sizeof(Mpi2IOCInitRDPQArrayEntry);
4435                reply_post_free_array = pci_alloc_consistent(ioc->pdev,
4436                        reply_post_free_array_sz, &reply_post_free_array_dma);
4437                if (!reply_post_free_array) {
4438                        pr_err(MPT3SAS_FMT
4439                        "reply_post_free_array: pci_alloc_consistent failed\n",
4440                        ioc->name);
4441                        r = -ENOMEM;
4442                        goto out;
4443                }
4444                memset(reply_post_free_array, 0, reply_post_free_array_sz);
4445                for (i = 0; i < ioc->reply_queue_count; i++)
4446                        reply_post_free_array[i].RDPQBaseAddress =
4447                            cpu_to_le64(
4448                                (u64)ioc->reply_post[i].reply_post_free_dma);
4449                mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
4450                mpi_request.ReplyDescriptorPostQueueAddress =
4451                    cpu_to_le64((u64)reply_post_free_array_dma);
4452        } else {
4453                mpi_request.ReplyDescriptorPostQueueAddress =
4454                    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
4455        }
4456
4457        /* This time stamp specifies the number of milliseconds
4458         * since epoch ~ midnight January 1, 1970.
4459         */
4460        current_time = ktime_get_real();
4461        mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
4462
4463        if (ioc->logging_level & MPT_DEBUG_INIT) {
4464                __le32 *mfp;
4465                int i;
4466
4467                mfp = (__le32 *)&mpi_request;
4468                pr_info("\toffset:data\n");
4469                for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
4470                        pr_info("\t[0x%02x]:%08x\n", i*4,
4471                            le32_to_cpu(mfp[i]));
4472        }
4473
4474        r = _base_handshake_req_reply_wait(ioc,
4475            sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
4476            sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
4477
4478        if (r != 0) {
4479                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4480                    ioc->name, __func__, r);
4481                goto out;
4482        }
4483
4484        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
4485        if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
4486            mpi_reply.IOCLogInfo) {
4487                pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
4488                r = -EIO;
4489        }
4490
4491out:
4492        if (reply_post_free_array)
4493                pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
4494                                    reply_post_free_array,
4495                                    reply_post_free_array_dma);
4496        return r;
4497}
4498
4499/**
4500 * mpt3sas_port_enable_done - command completion routine for port enable
4501 * @ioc: per adapter object
4502 * @smid: system request message index
4503 * @msix_index: MSIX table index supplied by the OS
4504 * @reply: reply message frame(lower 32bit addr)
4505 *
4506 * Return 1 meaning mf should be freed from _base_interrupt
4507 *        0 means the mf is freed from this function.
4508 */
4509u8
4510mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4511        u32 reply)
4512{
4513        MPI2DefaultReply_t *mpi_reply;
4514        u16 ioc_status;
4515
4516        if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
4517                return 1;
4518
4519        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4520        if (!mpi_reply)
4521                return 1;
4522
4523        if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
4524                return 1;
4525
4526        ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
4527        ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
4528        ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
4529        memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
4530        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4531        if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
4532                ioc->port_enable_failed = 1;
4533
4534        if (ioc->is_driver_loading) {
4535                if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4536                        mpt3sas_port_enable_complete(ioc);
4537                        return 1;
4538                } else {
4539                        ioc->start_scan_failed = ioc_status;
4540                        ioc->start_scan = 0;
4541                        return 1;
4542                }
4543        }
4544        complete(&ioc->port_enable_cmds.done);
4545        return 1;
4546}
4547
4548/**
4549 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
4550 * @ioc: per adapter object
4551 *
4552 * Returns 0 for success, non-zero for failure.
4553 */
4554static int
4555_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
4556{
4557        Mpi2PortEnableRequest_t *mpi_request;
4558        Mpi2PortEnableReply_t *mpi_reply;
4559        int r = 0;
4560        u16 smid;
4561        u16 ioc_status;
4562
4563        pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4564
4565        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4566                pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4567                    ioc->name, __func__);
4568                return -EAGAIN;
4569        }
4570
4571        smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4572        if (!smid) {
4573                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4574                    ioc->name, __func__);
4575                return -EAGAIN;
4576        }
4577
4578        ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4579        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4580        ioc->port_enable_cmds.smid = smid;
4581        memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4582        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4583
4584        init_completion(&ioc->port_enable_cmds.done);
4585        mpt3sas_base_put_smid_default(ioc, smid);
4586        wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
4587        if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
4588                pr_err(MPT3SAS_FMT "%s: timeout\n",
4589                    ioc->name, __func__);
4590                _debug_dump_mf(mpi_request,
4591                    sizeof(Mpi2PortEnableRequest_t)/4);
4592                if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
4593                        r = -EFAULT;
4594                else
4595                        r = -ETIME;
4596                goto out;
4597        }
4598
4599        mpi_reply = ioc->port_enable_cmds.reply;
4600        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4601        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4602                pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
4603                    ioc->name, __func__, ioc_status);
4604                r = -EFAULT;
4605                goto out;
4606        }
4607
4608 out:
4609        ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4610        pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
4611            "SUCCESS" : "FAILED"));
4612        return r;
4613}
4614
4615/**
4616 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
4617 * @ioc: per adapter object
4618 *
4619 * Returns 0 for success, non-zero for failure.
4620 */
4621int
4622mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
4623{
4624        Mpi2PortEnableRequest_t *mpi_request;
4625        u16 smid;
4626
4627        pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4628
4629        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4630                pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4631                    ioc->name, __func__);
4632                return -EAGAIN;
4633        }
4634
4635        smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4636        if (!smid) {
4637                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4638                    ioc->name, __func__);
4639                return -EAGAIN;
4640        }
4641
4642        ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4643        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4644        ioc->port_enable_cmds.smid = smid;
4645        memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4646        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4647
4648        mpt3sas_base_put_smid_default(ioc, smid);
4649        return 0;
4650}
4651
4652/**
4653 * _base_determine_wait_on_discovery - decide whether to wait for discovery
4654 * @ioc: per adapter object
4655 *
4656 * Decide whether to wait on discovery to complete. Used to either
4657 * locate boot device, or report volumes ahead of physical devices.
4658 *
4659 * Returns 1 for wait, 0 for don't wait
4660 */
4661static int
4662_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
4663{
4664        /* We wait for discovery to complete if IR firmware is loaded.
4665         * The sas topology events arrive before PD events, so we need time to
4666         * turn on the bit in ioc->pd_handles to indicate a PD.
4667         * Also, it may be required to report Volumes ahead of physical
4668         * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
4669         */
4670        if (ioc->ir_firmware)
4671                return 1;
4672
4673        /* if no Bios, then we don't need to wait */
4674        if (!ioc->bios_pg3.BiosVersion)
4675                return 0;
4676
4677        /* The Bios is present, so we drop down here.
4678         *
4679         * If there are any boot device entries in Bios Page 2, then we wait
4680         * for discovery to complete.
4681         */
4682
4683        /* Current Boot Device */
4684        if ((ioc->bios_pg2.CurrentBootDeviceForm &
4685            MPI2_BIOSPAGE2_FORM_MASK) ==
4686            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4687        /* Request Boot Device */
4688           (ioc->bios_pg2.ReqBootDeviceForm &
4689            MPI2_BIOSPAGE2_FORM_MASK) ==
4690            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4691        /* Alternate Request Boot Device */
4692           (ioc->bios_pg2.ReqAltBootDeviceForm &
4693            MPI2_BIOSPAGE2_FORM_MASK) ==
4694            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
4695                return 0;
4696
4697        return 1;
4698}
4699
4700/**
4701 * _base_unmask_events - turn on notification for this event
4702 * @ioc: per adapter object
4703 * @event: firmware event
4704 *
4705 * The mask is stored in ioc->event_masks.
4706 */
4707static void
4708_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
4709{
4710        u32 desired_event;
4711
4712        if (event >= 128)
4713                return;
4714
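            /* The 128 possible events map onto four 32-bit mask words:
             * word = event / 32, bit = event % 32.  Clearing a bit unmasks
             * the event; e.g. event 40 clears bit 8 of event_masks[1].
             */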
4715        desired_event = (1 << (event % 32));
4716
4717        if (event < 32)
4718                ioc->event_masks[0] &= ~desired_event;
4719        else if (event < 64)
4720                ioc->event_masks[1] &= ~desired_event;
4721        else if (event < 96)
4722                ioc->event_masks[2] &= ~desired_event;
4723        else if (event < 128)
4724                ioc->event_masks[3] &= ~desired_event;
4725}
4726
4727/**
4728 * _base_event_notification - send event notification
4729 * @ioc: per adapter object
4730 *
4731 * Returns 0 for success, non-zero for failure.
4732 */
4733static int
4734_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
4735{
4736        Mpi2EventNotificationRequest_t *mpi_request;
4737        u16 smid;
4738        int r = 0;
4739        int i;
4740
4741        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4742            __func__));
4743
4744        if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4745                pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4746                    ioc->name, __func__);
4747                return -EAGAIN;
4748        }
4749
4750        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4751        if (!smid) {
4752                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4753                    ioc->name, __func__);
4754                return -EAGAIN;
4755        }
4756        ioc->base_cmds.status = MPT3_CMD_PENDING;
4757        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4758        ioc->base_cmds.smid = smid;
4759        memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
4760        mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
4761        mpi_request->VF_ID = 0; /* TODO */
4762        mpi_request->VP_ID = 0;
4763        for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4764                mpi_request->EventMasks[i] =
4765                    cpu_to_le32(ioc->event_masks[i]);
4766        init_completion(&ioc->base_cmds.done);
4767        mpt3sas_base_put_smid_default(ioc, smid);
4768        wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
4769        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4770                pr_err(MPT3SAS_FMT "%s: timeout\n",
4771                    ioc->name, __func__);
4772                _debug_dump_mf(mpi_request,
4773                    sizeof(Mpi2EventNotificationRequest_t)/4);
4774                if (ioc->base_cmds.status & MPT3_CMD_RESET)
4775                        r = -EFAULT;
4776                else
4777                        r = -ETIME;
4778        } else
4779                dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
4780                    ioc->name, __func__));
4781        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4782        return r;
4783}
4784
4785/**
4786 * mpt3sas_base_validate_event_type - validate event types
4787 * @ioc: per adapter object
4788 * @event_type: event type bitmap requested by the application
4789 *
4790 * This will turn on firmware event notification when an application
4791 * asks for that event. We don't mask events that are already enabled.
4792 */
4793void
4794mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4795{
4796        int i, j;
4797        u32 event_mask, desired_event;
4798        u8 send_update_to_fw;
4799
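            /* event_type[] has a bit set for each event the application
             * wants; ioc->event_masks[] has a bit set for each event that is
             * currently masked.  Unmask any requested event that is still
             * masked and remember to resend the event notification request.
             */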
4800        for (i = 0, send_update_to_fw = 0; i <
4801            MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4802                event_mask = ~event_type[i];
4803                desired_event = 1;
4804                for (j = 0; j < 32; j++) {
4805                        if (!(event_mask & desired_event) &&
4806                            (ioc->event_masks[i] & desired_event)) {
4807                                ioc->event_masks[i] &= ~desired_event;
4808                                send_update_to_fw = 1;
4809                        }
4810                        desired_event = (desired_event << 1);
4811                }
4812        }
4813
4814        if (!send_update_to_fw)
4815                return;
4816
4817        mutex_lock(&ioc->base_cmds.mutex);
4818        _base_event_notification(ioc);
4819        mutex_unlock(&ioc->base_cmds.mutex);
4820}
4821
4822/**
4823 * _base_diag_reset - the "big hammer" start of day reset
4824 * @ioc: per adapter object
4825 *
4826 * Returns 0 for success, non-zero for failure.
4827 */
4828static int
4829_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
4830{
4831        u32 host_diagnostic;
4832        u32 ioc_state;
4833        u32 count;
4834        u32 hcb_size;
4835
4836        pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
4837
4838        drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
4839            ioc->name));
4840
4841        count = 0;
4842        do {
4843                /* Write magic sequence to WriteSequence register
4844                 * Loop until in diagnostic mode
4845                 */
4846                drsprintk(ioc, pr_info(MPT3SAS_FMT
4847                        "write magic sequence\n", ioc->name));
4848                writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4849                writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4850                writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4851                writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4852                writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4853                writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4854                writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4855
4856                /* wait 100 msec */
4857                msleep(100);
4858
4859                if (count++ > 20)
4860                        goto out;
4861
4862                host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4863                drsprintk(ioc, pr_info(MPT3SAS_FMT
4864                        "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
4865                    ioc->name, count, host_diagnostic));
4866
4867        } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4868
4869        hcb_size = readl(&ioc->chip->HCBSize);
4870
4871        drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
4872            ioc->name));
4873        writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4874             &ioc->chip->HostDiagnostic);
4875
4876        /*This delay allows the chip PCIe hardware time to finish reset tasks*/
4877        msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4878
4879        /* Approximately 300 second max wait */
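            /* the loop bound is 300,000,000 us divided by the per-read delay;
             * each pass sleeps that delay (converted from us to ms)
             */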
4880        for (count = 0; count < (300000000 /
4881                MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
4882
4883                host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4884
4885                if (host_diagnostic == 0xFFFFFFFF)
4886                        goto out;
4887                if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4888                        break;
4889
4890                msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
4891        }
4892
4893        if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4894
4895                drsprintk(ioc, pr_info(MPT3SAS_FMT
4896                "restart the adapter assuming the HCB Address points to good F/W\n",
4897                    ioc->name));
4898                host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4899                host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4900                writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4901
4902                drsprintk(ioc, pr_info(MPT3SAS_FMT
4903                    "re-enable the HCDW\n", ioc->name));
4904                writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4905                    &ioc->chip->HCBSize);
4906        }
4907
4908        drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
4909            ioc->name));
4910        writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4911            &ioc->chip->HostDiagnostic);
4912
4913        drsprintk(ioc, pr_info(MPT3SAS_FMT
4914                "disable writes to the diagnostic register\n", ioc->name));
4915        writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4916
4917        drsprintk(ioc, pr_info(MPT3SAS_FMT
4918                "Wait for FW to go to the READY state\n", ioc->name));
4919        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
4920        if (ioc_state) {
4921                pr_err(MPT3SAS_FMT
4922                        "%s: failed going to ready state (ioc_state=0x%x)\n",
4923                        ioc->name, __func__, ioc_state);
4924                goto out;
4925        }
4926
4927        pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
4928        return 0;
4929
4930 out:
4931        pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
4932        return -EFAULT;
4933}
4934
4935/**
4936 * _base_make_ioc_ready - put controller in READY state
4937 * @ioc: per adapter object
4938 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4939 *
4940 * Returns 0 for success, non-zero for failure.
4941 */
4942static int
4943_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
4944{
4945        u32 ioc_state;
4946        int rc;
4947        int count;
4948
4949        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4950            __func__));
4951
4952        if (ioc->pci_error_recovery)
4953                return 0;
4954
4955        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4956        dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4957            ioc->name, __func__, ioc_state));
4958
4959        /* if in RESET state, it should move to READY state shortly */
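            /* poll once per second, giving up after roughly 10 seconds */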
4960        count = 0;
4961        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
4962                while ((ioc_state & MPI2_IOC_STATE_MASK) !=
4963                    MPI2_IOC_STATE_READY) {
4964                        if (count++ == 10) {
4965                                pr_err(MPT3SAS_FMT
4966                                        "%s: failed going to ready state (ioc_state=0x%x)\n",
4967                                    ioc->name, __func__, ioc_state);
4968                                return -EFAULT;
4969                        }
4970                        ssleep(1);
4971                        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4972                }
4973        }
4974
4975        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4976                return 0;
4977
4978        if (ioc_state & MPI2_DOORBELL_USED) {
4979                dhsprintk(ioc, pr_info(MPT3SAS_FMT
4980                        "unexpected doorbell active!\n",
4981                        ioc->name));
4982                goto issue_diag_reset;
4983        }
4984
4985        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4986                mpt3sas_base_fault_info(ioc, ioc_state &
4987                    MPI2_DOORBELL_DATA_MASK);
4988                goto issue_diag_reset;
4989        }
4990
4991        if (type == FORCE_BIG_HAMMER)
4992                goto issue_diag_reset;
4993
4994        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) {
4995                if (!(_base_send_ioc_reset(ioc,
4996                    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15)))
4997                        return 0;
4998        }
4999
5000 issue_diag_reset:
5001        rc = _base_diag_reset(ioc);
5002        return rc;
5003}
5004
5005/**
5006 * _base_make_ioc_operational - put controller in OPERATIONAL state
5007 * @ioc: per adapter object
5008 *
5009 * Returns 0 for success, non-zero for failure.
5010 */
5011static int
5012_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
5013{
5014        int r, i, index;
5015        unsigned long   flags;
5016        u32 reply_address;
5017        u16 smid;
5018        struct _tr_list *delayed_tr, *delayed_tr_next;
5019        struct _sc_list *delayed_sc, *delayed_sc_next;
5020        struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
5021        u8 hide_flag;
5022        struct adapter_reply_queue *reply_q;
5023        Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
5024
5025        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5026            __func__));
5027
5028        /* clean the delayed target reset list */
5029        list_for_each_entry_safe(delayed_tr, delayed_tr_next,
5030            &ioc->delayed_tr_list, list) {
5031                list_del(&delayed_tr->list);
5032                kfree(delayed_tr);
5033        }
5034
5035
5036        list_for_each_entry_safe(delayed_tr, delayed_tr_next,
5037            &ioc->delayed_tr_volume_list, list) {
5038                list_del(&delayed_tr->list);
5039                kfree(delayed_tr);
5040        }
5041
5042        list_for_each_entry_safe(delayed_sc, delayed_sc_next,
5043            &ioc->delayed_sc_list, list) {
5044                list_del(&delayed_sc->list);
5045                kfree(delayed_sc);
5046        }
5047
5048        list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
5049            &ioc->delayed_event_ack_list, list) {
5050                list_del(&delayed_event_ack->list);
5051                kfree(delayed_event_ack);
5052        }
5053
5054        /* initialize the scsi lookup free list */
5055        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5056        INIT_LIST_HEAD(&ioc->free_list);
5057        smid = 1;
5058        for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
5059                INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
5060                ioc->scsi_lookup[i].cb_idx = 0xFF;
5061                ioc->scsi_lookup[i].smid = smid;
5062                ioc->scsi_lookup[i].scmd = NULL;
5063                ioc->scsi_lookup[i].direct_io = 0;
5064                list_add_tail(&ioc->scsi_lookup[i].tracker_list,
5065                    &ioc->free_list);
5066        }
5067
5068        /* hi-priority queue */
5069        INIT_LIST_HEAD(&ioc->hpr_free_list);
5070        smid = ioc->hi_priority_smid;
5071        for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
5072                ioc->hpr_lookup[i].cb_idx = 0xFF;
5073                ioc->hpr_lookup[i].smid = smid;
5074                list_add_tail(&ioc->hpr_lookup[i].tracker_list,
5075                    &ioc->hpr_free_list);
5076        }
5077
5078        /* internal queue */
5079        INIT_LIST_HEAD(&ioc->internal_free_list);
5080        smid = ioc->internal_smid;
5081        for (i = 0; i < ioc->internal_depth; i++, smid++) {
5082                ioc->internal_lookup[i].cb_idx = 0xFF;
5083                ioc->internal_lookup[i].smid = smid;
5084                list_add_tail(&ioc->internal_lookup[i].tracker_list,
5085                    &ioc->internal_free_list);
5086        }
5087
5088        /* chain pool */
5089        INIT_LIST_HEAD(&ioc->free_chain_list);
5090        for (i = 0; i < ioc->chain_depth; i++)
5091                list_add_tail(&ioc->chain_lookup[i].tracker_list,
5092                    &ioc->free_chain_list);
5093
5094        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5095
5096        /* initialize Reply Free Queue */
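            /* each entry holds the bus address of one reply frame; frames sit
             * back to back, ioc->reply_sz bytes apart, starting at
             * ioc->reply_dma
             */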
5097        for (i = 0, reply_address = (u32)ioc->reply_dma ;
5098            i < ioc->reply_free_queue_depth ; i++, reply_address +=
5099            ioc->reply_sz)
5100                ioc->reply_free[i] = cpu_to_le32(reply_address);
5101
5102        /* initialize reply queues */
5103        if (ioc->is_driver_loading)
5104                _base_assign_reply_queues(ioc);
5105
5106        /* initialize Reply Post Free Queue */
5107        index = 0;
5108        reply_post_free_contig = ioc->reply_post[0].reply_post_free;
5109        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5110                /*
5111                 * If RDPQ is enabled, switch to the next allocation.
5112                 * Otherwise advance within the contiguous region.
5113                 */
5114                if (ioc->rdpq_array_enable) {
5115                        reply_q->reply_post_free =
5116                                ioc->reply_post[index++].reply_post_free;
5117                } else {
5118                        reply_q->reply_post_free = reply_post_free_contig;
5119                        reply_post_free_contig += ioc->reply_post_queue_depth;
5120                }
5121
5122                reply_q->reply_post_host_index = 0;
5123                for (i = 0; i < ioc->reply_post_queue_depth; i++)
5124                        reply_q->reply_post_free[i].Words =
5125                            cpu_to_le64(ULLONG_MAX);
5126                if (!_base_is_controller_msix_enabled(ioc))
5127                        goto skip_init_reply_post_free_queue;
5128        }
5129 skip_init_reply_post_free_queue:
5130
5131        r = _base_send_ioc_init(ioc);
5132        if (r)
5133                return r;
5134
5135        /* initialize reply free host index */
5136        ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
5137        writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
5138
5139        /* initialize reply post host index */
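            /* when ioc->msix96_vector is set, each group of 8 MSI-X vectors
             * posts to its own register in replyPostRegisterIndex[]; all
             * other controllers use the single ReplyPostHostIndex register
             */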
5140        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5141                if (ioc->msix96_vector)
5142                        writel((reply_q->msix_index & 7)<<
5143                           MPI2_RPHI_MSIX_INDEX_SHIFT,
5144                           ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
5145                else
5146                        writel(reply_q->msix_index <<
5147                                MPI2_RPHI_MSIX_INDEX_SHIFT,
5148                                &ioc->chip->ReplyPostHostIndex);
5149
5150                if (!_base_is_controller_msix_enabled(ioc))
5151                        goto skip_init_reply_post_host_index;
5152        }
5153
5154 skip_init_reply_post_host_index:
5155
5156        _base_unmask_interrupts(ioc);
5157        r = _base_event_notification(ioc);
5158        if (r)
5159                return r;
5160
5161        _base_static_config_pages(ioc);
5162
5163        if (ioc->is_driver_loading) {
5164
5165                if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
5166                    == 0x80) {
5167                        hide_flag = (u8) (
5168                            le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
5169                            MFG_PAGE10_HIDE_SSDS_MASK);
5170                        if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
5171                                ioc->mfg_pg10_hide_flag = hide_flag;
5172                }
5173
5174                ioc->wait_for_discovery_to_complete =
5175                    _base_determine_wait_on_discovery(ioc);
5176
5177                return r; /* scan_start and scan_finished support */
5178        }
5179
5180        r = _base_send_port_enable(ioc);
5181        if (r)
5182                return r;
5183
5184        return r;
5185}
5186
5187/**
5188 * mpt3sas_base_free_resources - free controller resources
5189 * @ioc: per adapter object
5190 *
5191 * Return nothing.
5192 */
5193void
5194mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
5195{
5196        dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5197            __func__));
5198
5199        /* synchronize freeing resources with the pci_access_mutex lock */
5200        mutex_lock(&ioc->pci_access_mutex);
5201        if (ioc->chip_phys && ioc->chip) {
5202                _base_mask_interrupts(ioc);
5203                ioc->shost_recovery = 1;
5204                _base_make_ioc_ready(ioc, SOFT_RESET);
5205                ioc->shost_recovery = 0;
5206        }
5207
5208        mpt3sas_base_unmap_resources(ioc);
5209        mutex_unlock(&ioc->pci_access_mutex);
5210        return;
5211}
5212
5213/**
5214 * mpt3sas_base_attach - attach controller instance
5215 * @ioc: per adapter object
5216 *
5217 * Returns 0 for success, non-zero for failure.
5218 */
5219int
5220mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5221{
5222        int r, i;
5223        int cpu_id, last_cpu_id = 0;
5224
5225        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5226            __func__));
5227
5228        /* setup cpu_msix_table */
5229        ioc->cpu_count = num_online_cpus();
5230        for_each_online_cpu(cpu_id)
5231                last_cpu_id = cpu_id;
5232        ioc->cpu_msix_table_sz = last_cpu_id + 1;
5233        ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
5234        ioc->reply_queue_count = 1;
5235        if (!ioc->cpu_msix_table) {
5236                dfailprintk(ioc, pr_info(MPT3SAS_FMT
5237                        "allocation for cpu_msix_table failed!!!\n",
5238                        ioc->name));
5239                r = -ENOMEM;
5240                goto out_free_resources;
5241        }
5242
5243        if (ioc->is_warpdrive) {
5244                ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
5245                    sizeof(resource_size_t *), GFP_KERNEL);
5246                if (!ioc->reply_post_host_index) {
5247                        dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation for "
5248                                "reply_post_host_index failed!!!\n", ioc->name));
5249                        r = -ENOMEM;
5250                        goto out_free_resources;
5251                }
5252        }
5253
5254        ioc->rdpq_array_enable_assigned = 0;
5255        ioc->dma_mask = 0;
5256        r = mpt3sas_base_map_resources(ioc);
5257        if (r)
5258                goto out_free_resources;
5259
5260        pci_set_drvdata(ioc->pdev, ioc->shost);
5261        r = _base_get_ioc_facts(ioc);
5262        if (r)
5263                goto out_free_resources;
5264
5265        switch (ioc->hba_mpi_version_belonged) {
5266        case MPI2_VERSION:
5267                ioc->build_sg_scmd = &_base_build_sg_scmd;
5268                ioc->build_sg = &_base_build_sg;
5269                ioc->build_zero_len_sge = &_base_build_zero_len_sge;
5270                break;
5271        case MPI25_VERSION:
5272        case MPI26_VERSION:
5273                /*
5274                 * In SAS3.0,
5275                 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
5276                 * Target Status - all require the IEEE formatted scatter gather
5277                 * elements.
5278                 */
5279                ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
5280                ioc->build_sg = &_base_build_sg_ieee;
5281                ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
5282                ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
5283                break;
5284        }
5285
5286        /*
5287         * These function pointers are for other requests that don't
5288         * require the IEEE scatter gather elements.
5289         *
5290         * For example Configuration Pages and SAS IOUNIT Control don't.
5291         */
5292        ioc->build_sg_mpi = &_base_build_sg;
5293        ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
5294
5295        r = _base_make_ioc_ready(ioc, SOFT_RESET);
5296        if (r)
5297                goto out_free_resources;
5298
5299        ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
5300            sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
5301        if (!ioc->pfacts) {
5302                r = -ENOMEM;
5303                goto out_free_resources;
5304        }
5305
5306        for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
5307                r = _base_get_port_facts(ioc, i);
5308                if (r)
5309                        goto out_free_resources;
5310        }
5311
5312        r = _base_allocate_memory_pools(ioc);
5313        if (r)
5314                goto out_free_resources;
5315
5316        init_waitqueue_head(&ioc->reset_wq);
5317
5318        /* allocate memory pd handle bitmask list */
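            /* one bit per possible device handle, rounded up to a whole byte;
             * e.g. a MaxDevHandle of 1024 yields a 128 byte bitmap
             */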
5319        ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
5320        if (ioc->facts.MaxDevHandle % 8)
5321                ioc->pd_handles_sz++;
5322        ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
5323            GFP_KERNEL);
5324        if (!ioc->pd_handles) {
5325                r = -ENOMEM;
5326                goto out_free_resources;
5327        }
5328        ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
5329            GFP_KERNEL);
5330        if (!ioc->blocking_handles) {
5331                r = -ENOMEM;
5332                goto out_free_resources;
5333        }
5334
5335        ioc->fwfault_debug = mpt3sas_fwfault_debug;
5336
5337        /* base internal command bits */
5338        mutex_init(&ioc->base_cmds.mutex);
5339        ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5340        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5341
5342        /* port_enable command bits */
5343        ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5344        ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5345
5346        /* transport internal command bits */
5347        ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5348        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
5349        mutex_init(&ioc->transport_cmds.mutex);
5350
5351        /* scsih internal command bits */
5352        ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5353        ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
5354        mutex_init(&ioc->scsih_cmds.mutex);
5355
5356        /* task management internal command bits */
5357        ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5358        ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
5359        mutex_init(&ioc->tm_cmds.mutex);
5360
5361        /* config page internal command bits */
5362        ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5363        ioc->config_cmds.status = MPT3_CMD_NOT_USED;
5364        mutex_init(&ioc->config_cmds.mutex);
5365
5366        /* ctl module internal command bits */
5367        ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5368        ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5369        ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
5370        mutex_init(&ioc->ctl_cmds.mutex);
5371
5372        if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
5373            !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
5374            !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
5375            !ioc->ctl_cmds.sense) {
5376                r = -ENOMEM;
5377                goto out_free_resources;
5378        }
5379
5380        for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5381                ioc->event_masks[i] = -1;
5382
5383        /* here we enable the events we care about */
5384        _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
5385        _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
5386        _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
5387        _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5388        _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
5389        _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
5390        _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
5391        _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
5392        _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
5393        _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
5394        _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
5395        if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
5396                _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
5397
5398        r = _base_make_ioc_operational(ioc);
5399        if (r)
5400                goto out_free_resources;
5401
5402        ioc->non_operational_loop = 0;
5403        return 0;
5404
5405 out_free_resources:
5406
5407        ioc->remove_host = 1;
5408
5409        mpt3sas_base_free_resources(ioc);
5410        _base_release_memory_pools(ioc);
5411        pci_set_drvdata(ioc->pdev, NULL);
5412        kfree(ioc->cpu_msix_table);
5413        if (ioc->is_warpdrive)
5414                kfree(ioc->reply_post_host_index);
5415        kfree(ioc->pd_handles);
5416        kfree(ioc->blocking_handles);
5417        kfree(ioc->tm_cmds.reply);
5418        kfree(ioc->transport_cmds.reply);
5419        kfree(ioc->scsih_cmds.reply);
5420        kfree(ioc->config_cmds.reply);
5421        kfree(ioc->base_cmds.reply);
5422        kfree(ioc->port_enable_cmds.reply);
5423        kfree(ioc->ctl_cmds.reply);
5424        kfree(ioc->ctl_cmds.sense);
5425        kfree(ioc->pfacts);
5426        ioc->ctl_cmds.reply = NULL;
5427        ioc->base_cmds.reply = NULL;
5428        ioc->tm_cmds.reply = NULL;
5429        ioc->scsih_cmds.reply = NULL;
5430        ioc->transport_cmds.reply = NULL;
5431        ioc->config_cmds.reply = NULL;
5432        ioc->pfacts = NULL;
5433        return r;
5434}
5435
5436
5437/**
5438 * mpt3sas_base_detach - remove controller instance
5439 * @ioc: per adapter object
5440 *
5441 * Return nothing.
5442 */
5443void
5444mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
5445{
5446        dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5447            __func__));
5448
5449        mpt3sas_base_stop_watchdog(ioc);
5450        mpt3sas_base_free_resources(ioc);
5451        _base_release_memory_pools(ioc);
5452        pci_set_drvdata(ioc->pdev, NULL);
5453        kfree(ioc->cpu_msix_table);
5454        if (ioc->is_warpdrive)
5455                kfree(ioc->reply_post_host_index);
5456        kfree(ioc->pd_handles);
5457        kfree(ioc->blocking_handles);
5458        kfree(ioc->pfacts);
5459        kfree(ioc->ctl_cmds.reply);
5460        kfree(ioc->ctl_cmds.sense);
5461        kfree(ioc->base_cmds.reply);
5462        kfree(ioc->port_enable_cmds.reply);
5463        kfree(ioc->tm_cmds.reply);
5464        kfree(ioc->transport_cmds.reply);
5465        kfree(ioc->scsih_cmds.reply);
5466        kfree(ioc->config_cmds.reply);
5467}
5468
5469/**
5470 * _base_reset_handler - reset callback handler (for base)
5471 * @ioc: per adapter object
5472 * @reset_phase: phase
5473 *
5474 * The handler for doing any required cleanup or initialization.
5475 *
5476 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
5477 * MPT3_IOC_DONE_RESET
5478 *
5479 * Return nothing.
5480 */
5481static void
5482_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
5483{
5484        mpt3sas_scsih_reset_handler(ioc, reset_phase);
5485        mpt3sas_ctl_reset_handler(ioc, reset_phase);
5486        switch (reset_phase) {
5487        case MPT3_IOC_PRE_RESET:
5488                dtmprintk(ioc, pr_info(MPT3SAS_FMT
5489                "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
5490                break;
5491        case MPT3_IOC_AFTER_RESET:
5492                dtmprintk(ioc, pr_info(MPT3SAS_FMT
5493                "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
5494                if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
5495                        ioc->transport_cmds.status |= MPT3_CMD_RESET;
5496                        mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
5497                        complete(&ioc->transport_cmds.done);
5498                }
5499                if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
5500                        ioc->base_cmds.status |= MPT3_CMD_RESET;
5501                        mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
5502                        complete(&ioc->base_cmds.done);
5503                }
5504                if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5505                        ioc->port_enable_failed = 1;
5506                        ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
5507                        mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
5508                        if (ioc->is_driver_loading) {
5509                                ioc->start_scan_failed =
5510                                    MPI2_IOCSTATUS_INTERNAL_ERROR;
5511                                ioc->start_scan = 0;
5512                                ioc->port_enable_cmds.status =
5513                                    MPT3_CMD_NOT_USED;
5514                        } else
5515                                complete(&ioc->port_enable_cmds.done);
5516                }
5517                if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
5518                        ioc->config_cmds.status |= MPT3_CMD_RESET;
5519                        mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
5520                        ioc->config_cmds.smid = USHRT_MAX;
5521                        complete(&ioc->config_cmds.done);
5522                }
5523                break;
5524        case MPT3_IOC_DONE_RESET:
5525                dtmprintk(ioc, pr_info(MPT3SAS_FMT
5526                        "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
5527                break;
5528        }
5529}
5530
5531/**
5532 * _wait_for_commands_to_complete - wait for pending commands to complete
5533 * @ioc: Pointer to MPT_ADAPTER structure
5534 *
5535 * This function waits (up to 10 seconds) for all pending commands to
5536 * complete prior to putting the controller in reset.
5537 */
5538static void
5539_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
5540{
5541        u32 ioc_state;
5542        unsigned long flags;
5543        u16 i;
5544
5545        ioc->pending_io_count = 0;
5546
5547        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5548        if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
5549                return;
5550
5551        /* pending command count */
5552        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5553        for (i = 0; i < ioc->scsiio_depth; i++)
5554                if (ioc->scsi_lookup[i].cb_idx != 0xFF)
5555                        ioc->pending_io_count++;
5556        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5557
5558        if (!ioc->pending_io_count)
5559                return;
5560
5561        /* wait for pending commands to complete */
5562        wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
5563}
5564
5565/**
5566 * mpt3sas_base_hard_reset_handler - reset controller
5567 * @ioc: Pointer to MPT3SAS_ADAPTER structure
5568 * @type: FORCE_BIG_HAMMER or SOFT_RESET
5569 *
5570 * Returns 0 for success, non-zero for failure.
5571 */
5572int
5573mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
5574        enum reset_type type)
5575{
5576        int r;
5577        unsigned long flags;
5578        u32 ioc_state;
5579        u8 is_fault = 0, is_trigger = 0;
5580
5581        dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
5582            __func__));
5583
5584        if (ioc->pci_error_recovery) {
5585                pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
5586                    ioc->name, __func__);
5587                r = 0;
5588                goto out_unlocked;
5589        }
5590
5591        if (mpt3sas_fwfault_debug)
5592                mpt3sas_halt_firmware(ioc);
5593
5594        /* wait for an active reset in progress to complete */
5595        if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
5596                do {
5597                        ssleep(1);
5598                } while (ioc->shost_recovery == 1);
5599                dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5600                    __func__));
5601                return ioc->ioc_reset_in_progress_status;
5602        }
5603
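            /*
             * shost_recovery signals host recovery in progress to the rest
             * of the driver; new I/O is held off while it is set.
             */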
5604        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5605        ioc->shost_recovery = 1;
5606        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5607
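            /*
             * If a trace diag buffer is registered and has not been
             * released, remember whether the IOC was in the FAULT state so
             * the matching master trigger can be fired once the reset
             * completes successfully (see out_unlocked below).
             */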
5608        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5609            MPT3_DIAG_BUFFER_IS_REGISTERED) &&
5610            (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5611            MPT3_DIAG_BUFFER_IS_RELEASED))) {
5612                is_trigger = 1;
5613                ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5614                if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
5615                        is_fault = 1;
5616        }
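            /*
             * Reset sequence: notify PRE_RESET, drain outstanding SCSI I/O,
             * mask interrupts, bring the IOC back to the ready state (the
             * actual reset), then notify AFTER_RESET so pending internal
             * commands are cleaned up before the IOC is made operational
             * again.
             */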
5617        _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
5618        _wait_for_commands_to_complete(ioc);
5619        _base_mask_interrupts(ioc);
5620        r = _base_make_ioc_ready(ioc, type);
5621        if (r)
5622                goto out;
5623        _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
5624
5625        /* If this hard reset is called while port enable is active, then
5626         * there is no reason to call make_ioc_operational
5627         */
5628        if (ioc->is_driver_loading && ioc->port_enable_failed) {
5629                ioc->remove_host = 1;
5630                r = -EFAULT;
5631                goto out;
5632        }
5633        r = _base_get_ioc_facts(ioc);
5634        if (r)
5635                goto out;
5636
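            /*
             * The IOC was brought up using RDPQ (reply descriptor post
             * queue) arrays; if the firmware reported after the reset no
             * longer supports them (e.g. it was flashed to an older
             * version), the reply queues cannot be reused, so give up
             * loudly.
             */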
5637        if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
5638                panic("%s: Issue occurred with flashing controller firmware. "
5639                      "Please reboot the system and ensure that the correct"
5640                      " firmware version is running\n", ioc->name);
5641
5642        r = _base_make_ioc_operational(ioc);
5643        if (!r)
5644                _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
5645
5646 out:
5647        dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
5648            ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
5649
5650        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5651        ioc->ioc_reset_in_progress_status = r;
5652        ioc->shost_recovery = 0;
5653        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5654        ioc->ioc_reset_count++;
5655        mutex_unlock(&ioc->reset_in_progress_mutex);
5656
5657 out_unlocked:
5658        if ((r == 0) && is_trigger) {
5659                if (is_fault)
5660                        mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
5661                else
5662                        mpt3sas_trigger_master(ioc,
5663                            MASTER_TRIGGER_ADAPTER_RESET);
5664        }
5665        dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5666            __func__));
5667        return r;
5668}
5669
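    /*
     * Illustrative sketch only, not part of the driver: one way a caller
     * could request a controller reset through the handler above.  The
     * wrapper name example_issue_host_reset() is hypothetical; the real
     * callers live in the scsih and ctl modules of this driver.
     */
    #if 0
    static int
    example_issue_host_reset(struct MPT3SAS_ADAPTER *ioc)
    {
            /* FORCE_BIG_HAMMER requests a full diagnostic reset rather
             * than first attempting a message unit (soft) reset.
             */
            return mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
    }
    #endif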