linux/drivers/scsi/mpt3sas/mpt3sas_base.c
<<
>>
Prefs
   1/*
   2 * This is the Fusion MPT base driver providing common API layer interface
   3 * for access to MPT (Message Passing Technology) firmware.
   4 *
   5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
   6 * Copyright (C) 2012-2014  LSI Corporation
   7 * Copyright (C) 2013-2014 Avago Technologies
   8 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public License
  12 * as published by the Free Software Foundation; either version 2
  13 * of the License, or (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 *
  20 * NO WARRANTY
  21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  25 * solely responsible for determining the appropriateness of using and
  26 * distributing the Program and assumes all risks associated with its
  27 * exercise of rights under this Agreement, including but not limited to
  28 * the risks and costs of program errors, damage to or loss of data,
  29 * programs or equipment, and unavailability or interruption of operations.
  30
  31 * DISCLAIMER OF LIABILITY
  32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  39
  40 * You should have received a copy of the GNU General Public License
  41 * along with this program; if not, write to the Free Software
  42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
  43 * USA.
  44 */
  45
  46#include <linux/kernel.h>
  47#include <linux/module.h>
  48#include <linux/errno.h>
  49#include <linux/init.h>
  50#include <linux/slab.h>
  51#include <linux/types.h>
  52#include <linux/pci.h>
  53#include <linux/kdev_t.h>
  54#include <linux/blkdev.h>
  55#include <linux/delay.h>
  56#include <linux/interrupt.h>
  57#include <linux/dma-mapping.h>
  58#include <linux/io.h>
  59#include <linux/time.h>
  60#include <linux/kthread.h>
  61#include <linux/aer.h>
  62
  63
  64#include "mpt3sas_base.h"
  65
  66static MPT_CALLBACK     mpt_callbacks[MPT_MAX_CALLBACKS];
  67
  68
  69#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
  70
  71 /* maximum controller queue depth */
  72#define MAX_HBA_QUEUE_DEPTH     30000
  73#define MAX_CHAIN_DEPTH         100000
  74static int max_queue_depth = -1;
  75module_param(max_queue_depth, int, 0);
  76MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
  77
  78static int max_sgl_entries = -1;
  79module_param(max_sgl_entries, int, 0);
  80MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
  81
  82static int msix_disable = -1;
  83module_param(msix_disable, int, 0);
  84MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
  85
  86static int smp_affinity_enable = 1;
  87module_param(smp_affinity_enable, int, S_IRUGO);
  88MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)");
  89
  90static int max_msix_vectors = -1;
  91module_param(max_msix_vectors, int, 0);
  92MODULE_PARM_DESC(max_msix_vectors,
  93        " max msix vectors");
  94
  95static int mpt3sas_fwfault_debug;
  96MODULE_PARM_DESC(mpt3sas_fwfault_debug,
  97        " enable detection of firmware fault and halt firmware - (default=0)");
  98
  99static int
 100_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
 101
 102/**
 103 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 104 *
 105 */
 106static int
 107_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
 108{
 109        int ret = param_set_int(val, kp);
 110        struct MPT3SAS_ADAPTER *ioc;
 111
 112        if (ret)
 113                return ret;
 114
 115        /* global ioc spinlock to protect controller list on list operations */
 116        pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
 117        spin_lock(&gioc_lock);
 118        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
 119                ioc->fwfault_debug = mpt3sas_fwfault_debug;
 120        spin_unlock(&gioc_lock);
 121        return 0;
 122}
 123module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
 124        param_get_int, &mpt3sas_fwfault_debug, 0644);
 125
 126/**
 127 *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 128 * @arg: input argument, used to derive ioc
 129 *
 130 * Return 0 if controller is removed from pci subsystem.
 131 * Return -1 for other case.
 132 */
 133static int mpt3sas_remove_dead_ioc_func(void *arg)
 134{
 135        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
 136        struct pci_dev *pdev;
 137
 138        if ((ioc == NULL))
 139                return -1;
 140
 141        pdev = ioc->pdev;
 142        if ((pdev == NULL))
 143                return -1;
 144        pci_stop_and_remove_bus_device_locked(pdev);
 145        return 0;
 146}
 147
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Periodic watchdog: polls the doorbell IOC state and, depending on it,
 * either rearms itself, hard-resets a faulted controller, or schedules
 * removal of a dead (unreachable) controller.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long	 flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	/* skip this poll while a host/PCI recovery is already in flight;
	 * note: the rearm_timer path below is entered with this lock held */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	/* all state bits reading back as set: controller is not responding */
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/* It may be possible that EEH recovery can resolve some of
		 * pci bus failure issues rather removing the dead ioc function
		 * by considering controller is in a non-operational state. So
		 * here priority is given to the EEH recovery. If it does not
		 * resolve this issue, mpt3sas driver will consider this
		 * controller to non-operational state and remove the dead ioc
		 * function.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
							 flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead host in a separate kthread; PCI removal
		 * can sleep for a long time and must not block this workq */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			ioc->name, __func__);
		return; /* don't rearm timer */
	}

	/* controller responded: reset the consecutive-failure counter */
	ioc->non_operational_loop = 0;

	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		/* reset failed and still not operational: give up polling */
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	/* fault_reset_work_q is NULLed by mpt3sas_base_stop_watchdog() */
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
 241
 242/**
 243 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 244 * @ioc: per adapter object
 245 * Context: sleep.
 246 *
 247 * Return nothing.
 248 */
 249void
 250mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
 251{
 252        unsigned long    flags;
 253
 254        if (ioc->fault_reset_work_q)
 255                return;
 256
 257        /* initialize fault polling */
 258
 259        INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
 260        snprintf(ioc->fault_reset_work_q_name,
 261            sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
 262            ioc->driver_name, ioc->id);
 263        ioc->fault_reset_work_q =
 264                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
 265        if (!ioc->fault_reset_work_q) {
 266                pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
 267                    ioc->name, __func__, __LINE__);
 268                        return;
 269        }
 270        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 271        if (ioc->fault_reset_work_q)
 272                queue_delayed_work(ioc->fault_reset_work_q,
 273                    &ioc->fault_reset_work,
 274                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
 275        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 276}
 277
 278/**
 279 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 280 * @ioc: per adapter object
 281 * Context: sleep.
 282 *
 283 * Return nothing.
 284 */
 285void
 286mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
 287{
 288        unsigned long flags;
 289        struct workqueue_struct *wq;
 290
 291        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 292        wq = ioc->fault_reset_work_q;
 293        ioc->fault_reset_work_q = NULL;
 294        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 295        if (wq) {
 296                if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
 297                        flush_workqueue(wq);
 298                destroy_workqueue(wq);
 299        }
 300}
 301
 302/**
 303 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 304 * @ioc: per adapter object
 305 * @fault_code: fault code
 306 *
 307 * Return nothing.
 308 */
 309void
 310mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
 311{
 312        pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
 313            ioc->name, fault_code);
 314}
 315
/**
 * mpt3sas_halt_firmware - halt's mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the enduser can
 * obtain a ring buffer from controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	/* no-op unless the fwfault_debug module parameter is set */
	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		/* firmware already faulted: just report the fault code */
		mpt3sas_base_fault_info(ioc , doorbell);
	else {
		/* magic doorbell value that halts controller firmware */
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
			ioc->name);
	}

	/* fwfault_debug == 2: spin here so state can be inspected live;
	 * any other value: take the whole system down */
	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}
 350
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Decodes a non-success IOCStatus into a human-readable string and
 * dumps the originating request frame.  Statuses with no decode string
 * are silently ignored.
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	/* config-page "invalid page" is an expected probe result; skip */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	/* no desc assigned: these fall through to the !desc early return
	 * below and are therefore not reported from this function */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* map the request function to its frame size for the hex dump;
	 * SGL-bearing requests add ioc->sge_size to the fixed frame */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		ioc->name, desc, ioc_status, request_hdr, func_str);

	/* dump is in 32-bit dwords, hence frame_sz/4 */
	_debug_dump_mf(request_hdr, frame_sz/4);
}
 570
 571/**
 572 * _base_display_event_data - verbose translation of firmware asyn events
 573 * @ioc: per adapter object
 574 * @mpi_reply: reply mf payload returned from firmware
 575 *
 576 * Return nothing.
 577 */
 578static void
 579_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
 580        Mpi2EventNotificationReply_t *mpi_reply)
 581{
 582        char *desc = NULL;
 583        u16 event;
 584
 585        if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
 586                return;
 587
 588        event = le16_to_cpu(mpi_reply->Event);
 589
 590        switch (event) {
 591        case MPI2_EVENT_LOG_DATA:
 592                desc = "Log Data";
 593                break;
 594        case MPI2_EVENT_STATE_CHANGE:
 595                desc = "Status Change";
 596                break;
 597        case MPI2_EVENT_HARD_RESET_RECEIVED:
 598                desc = "Hard Reset Received";
 599                break;
 600        case MPI2_EVENT_EVENT_CHANGE:
 601                desc = "Event Change";
 602                break;
 603        case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
 604                desc = "Device Status Change";
 605                break;
 606        case MPI2_EVENT_IR_OPERATION_STATUS:
 607                if (!ioc->hide_ir_msg)
 608                        desc = "IR Operation Status";
 609                break;
 610        case MPI2_EVENT_SAS_DISCOVERY:
 611        {
 612                Mpi2EventDataSasDiscovery_t *event_data =
 613                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
 614                pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
 615                    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
 616                    "start" : "stop");
 617                if (event_data->DiscoveryStatus)
 618                        pr_info("discovery_status(0x%08x)",
 619                            le32_to_cpu(event_data->DiscoveryStatus));
 620                        pr_info("\n");
 621                return;
 622        }
 623        case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
 624                desc = "SAS Broadcast Primitive";
 625                break;
 626        case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
 627                desc = "SAS Init Device Status Change";
 628                break;
 629        case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
 630                desc = "SAS Init Table Overflow";
 631                break;
 632        case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
 633                desc = "SAS Topology Change List";
 634                break;
 635        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
 636                desc = "SAS Enclosure Device Status Change";
 637                break;
 638        case MPI2_EVENT_IR_VOLUME:
 639                if (!ioc->hide_ir_msg)
 640                        desc = "IR Volume";
 641                break;
 642        case MPI2_EVENT_IR_PHYSICAL_DISK:
 643                if (!ioc->hide_ir_msg)
 644                        desc = "IR Physical Disk";
 645                break;
 646        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
 647                if (!ioc->hide_ir_msg)
 648                        desc = "IR Configuration Change List";
 649                break;
 650        case MPI2_EVENT_LOG_ENTRY_ADDED:
 651                if (!ioc->hide_ir_msg)
 652                        desc = "Log Entry Added";
 653                break;
 654        case MPI2_EVENT_TEMP_THRESHOLD:
 655                desc = "Temperature Threshold";
 656                break;
 657        }
 658
 659        if (!desc)
 660                return;
 661
 662        pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
 663}
 664
 665/**
 666 * _base_sas_log_info - verbose translation of firmware log info
 667 * @ioc: per adapter object
 668 * @log_info: log info
 669 *
 670 * Return nothing.
 671 */
 672static void
 673_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
 674{
 675        union loginfo_type {
 676                u32     loginfo;
 677                struct {
 678                        u32     subcode:16;
 679                        u32     code:8;
 680                        u32     originator:4;
 681                        u32     bus_type:4;
 682                } dw;
 683        };
 684        union loginfo_type sas_loginfo;
 685        char *originator_str = NULL;
 686
 687        sas_loginfo.loginfo = log_info;
 688        if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
 689                return;
 690
 691        /* each nexus loss loginfo */
 692        if (log_info == 0x31170000)
 693                return;
 694
 695        /* eat the loginfos associated with task aborts */
 696        if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
 697            0x31140000 || log_info == 0x31130000))
 698                return;
 699
 700        switch (sas_loginfo.dw.originator) {
 701        case 0:
 702                originator_str = "IOP";
 703                break;
 704        case 1:
 705                originator_str = "PL";
 706                break;
 707        case 2:
 708                if (!ioc->hide_ir_msg)
 709                        originator_str = "IR";
 710                else
 711                        originator_str = "WarpDrive";
 712                break;
 713        }
 714
 715        pr_warn(MPT3SAS_FMT
 716                "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
 717                ioc->name, log_info,
 718             originator_str, sas_loginfo.dw.code,
 719             sas_loginfo.dw.subcode);
 720}
 721
 722/**
 723 * _base_display_reply_info -
 724 * @ioc: per adapter object
 725 * @smid: system request message index
 726 * @msix_index: MSIX table index supplied by the OS
 727 * @reply: reply message frame(lower 32bit addr)
 728 *
 729 * Return nothing.
 730 */
 731static void
 732_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 733        u32 reply)
 734{
 735        MPI2DefaultReply_t *mpi_reply;
 736        u16 ioc_status;
 737        u32 loginfo = 0;
 738
 739        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 740        if (unlikely(!mpi_reply)) {
 741                pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
 742                    ioc->name, __FILE__, __LINE__, __func__);
 743                return;
 744        }
 745        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
 746
 747        if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
 748            (ioc->logging_level & MPT_DEBUG_REPLY)) {
 749                _base_sas_ioc_info(ioc , mpi_reply,
 750                   mpt3sas_base_get_msg_frame(ioc, smid));
 751        }
 752
 753        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
 754                loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
 755                _base_sas_log_info(ioc, loginfo);
 756        }
 757
 758        if (ioc_status || loginfo) {
 759                ioc_status &= MPI2_IOCSTATUS_MASK;
 760                mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
 761        }
 762}
 763
/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	/* EVENT_ACK completions are routed to the delayed-internal-cmds
	 * machinery instead of the base_cmds completion below */
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	/* no base command outstanding: stale/unexpected completion */
	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		/* MsgLength is in 32-bit dwords */
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	/* clear PENDING before waking the waiter in complete() */
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}
 797
 798/**
 799 * _base_async_event - main callback handler for firmware asyn events
 800 * @ioc: per adapter object
 801 * @msix_index: MSIX table index supplied by the OS
 802 * @reply: reply message frame(lower 32bit addr)
 803 *
 804 * Return 1 meaning mf should be freed from _base_interrupt
 805 *        0 means the mf is freed from this function.
 806 */
 807static u8
 808_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
 809{
 810        Mpi2EventNotificationReply_t *mpi_reply;
 811        Mpi2EventAckRequest_t *ack_request;
 812        u16 smid;
 813        struct _event_ack_list *delayed_event_ack;
 814
 815        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 816        if (!mpi_reply)
 817                return 1;
 818        if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
 819                return 1;
 820
 821        _base_display_event_data(ioc, mpi_reply);
 822
 823        if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
 824                goto out;
 825        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 826        if (!smid) {
 827                delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
 828                                        GFP_ATOMIC);
 829                if (!delayed_event_ack)
 830                        goto out;
 831                INIT_LIST_HEAD(&delayed_event_ack->list);
 832                delayed_event_ack->Event = mpi_reply->Event;
 833                delayed_event_ack->EventContext = mpi_reply->EventContext;
 834                list_add_tail(&delayed_event_ack->list,
 835                                &ioc->delayed_event_ack_list);
 836                dewtprintk(ioc, pr_info(MPT3SAS_FMT
 837                                "DELAYED: EVENT ACK: event (0x%04x)\n",
 838                                ioc->name, le16_to_cpu(mpi_reply->Event)));
 839                goto out;
 840        }
 841
 842        ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
 843        memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
 844        ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
 845        ack_request->Event = mpi_reply->Event;
 846        ack_request->EventContext = mpi_reply->EventContext;
 847        ack_request->VF_ID = 0;  /* TODO */
 848        ack_request->VP_ID = 0;
 849        mpt3sas_base_put_smid_default(ioc, smid);
 850
 851 out:
 852
 853        /* scsih callback handler */
 854        mpt3sas_scsih_event_callback(ioc, msix_index, reply);
 855
 856        /* ctl callback handler */
 857        mpt3sas_ctl_event_callback(ioc, msix_index, reply);
 858
 859        return 1;
 860}
 861
 862/**
 863 * _base_get_cb_idx - obtain the callback index
 864 * @ioc: per adapter object
 865 * @smid: system request message index
 866 *
 867 * Return callback index.
 868 */
 869static u8
 870_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 871{
 872        int i;
 873        u8 cb_idx;
 874
 875        if (smid < ioc->hi_priority_smid) {
 876                i = smid - 1;
 877                cb_idx = ioc->scsi_lookup[i].cb_idx;
 878        } else if (smid < ioc->internal_smid) {
 879                i = smid - ioc->hi_priority_smid;
 880                cb_idx = ioc->hpr_lookup[i].cb_idx;
 881        } else if (smid <= ioc->hba_queue_depth) {
 882                i = smid - ioc->internal_smid;
 883                cb_idx = ioc->internal_lookup[i].cb_idx;
 884        } else
 885                cb_idx = 0xFF;
 886        return cb_idx;
 887}
 888
 889/**
 890 * _base_mask_interrupts - disable interrupts
 891 * @ioc: per adapter object
 892 *
 893 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 894 *
 895 * Return nothing.
 896 */
 897static void
 898_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
 899{
 900        u32 him_register;
 901
 902        ioc->mask_interrupts = 1;
 903        him_register = readl(&ioc->chip->HostInterruptMask);
 904        him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
 905        writel(him_register, &ioc->chip->HostInterruptMask);
 906        readl(&ioc->chip->HostInterruptMask);
 907}
 908
 909/**
 910 * _base_unmask_interrupts - enable interrupts
 911 * @ioc: per adapter object
 912 *
 913 * Enabling only Reply Interrupts
 914 *
 915 * Return nothing.
 916 */
 917static void
 918_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
 919{
 920        u32 him_register;
 921
 922        him_register = readl(&ioc->chip->HostInterruptMask);
 923        him_register &= ~MPI2_HIM_RIM;
 924        writel(him_register, &ioc->chip->HostInterruptMask);
 925        ioc->mask_interrupts = 0;
 926}
 927
/* 64-bit view of a reply post descriptor; the low/high halves allow a
 * cheap check for the "unused" marker (both 32-bit words == 0xFFFFFFFF)
 * in _base_interrupt.
 */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
 935
 936/**
 937 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 938 * @irq: irq number (not used)
 939 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 940 * @r: pt_regs pointer (not used)
 941 *
 942 * Return IRQ_HANDLE if processed, else IRQ_NONE.
 943 */
 944static irqreturn_t
 945_base_interrupt(int irq, void *bus_id)
 946{
 947        struct adapter_reply_queue *reply_q = bus_id;
 948        union reply_descriptor rd;
 949        u32 completed_cmds;
 950        u8 request_desript_type;
 951        u16 smid;
 952        u8 cb_idx;
 953        u32 reply;
 954        u8 msix_index = reply_q->msix_index;
 955        struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
 956        Mpi2ReplyDescriptorsUnion_t *rpf;
 957        u8 rc;
 958
 959        if (ioc->mask_interrupts)
 960                return IRQ_NONE;
 961
 962        if (!atomic_add_unless(&reply_q->busy, 1, 1))
 963                return IRQ_NONE;
 964
 965        rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
 966        request_desript_type = rpf->Default.ReplyFlags
 967             & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 968        if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
 969                atomic_dec(&reply_q->busy);
 970                return IRQ_NONE;
 971        }
 972
 973        completed_cmds = 0;
 974        cb_idx = 0xFF;
 975        do {
 976                rd.word = le64_to_cpu(rpf->Words);
 977                if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
 978                        goto out;
 979                reply = 0;
 980                smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
 981                if (request_desript_type ==
 982                    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
 983                    request_desript_type ==
 984                    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
 985                        cb_idx = _base_get_cb_idx(ioc, smid);
 986                        if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
 987                            (likely(mpt_callbacks[cb_idx] != NULL))) {
 988                                rc = mpt_callbacks[cb_idx](ioc, smid,
 989                                    msix_index, 0);
 990                                if (rc)
 991                                        mpt3sas_base_free_smid(ioc, smid);
 992                        }
 993                } else if (request_desript_type ==
 994                    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
 995                        reply = le32_to_cpu(
 996                            rpf->AddressReply.ReplyFrameAddress);
 997                        if (reply > ioc->reply_dma_max_address ||
 998                            reply < ioc->reply_dma_min_address)
 999                                reply = 0;
1000                        if (smid) {
1001                                cb_idx = _base_get_cb_idx(ioc, smid);
1002                                if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1003                                    (likely(mpt_callbacks[cb_idx] != NULL))) {
1004                                        rc = mpt_callbacks[cb_idx](ioc, smid,
1005                                            msix_index, reply);
1006                                        if (reply)
1007                                                _base_display_reply_info(ioc,
1008                                                    smid, msix_index, reply);
1009                                        if (rc)
1010                                                mpt3sas_base_free_smid(ioc,
1011                                                    smid);
1012                                }
1013                        } else {
1014                                _base_async_event(ioc, msix_index, reply);
1015                        }
1016
1017                        /* reply free queue handling */
1018                        if (reply) {
1019                                ioc->reply_free_host_index =
1020                                    (ioc->reply_free_host_index ==
1021                                    (ioc->reply_free_queue_depth - 1)) ?
1022                                    0 : ioc->reply_free_host_index + 1;
1023                                ioc->reply_free[ioc->reply_free_host_index] =
1024                                    cpu_to_le32(reply);
1025                                wmb();
1026                                writel(ioc->reply_free_host_index,
1027                                    &ioc->chip->ReplyFreeHostIndex);
1028                        }
1029                }
1030
1031                rpf->Words = cpu_to_le64(ULLONG_MAX);
1032                reply_q->reply_post_host_index =
1033                    (reply_q->reply_post_host_index ==
1034                    (ioc->reply_post_queue_depth - 1)) ? 0 :
1035                    reply_q->reply_post_host_index + 1;
1036                request_desript_type =
1037                    reply_q->reply_post_free[reply_q->reply_post_host_index].
1038                    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1039                completed_cmds++;
1040                if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1041                        goto out;
1042                if (!reply_q->reply_post_host_index)
1043                        rpf = reply_q->reply_post_free;
1044                else
1045                        rpf++;
1046        } while (1);
1047
1048 out:
1049
1050        if (!completed_cmds) {
1051                atomic_dec(&reply_q->busy);
1052                return IRQ_NONE;
1053        }
1054
1055        wmb();
1056        if (ioc->is_warpdrive) {
1057                writel(reply_q->reply_post_host_index,
1058                ioc->reply_post_host_index[msix_index]);
1059                atomic_dec(&reply_q->busy);
1060                return IRQ_HANDLED;
1061        }
1062
1063        /* Update Reply Post Host Index.
1064         * For those HBA's which support combined reply queue feature
1065         * 1. Get the correct Supplemental Reply Post Host Index Register.
1066         *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1067         *    Index Register address bank i.e replyPostRegisterIndex[],
1068         * 2. Then update this register with new reply host index value
1069         *    in ReplyPostIndex field and the MSIxIndex field with
1070         *    msix_index value reduced to a value between 0 and 7,
1071         *    using a modulo 8 operation. Since each Supplemental Reply Post
1072         *    Host Index Register supports 8 MSI-X vectors.
1073         *
1074         * For other HBA's just update the Reply Post Host Index register with
1075         * new reply host index value in ReplyPostIndex Field and msix_index
1076         * value in MSIxIndex field.
1077         */
1078        if (ioc->msix96_vector)
1079                writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1080                        MPI2_RPHI_MSIX_INDEX_SHIFT),
1081                        ioc->replyPostRegisterIndex[msix_index/8]);
1082        else
1083                writel(reply_q->reply_post_host_index | (msix_index <<
1084                        MPI2_RPHI_MSIX_INDEX_SHIFT),
1085                        &ioc->chip->ReplyPostHostIndex);
1086        atomic_dec(&reply_q->busy);
1087        return IRQ_HANDLED;
1088}
1089
1090/**
1091 * _base_is_controller_msix_enabled - is controller support muli-reply queues
1092 * @ioc: per adapter object
1093 *
1094 */
1095static inline int
1096_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1097{
1098        return (ioc->facts.IOCCapabilities &
1099            MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1100}
1101
1102/**
1103 * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
1104 * @ioc: per adapter object
1105 * Context: ISR conext
1106 *
1107 * Called when a Task Management request has completed. We want
1108 * to flush the other reply queues so all the outstanding IO has been
1109 * completed back to OS before we process the TM completetion.
1110 *
1111 * Return nothing.
1112 */
1113void
1114mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1115{
1116        struct adapter_reply_queue *reply_q;
1117
1118        /* If MSIX capability is turned off
1119         * then multi-queues are not enabled
1120         */
1121        if (!_base_is_controller_msix_enabled(ioc))
1122                return;
1123
1124        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1125                if (ioc->shost_recovery)
1126                        return;
1127                /* TMs are on msix_index == 0 */
1128                if (reply_q->msix_index == 0)
1129                        continue;
1130                _base_interrupt(reply_q->vector, (void *)reply_q);
1131        }
1132}
1133
1134/**
1135 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1136 * @cb_idx: callback index
1137 *
1138 * Return nothing.
1139 */
1140void
1141mpt3sas_base_release_callback_handler(u8 cb_idx)
1142{
1143        mpt_callbacks[cb_idx] = NULL;
1144}
1145
1146/**
1147 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1148 * @cb_func: callback function
1149 *
1150 * Returns cb_func.
1151 */
1152u8
1153mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1154{
1155        u8 cb_idx;
1156
1157        for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1158                if (mpt_callbacks[cb_idx] == NULL)
1159                        break;
1160
1161        mpt_callbacks[cb_idx] = cb_func;
1162        return cb_idx;
1163}
1164
1165/**
1166 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1167 *
1168 * Return nothing.
1169 */
1170void
1171mpt3sas_base_initialize_callback_handler(void)
1172{
1173        u8 cb_idx;
1174
1175        for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1176                mpt3sas_base_release_callback_handler(cb_idx);
1177}
1178
1179
1180/**
1181 * _base_build_zero_len_sge - build zero length sg entry
1182 * @ioc: per adapter object
1183 * @paddr: virtual address for SGE
1184 *
1185 * Create a zero length scatter gather entry to insure the IOCs hardware has
1186 * something to use if the target device goes brain dead and tries
1187 * to send data even when none is asked for.
1188 *
1189 * Return nothing.
1190 */
1191static void
1192_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1193{
1194        u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1195            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1196            MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1197            MPI2_SGE_FLAGS_SHIFT);
1198        ioc->base_add_sg_single(paddr, flags_length, -1);
1199}
1200
1201/**
1202 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1203 * @paddr: virtual address for SGE
1204 * @flags_length: SGE flags and data transfer length
1205 * @dma_addr: Physical address
1206 *
1207 * Return nothing.
1208 */
1209static void
1210_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1211{
1212        Mpi2SGESimple32_t *sgel = paddr;
1213
1214        flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1215            MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1216        sgel->FlagsLength = cpu_to_le32(flags_length);
1217        sgel->Address = cpu_to_le32(dma_addr);
1218}
1219
1220
1221/**
1222 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1223 * @paddr: virtual address for SGE
1224 * @flags_length: SGE flags and data transfer length
1225 * @dma_addr: Physical address
1226 *
1227 * Return nothing.
1228 */
1229static void
1230_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1231{
1232        Mpi2SGESimple64_t *sgel = paddr;
1233
1234        flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1235            MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1236        sgel->FlagsLength = cpu_to_le32(flags_length);
1237        sgel->Address = cpu_to_le64(dma_addr);
1238}
1239
1240/**
1241 * _base_get_chain_buffer_tracker - obtain chain tracker
1242 * @ioc: per adapter object
1243 * @smid: smid associated to an IO request
1244 *
1245 * Returns chain tracker(from ioc->free_chain_list)
1246 */
1247static struct chain_tracker *
1248_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1249{
1250        struct chain_tracker *chain_req;
1251        unsigned long flags;
1252
1253        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1254        if (list_empty(&ioc->free_chain_list)) {
1255                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1256                dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1257                        "chain buffers not available\n", ioc->name));
1258                return NULL;
1259        }
1260        chain_req = list_entry(ioc->free_chain_list.next,
1261            struct chain_tracker, tracker_list);
1262        list_del_init(&chain_req->tracker_list);
1263        list_add_tail(&chain_req->tracker_list,
1264            &ioc->scsi_lookup[smid - 1].chain_list);
1265        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1266        return chain_req;
1267}
1268
1269
1270/**
1271 * _base_build_sg - build generic sg
1272 * @ioc: per adapter object
1273 * @psge: virtual address for SGE
1274 * @data_out_dma: physical address for WRITES
1275 * @data_out_sz: data xfer size for WRITES
1276 * @data_in_dma: physical address for READS
1277 * @data_in_sz: data xfer size for READS
1278 *
1279 * Return nothing.
1280 */
1281static void
1282_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1283        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1284        size_t data_in_sz)
1285{
1286        u32 sgl_flags;
1287
1288        if (!data_out_sz && !data_in_sz) {
1289                _base_build_zero_len_sge(ioc, psge);
1290                return;
1291        }
1292
1293        if (data_out_sz && data_in_sz) {
1294                /* WRITE sgel first */
1295                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1296                    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1297                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1298                ioc->base_add_sg_single(psge, sgl_flags |
1299                    data_out_sz, data_out_dma);
1300
1301                /* incr sgel */
1302                psge += ioc->sge_size;
1303
1304                /* READ sgel last */
1305                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1306                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1307                    MPI2_SGE_FLAGS_END_OF_LIST);
1308                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1309                ioc->base_add_sg_single(psge, sgl_flags |
1310                    data_in_sz, data_in_dma);
1311        } else if (data_out_sz) /* WRITE */ {
1312                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1313                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1314                    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1315                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1316                ioc->base_add_sg_single(psge, sgl_flags |
1317                    data_out_sz, data_out_dma);
1318        } else if (data_in_sz) /* READ */ {
1319                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1320                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1321                    MPI2_SGE_FLAGS_END_OF_LIST);
1322                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1323                ioc->base_add_sg_single(psge, sgl_flags |
1324                    data_in_sz, data_in_dma);
1325        }
1326}
1327
1328/* IEEE format sgls */
1329
1330/**
1331 * _base_add_sg_single_ieee - add sg element for IEEE format
1332 * @paddr: virtual address for SGE
1333 * @flags: SGE flags
1334 * @chain_offset: number of 128 byte elements from start of segment
1335 * @length: data transfer length
1336 * @dma_addr: Physical address
1337 *
1338 * Return nothing.
1339 */
1340static void
1341_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
1342        dma_addr_t dma_addr)
1343{
1344        Mpi25IeeeSgeChain64_t *sgel = paddr;
1345
1346        sgel->Flags = flags;
1347        sgel->NextChainOffset = chain_offset;
1348        sgel->Length = cpu_to_le32(length);
1349        sgel->Address = cpu_to_le64(dma_addr);
1350}
1351
1352/**
1353 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1354 * @ioc: per adapter object
1355 * @paddr: virtual address for SGE
1356 *
1357 * Create a zero length scatter gather entry to insure the IOCs hardware has
1358 * something to use if the target device goes brain dead and tries
1359 * to send data even when none is asked for.
1360 *
1361 * Return nothing.
1362 */
1363static void
1364_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1365{
1366        u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1367                MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1368                MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
1369
1370        _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
1371}
1372
1373/**
1374 * _base_build_sg_scmd - main sg creation routine
1375 * @ioc: per adapter object
1376 * @scmd: scsi command
1377 * @smid: system request message index
1378 * Context: none.
1379 *
1380 * The main routine that builds scatter gather table from a given
1381 * scsi request sent via the .queuecommand main handler.
1382 *
1383 * Returns 0 success, anything else error
1384 */
1385static int
1386_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1387                struct scsi_cmnd *scmd, u16 smid)
1388{
1389        Mpi2SCSIIORequest_t *mpi_request;
1390        dma_addr_t chain_dma;
1391        struct scatterlist *sg_scmd;
1392        void *sg_local, *chain;
1393        u32 chain_offset;
1394        u32 chain_length;
1395        u32 chain_flags;
1396        int sges_left;
1397        u32 sges_in_segment;
1398        u32 sgl_flags;
1399        u32 sgl_flags_last_element;
1400        u32 sgl_flags_end_buffer;
1401        struct chain_tracker *chain_req;
1402
1403        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1404
1405        /* init scatter gather flags */
1406        sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
1407        if (scmd->sc_data_direction == DMA_TO_DEVICE)
1408                sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
1409        sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
1410            << MPI2_SGE_FLAGS_SHIFT;
1411        sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
1412            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
1413            << MPI2_SGE_FLAGS_SHIFT;
1414        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1415
1416        sg_scmd = scsi_sglist(scmd);
1417        sges_left = scsi_dma_map(scmd);
1418        if (sges_left < 0) {
1419                sdev_printk(KERN_ERR, scmd->device,
1420                 "pci_map_sg failed: request for %d bytes!\n",
1421                 scsi_bufflen(scmd));
1422                return -ENOMEM;
1423        }
1424
1425        sg_local = &mpi_request->SGL;
1426        sges_in_segment = ioc->max_sges_in_main_message;
1427        if (sges_left <= sges_in_segment)
1428                goto fill_in_last_segment;
1429
1430        mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
1431            (sges_in_segment * ioc->sge_size))/4;
1432
1433        /* fill in main message segment when there is a chain following */
1434        while (sges_in_segment) {
1435                if (sges_in_segment == 1)
1436                        ioc->base_add_sg_single(sg_local,
1437                            sgl_flags_last_element | sg_dma_len(sg_scmd),
1438                            sg_dma_address(sg_scmd));
1439                else
1440                        ioc->base_add_sg_single(sg_local, sgl_flags |
1441                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1442                sg_scmd = sg_next(sg_scmd);
1443                sg_local += ioc->sge_size;
1444                sges_left--;
1445                sges_in_segment--;
1446        }
1447
1448        /* initializing the chain flags and pointers */
1449        chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
1450        chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1451        if (!chain_req)
1452                return -1;
1453        chain = chain_req->chain_buffer;
1454        chain_dma = chain_req->chain_buffer_dma;
1455        do {
1456                sges_in_segment = (sges_left <=
1457                    ioc->max_sges_in_chain_message) ? sges_left :
1458                    ioc->max_sges_in_chain_message;
1459                chain_offset = (sges_left == sges_in_segment) ?
1460                    0 : (sges_in_segment * ioc->sge_size)/4;
1461                chain_length = sges_in_segment * ioc->sge_size;
1462                if (chain_offset) {
1463                        chain_offset = chain_offset <<
1464                            MPI2_SGE_CHAIN_OFFSET_SHIFT;
1465                        chain_length += ioc->sge_size;
1466                }
1467                ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
1468                    chain_length, chain_dma);
1469                sg_local = chain;
1470                if (!chain_offset)
1471                        goto fill_in_last_segment;
1472
1473                /* fill in chain segments */
1474                while (sges_in_segment) {
1475                        if (sges_in_segment == 1)
1476                                ioc->base_add_sg_single(sg_local,
1477                                    sgl_flags_last_element |
1478                                    sg_dma_len(sg_scmd),
1479                                    sg_dma_address(sg_scmd));
1480                        else
1481                                ioc->base_add_sg_single(sg_local, sgl_flags |
1482                                    sg_dma_len(sg_scmd),
1483                                    sg_dma_address(sg_scmd));
1484                        sg_scmd = sg_next(sg_scmd);
1485                        sg_local += ioc->sge_size;
1486                        sges_left--;
1487                        sges_in_segment--;
1488                }
1489
1490                chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1491                if (!chain_req)
1492                        return -1;
1493                chain = chain_req->chain_buffer;
1494                chain_dma = chain_req->chain_buffer_dma;
1495        } while (1);
1496
1497
1498 fill_in_last_segment:
1499
1500        /* fill the last segment */
1501        while (sges_left) {
1502                if (sges_left == 1)
1503                        ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
1504                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1505                else
1506                        ioc->base_add_sg_single(sg_local, sgl_flags |
1507                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1508                sg_scmd = sg_next(sg_scmd);
1509                sg_local += ioc->sge_size;
1510                sges_left--;
1511        }
1512
1513        return 0;
1514}
1515
1516/**
1517 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
1518 * @ioc: per adapter object
1519 * @scmd: scsi command
1520 * @smid: system request message index
1521 * Context: none.
1522 *
1523 * The main routine that builds scatter gather table from a given
1524 * scsi request sent via the .queuecommand main handler.
1525 *
1526 * Returns 0 success, anything else error
1527 */
1528static int
1529_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1530        struct scsi_cmnd *scmd, u16 smid)
1531{
1532        Mpi2SCSIIORequest_t *mpi_request;
1533        dma_addr_t chain_dma;
1534        struct scatterlist *sg_scmd;
1535        void *sg_local, *chain;
1536        u32 chain_offset;
1537        u32 chain_length;
1538        int sges_left;
1539        u32 sges_in_segment;
1540        u8 simple_sgl_flags;
1541        u8 simple_sgl_flags_last;
1542        u8 chain_sgl_flags;
1543        struct chain_tracker *chain_req;
1544
1545        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1546
1547        /* init scatter gather flags */
1548        simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1549            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1550        simple_sgl_flags_last = simple_sgl_flags |
1551            MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1552        chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1553            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1554
1555        sg_scmd = scsi_sglist(scmd);
1556        sges_left = scsi_dma_map(scmd);
1557        if (sges_left < 0) {
1558                sdev_printk(KERN_ERR, scmd->device,
1559                        "pci_map_sg failed: request for %d bytes!\n",
1560                        scsi_bufflen(scmd));
1561                return -ENOMEM;
1562        }
1563
1564        sg_local = &mpi_request->SGL;
1565        sges_in_segment = (ioc->request_sz -
1566            offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1567        if (sges_left <= sges_in_segment)
1568                goto fill_in_last_segment;
1569
1570        mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1571            (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1572
1573        /* fill in main message segment when there is a chain following */
1574        while (sges_in_segment > 1) {
1575                _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1576                    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1577                sg_scmd = sg_next(sg_scmd);
1578                sg_local += ioc->sge_size_ieee;
1579                sges_left--;
1580                sges_in_segment--;
1581        }
1582
1583        /* initializing the pointers */
1584        chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1585        if (!chain_req)
1586                return -1;
1587        chain = chain_req->chain_buffer;
1588        chain_dma = chain_req->chain_buffer_dma;
1589        do {
1590                sges_in_segment = (sges_left <=
1591                    ioc->max_sges_in_chain_message) ? sges_left :
1592                    ioc->max_sges_in_chain_message;
1593                chain_offset = (sges_left == sges_in_segment) ?
1594                    0 : sges_in_segment;
1595                chain_length = sges_in_segment * ioc->sge_size_ieee;
1596                if (chain_offset)
1597                        chain_length += ioc->sge_size_ieee;
1598                _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
1599                    chain_offset, chain_length, chain_dma);
1600
1601                sg_local = chain;
1602                if (!chain_offset)
1603                        goto fill_in_last_segment;
1604
1605                /* fill in chain segments */
1606                while (sges_in_segment) {
1607                        _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1608                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1609                        sg_scmd = sg_next(sg_scmd);
1610                        sg_local += ioc->sge_size_ieee;
1611                        sges_left--;
1612                        sges_in_segment--;
1613                }
1614
1615                chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1616                if (!chain_req)
1617                        return -1;
1618                chain = chain_req->chain_buffer;
1619                chain_dma = chain_req->chain_buffer_dma;
1620        } while (1);
1621
1622
1623 fill_in_last_segment:
1624
1625        /* fill the last segment */
1626        while (sges_left > 0) {
1627                if (sges_left == 1)
1628                        _base_add_sg_single_ieee(sg_local,
1629                            simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
1630                            sg_dma_address(sg_scmd));
1631                else
1632                        _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1633                            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1634                sg_scmd = sg_next(sg_scmd);
1635                sg_local += ioc->sge_size_ieee;
1636                sges_left--;
1637        }
1638
1639        return 0;
1640}
1641
1642/**
1643 * _base_build_sg_ieee - build generic sg for IEEE format
1644 * @ioc: per adapter object
1645 * @psge: virtual address for SGE
1646 * @data_out_dma: physical address for WRITES
1647 * @data_out_sz: data xfer size for WRITES
1648 * @data_in_dma: physical address for READS
1649 * @data_in_sz: data xfer size for READS
1650 *
1651 * Return nothing.
1652 */
1653static void
1654_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
1655        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1656        size_t data_in_sz)
1657{
1658        u8 sgl_flags;
1659
1660        if (!data_out_sz && !data_in_sz) {
1661                _base_build_zero_len_sge_ieee(ioc, psge);
1662                return;
1663        }
1664
1665        if (data_out_sz && data_in_sz) {
1666                /* WRITE sgel first */
1667                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1668                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1669                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1670                    data_out_dma);
1671
1672                /* incr sgel */
1673                psge += ioc->sge_size_ieee;
1674
1675                /* READ sgel last */
1676                sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1677                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1678                    data_in_dma);
1679        } else if (data_out_sz) /* WRITE */ {
1680                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1681                    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1682                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1683                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1684                    data_out_dma);
1685        } else if (data_in_sz) /* READ */ {
1686                sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1687                    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1688                    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1689                _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1690                    data_in_dma);
1691        }
1692}
1693
1694#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1695
1696/**
1697 * _base_config_dma_addressing - set dma addressing
1698 * @ioc: per adapter object
1699 * @pdev: PCI device struct
1700 *
1701 * Returns 0 for success, non-zero for failure.
1702 */
1703static int
1704_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1705{
1706        struct sysinfo s;
1707        u64 consistent_dma_mask;
1708
1709        if (ioc->dma_mask)
1710                consistent_dma_mask = DMA_BIT_MASK(64);
1711        else
1712                consistent_dma_mask = DMA_BIT_MASK(32);
1713
1714        if (sizeof(dma_addr_t) > 4) {
1715                const uint64_t required_mask =
1716                    dma_get_required_mask(&pdev->dev);
1717                if ((required_mask > DMA_BIT_MASK(32)) &&
1718                    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1719                    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
1720                        ioc->base_add_sg_single = &_base_add_sg_single_64;
1721                        ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1722                        ioc->dma_mask = 64;
1723                        goto out;
1724                }
1725        }
1726
1727        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1728            && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1729                ioc->base_add_sg_single = &_base_add_sg_single_32;
1730                ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1731                ioc->dma_mask = 32;
1732        } else
1733                return -ENODEV;
1734
1735 out:
1736        si_meminfo(&s);
1737        pr_info(MPT3SAS_FMT
1738                "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1739                ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
1740
1741        return 0;
1742}
1743
1744static int
1745_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
1746                                      struct pci_dev *pdev)
1747{
1748        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1749                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1750                        return -ENODEV;
1751        }
1752        return 0;
1753}
1754
1755/**
1756 * _base_check_enable_msix - checks MSIX capabable.
1757 * @ioc: per adapter object
1758 *
1759 * Check to see if card is capable of MSIX, and set number
1760 * of available msix vectors
1761 */
1762static int
1763_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1764{
1765        int base;
1766        u16 message_control;
1767
1768        /* Check whether controller SAS2008 B0 controller,
1769         * if it is SAS2008 B0 controller use IO-APIC instead of MSIX
1770         */
1771        if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1772            ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
1773                return -EINVAL;
1774        }
1775
1776        base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1777        if (!base) {
1778                dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
1779                        ioc->name));
1780                return -EINVAL;
1781        }
1782
1783        /* get msix vector count */
1784        /* NUMA_IO not supported for older controllers */
1785        if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1786            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1787            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1788            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1789            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1790            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1791            ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1792                ioc->msix_vector_count = 1;
1793        else {
1794                pci_read_config_word(ioc->pdev, base + 2, &message_control);
1795                ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1796        }
1797        dinitprintk(ioc, pr_info(MPT3SAS_FMT
1798                "msix is supported, vector_count(%d)\n",
1799                ioc->name, ioc->msix_vector_count));
1800        return 0;
1801}
1802
1803/**
1804 * _base_free_irq - free irq
1805 * @ioc: per adapter object
1806 *
1807 * Freeing respective reply_queue from the list.
1808 */
1809static void
1810_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
1811{
1812        struct adapter_reply_queue *reply_q, *next;
1813
1814        if (list_empty(&ioc->reply_queue_list))
1815                return;
1816
1817        list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1818                list_del(&reply_q->list);
1819                if (smp_affinity_enable) {
1820                        irq_set_affinity_hint(reply_q->vector, NULL);
1821                        free_cpumask_var(reply_q->affinity_hint);
1822                }
1823                free_irq(reply_q->vector, reply_q);
1824                kfree(reply_q);
1825        }
1826}
1827
1828/**
1829 * _base_request_irq - request irq
1830 * @ioc: per adapter object
1831 * @index: msix index into vector table
1832 * @vector: irq vector
1833 *
1834 * Inserting respective reply_queue into the list.
1835 */
1836static int
1837_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
1838{
1839        struct adapter_reply_queue *reply_q;
1840        int r;
1841
1842        reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1843        if (!reply_q) {
1844                pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
1845                    ioc->name, (int)sizeof(struct adapter_reply_queue));
1846                return -ENOMEM;
1847        }
1848        reply_q->ioc = ioc;
1849        reply_q->msix_index = index;
1850        reply_q->vector = vector;
1851
1852        if (smp_affinity_enable) {
1853                if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
1854                        kfree(reply_q);
1855                        return -ENOMEM;
1856                }
1857        }
1858
1859        atomic_set(&reply_q->busy, 0);
1860        if (ioc->msix_enable)
1861                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1862                    ioc->driver_name, ioc->id, index);
1863        else
1864                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1865                    ioc->driver_name, ioc->id);
1866        r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1867            reply_q);
1868        if (r) {
1869                pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
1870                    reply_q->name, vector);
1871                free_cpumask_var(reply_q->affinity_hint);
1872                kfree(reply_q);
1873                return -EBUSY;
1874        }
1875
1876        INIT_LIST_HEAD(&reply_q->list);
1877        list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1878        return 0;
1879}
1880
1881/**
1882 * _base_assign_reply_queues - assigning msix index for each cpu
1883 * @ioc: per adapter object
1884 *
1885 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
1886 *
1887 * It would nice if we could call irq_set_affinity, however it is not
1888 * an exported symbol
1889 */
1890static void
1891_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1892{
1893        unsigned int cpu, nr_cpus, nr_msix, index = 0;
1894        struct adapter_reply_queue *reply_q;
1895
1896        if (!_base_is_controller_msix_enabled(ioc))
1897                return;
1898
1899        memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1900
1901        nr_cpus = num_online_cpus();
1902        nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
1903                                               ioc->facts.MaxMSIxVectors);
1904        if (!nr_msix)
1905                return;
1906
1907        cpu = cpumask_first(cpu_online_mask);
1908
1909        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1910
1911                unsigned int i, group = nr_cpus / nr_msix;
1912
1913                if (cpu >= nr_cpus)
1914                        break;
1915
1916                if (index < nr_cpus % nr_msix)
1917                        group++;
1918
1919                for (i = 0 ; i < group ; i++) {
1920                        ioc->cpu_msix_table[cpu] = index;
1921                        if (smp_affinity_enable)
1922                                cpumask_or(reply_q->affinity_hint,
1923                                   reply_q->affinity_hint, get_cpu_mask(cpu));
1924                        cpu = cpumask_next(cpu, cpu_online_mask);
1925                }
1926                if (smp_affinity_enable)
1927                        if (irq_set_affinity_hint(reply_q->vector,
1928                                           reply_q->affinity_hint))
1929                                dinitprintk(ioc, pr_info(MPT3SAS_FMT
1930                                 "Err setting affinity hint to irq vector %d\n",
1931                                 ioc->name, reply_q->vector));
1932                index++;
1933        }
1934}
1935
1936/**
1937 * _base_disable_msix - disables msix
1938 * @ioc: per adapter object
1939 *
1940 */
1941static void
1942_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1943{
1944        if (!ioc->msix_enable)
1945                return;
1946        pci_disable_msix(ioc->pdev);
1947        ioc->msix_enable = 0;
1948}
1949
1950/**
1951 * _base_enable_msix - enables msix, failback to io_apic
1952 * @ioc: per adapter object
1953 *
1954 */
1955static int
1956_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1957{
1958        struct msix_entry *entries, *a;
1959        int r;
1960        int i;
1961        u8 try_msix = 0;
1962
1963        if (msix_disable == -1 || msix_disable == 0)
1964                try_msix = 1;
1965
1966        if (!try_msix)
1967                goto try_ioapic;
1968
1969        if (_base_check_enable_msix(ioc) != 0)
1970                goto try_ioapic;
1971
1972        ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1973            ioc->msix_vector_count);
1974
1975        printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
1976          ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
1977          ioc->cpu_count, max_msix_vectors);
1978
1979        if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1980                max_msix_vectors = 8;
1981
1982        if (max_msix_vectors > 0) {
1983                ioc->reply_queue_count = min_t(int, max_msix_vectors,
1984                        ioc->reply_queue_count);
1985                ioc->msix_vector_count = ioc->reply_queue_count;
1986        } else if (max_msix_vectors == 0)
1987                goto try_ioapic;
1988
1989        if (ioc->msix_vector_count < ioc->cpu_count)
1990                smp_affinity_enable = 0;
1991
1992        entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1993            GFP_KERNEL);
1994        if (!entries) {
1995                dfailprintk(ioc, pr_info(MPT3SAS_FMT
1996                        "kcalloc failed @ at %s:%d/%s() !!!\n",
1997                        ioc->name, __FILE__, __LINE__, __func__));
1998                goto try_ioapic;
1999        }
2000
2001        for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
2002                a->entry = i;
2003
2004        r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
2005        if (r) {
2006                dfailprintk(ioc, pr_info(MPT3SAS_FMT
2007                        "pci_enable_msix_exact failed (r=%d) !!!\n",
2008                        ioc->name, r));
2009                kfree(entries);
2010                goto try_ioapic;
2011        }
2012
2013        ioc->msix_enable = 1;
2014        for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
2015                r = _base_request_irq(ioc, i, a->vector);
2016                if (r) {
2017                        _base_free_irq(ioc);
2018                        _base_disable_msix(ioc);
2019                        kfree(entries);
2020                        goto try_ioapic;
2021                }
2022        }
2023
2024        kfree(entries);
2025        return 0;
2026
2027/* failback to io_apic interrupt routing */
2028 try_ioapic:
2029
2030        ioc->reply_queue_count = 1;
2031        r = _base_request_irq(ioc, 0, ioc->pdev->irq);
2032
2033        return r;
2034}
2035
2036/**
2037 * mpt3sas_base_unmap_resources - free controller resources
2038 * @ioc: per adapter object
2039 */
2040void
2041mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2042{
2043        struct pci_dev *pdev = ioc->pdev;
2044
2045        dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
2046                ioc->name, __func__));
2047
2048        _base_free_irq(ioc);
2049        _base_disable_msix(ioc);
2050
2051        if (ioc->msix96_vector) {
2052                kfree(ioc->replyPostRegisterIndex);
2053                ioc->replyPostRegisterIndex = NULL;
2054        }
2055
2056        if (ioc->chip_phys) {
2057                iounmap(ioc->chip);
2058                ioc->chip_phys = 0;
2059        }
2060
2061        if (pci_is_enabled(pdev)) {
2062                pci_release_selected_regions(ioc->pdev, ioc->bars);
2063                pci_disable_pcie_error_reporting(pdev);
2064                pci_disable_device(pdev);
2065        }
2066}
2067
2068/**
2069 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
2070 * @ioc: per adapter object
2071 *
2072 * Returns 0 for success, non-zero for failure.
2073 */
2074int
2075mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2076{
2077        struct pci_dev *pdev = ioc->pdev;
2078        u32 memap_sz;
2079        u32 pio_sz;
2080        int i, r = 0;
2081        u64 pio_chip = 0;
2082        u64 chip_phys = 0;
2083        struct adapter_reply_queue *reply_q;
2084
2085        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
2086            ioc->name, __func__));
2087
2088        ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2089        if (pci_enable_device_mem(pdev)) {
2090                pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
2091                        ioc->name);
2092                ioc->bars = 0;
2093                return -ENODEV;
2094        }
2095
2096
2097        if (pci_request_selected_regions(pdev, ioc->bars,
2098            ioc->driver_name)) {
2099                pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
2100                        ioc->name);
2101                ioc->bars = 0;
2102                r = -ENODEV;
2103                goto out_fail;
2104        }
2105
2106/* AER (Advanced Error Reporting) hooks */
2107        pci_enable_pcie_error_reporting(pdev);
2108
2109        pci_set_master(pdev);
2110
2111
2112        if (_base_config_dma_addressing(ioc, pdev) != 0) {
2113                pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
2114                    ioc->name, pci_name(pdev));
2115                r = -ENODEV;
2116                goto out_fail;
2117        }
2118
2119        for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2120             (!memap_sz || !pio_sz); i++) {
2121                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2122                        if (pio_sz)
2123                                continue;
2124                        pio_chip = (u64)pci_resource_start(pdev, i);
2125                        pio_sz = pci_resource_len(pdev, i);
2126                } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2127                        if (memap_sz)
2128                                continue;
2129                        ioc->chip_phys = pci_resource_start(pdev, i);
2130                        chip_phys = (u64)ioc->chip_phys;
2131                        memap_sz = pci_resource_len(pdev, i);
2132                        ioc->chip = ioremap(ioc->chip_phys, memap_sz);
2133                }
2134        }
2135
2136        if (ioc->chip == NULL) {
2137                pr_err(MPT3SAS_FMT "unable to map adapter memory! "
2138                        " or resource not found\n", ioc->name);
2139                r = -EINVAL;
2140                goto out_fail;
2141        }
2142
2143        _base_mask_interrupts(ioc);
2144
2145        r = _base_get_ioc_facts(ioc, CAN_SLEEP);
2146        if (r)
2147                goto out_fail;
2148
2149        if (!ioc->rdpq_array_enable_assigned) {
2150                ioc->rdpq_array_enable = ioc->rdpq_array_capable;
2151                ioc->rdpq_array_enable_assigned = 1;
2152        }
2153
2154        r = _base_enable_msix(ioc);
2155        if (r)
2156                goto out_fail;
2157
2158        /* Use the Combined reply queue feature only for SAS3 C0 & higher
2159         * revision HBAs and also only when reply queue count is greater than 8
2160         */
2161        if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
2162                /* Determine the Supplemental Reply Post Host Index Registers
2163                 * Addresse. Supplemental Reply Post Host Index Registers
2164                 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
2165                 * each register is at offset bytes of
2166                 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
2167                 */
2168                ioc->replyPostRegisterIndex = kcalloc(
2169                     MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
2170                     sizeof(resource_size_t *), GFP_KERNEL);
2171                if (!ioc->replyPostRegisterIndex) {
2172                        dfailprintk(ioc, printk(MPT3SAS_FMT
2173                        "allocation for reply Post Register Index failed!!!\n",
2174                                                                   ioc->name));
2175                        r = -ENOMEM;
2176                        goto out_fail;
2177                }
2178
2179                for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
2180                        ioc->replyPostRegisterIndex[i] = (resource_size_t *)
2181                             ((u8 *)&ioc->chip->Doorbell +
2182                             MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2183                             (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
2184                }
2185        } else
2186                ioc->msix96_vector = 0;
2187
2188        list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
2189                pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
2190                    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
2191                    "IO-APIC enabled"), reply_q->vector);
2192
2193        pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
2194            ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
2195        pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
2196            ioc->name, (unsigned long long)pio_chip, pio_sz);
2197
2198        /* Save PCI configuration state for recovery from PCI AER/EEH errors */
2199        pci_save_state(pdev);
2200        return 0;
2201
2202 out_fail:
2203        mpt3sas_base_unmap_resources(ioc);
2204        return r;
2205}
2206
2207/**
2208 * mpt3sas_base_get_msg_frame - obtain request mf pointer
2209 * @ioc: per adapter object
2210 * @smid: system request message index(smid zero is invalid)
2211 *
2212 * Returns virt pointer to message frame.
2213 */
2214void *
2215mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2216{
2217        return (void *)(ioc->request + (smid * ioc->request_sz));
2218}
2219
2220/**
2221 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
2222 * @ioc: per adapter object
2223 * @smid: system request message index
2224 *
2225 * Returns virt pointer to sense buffer.
2226 */
2227void *
2228mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2229{
2230        return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
2231}
2232
2233/**
2234 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
2235 * @ioc: per adapter object
2236 * @smid: system request message index
2237 *
2238 * Returns phys pointer to the low 32bit address of the sense buffer.
2239 */
2240__le32
2241mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2242{
2243        return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
2244            SCSI_SENSE_BUFFERSIZE));
2245}
2246
2247/**
2248 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
2249 * @ioc: per adapter object
2250 * @phys_addr: lower 32 physical addr of the reply
2251 *
2252 * Converts 32bit lower physical addr into a virt address.
2253 */
2254void *
2255mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
2256{
2257        if (!phys_addr)
2258                return NULL;
2259        return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
2260}
2261
/* Map the executing CPU to its assigned MSI-X index (table filled by
 * _base_assign_reply_queues()).  raw_smp_processor_id() skips the
 * preemption-safety check; an approximate CPU appears acceptable here
 * for queue selection -- NOTE(review): confirm callers tolerate
 * migration between lookup and use.
 */
static inline u8
_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
{
	return ioc->cpu_msix_table[raw_smp_processor_id()];
}
2267
2268/**
2269 * mpt3sas_base_get_smid - obtain a free smid from internal queue
2270 * @ioc: per adapter object
2271 * @cb_idx: callback index
2272 *
2273 * Returns smid (zero is invalid)
2274 */
2275u16
2276mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2277{
2278        unsigned long flags;
2279        struct request_tracker *request;
2280        u16 smid;
2281
2282        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2283        if (list_empty(&ioc->internal_free_list)) {
2284                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2285                pr_err(MPT3SAS_FMT "%s: smid not available\n",
2286                    ioc->name, __func__);
2287                return 0;
2288        }
2289
2290        request = list_entry(ioc->internal_free_list.next,
2291            struct request_tracker, tracker_list);
2292        request->cb_idx = cb_idx;
2293        smid = request->smid;
2294        list_del(&request->tracker_list);
2295        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2296        return smid;
2297}
2298
2299/**
2300 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
2301 * @ioc: per adapter object
2302 * @cb_idx: callback index
2303 * @scmd: pointer to scsi command object
2304 *
2305 * Returns smid (zero is invalid)
2306 */
2307u16
2308mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
2309        struct scsi_cmnd *scmd)
2310{
2311        unsigned long flags;
2312        struct scsiio_tracker *request;
2313        u16 smid;
2314
2315        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2316        if (list_empty(&ioc->free_list)) {
2317                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2318                pr_err(MPT3SAS_FMT "%s: smid not available\n",
2319                    ioc->name, __func__);
2320                return 0;
2321        }
2322
2323        request = list_entry(ioc->free_list.next,
2324            struct scsiio_tracker, tracker_list);
2325        request->scmd = scmd;
2326        request->cb_idx = cb_idx;
2327        smid = request->smid;
2328        request->msix_io = _base_get_msix_index(ioc);
2329        list_del(&request->tracker_list);
2330        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2331        return smid;
2332}
2333
2334/**
2335 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
2336 * @ioc: per adapter object
2337 * @cb_idx: callback index
2338 *
2339 * Returns smid (zero is invalid)
2340 */
2341u16
2342mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2343{
2344        unsigned long flags;
2345        struct request_tracker *request;
2346        u16 smid;
2347
2348        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2349        if (list_empty(&ioc->hpr_free_list)) {
2350                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2351                return 0;
2352        }
2353
2354        request = list_entry(ioc->hpr_free_list.next,
2355            struct request_tracker, tracker_list);
2356        request->cb_idx = cb_idx;
2357        smid = request->smid;
2358        list_del(&request->tracker_list);
2359        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2360        return smid;
2361}
2362
2363/**
2364 * mpt3sas_base_free_smid - put smid back on free_list
2365 * @ioc: per adapter object
2366 * @smid: system request message index
2367 *
2368 * Return nothing.
2369 */
2370void
2371mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2372{
2373        unsigned long flags;
2374        int i;
2375        struct chain_tracker *chain_req, *next;
2376
2377        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2378        if (smid < ioc->hi_priority_smid) {
2379                /* scsiio queue */
2380                i = smid - 1;
2381                if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
2382                        list_for_each_entry_safe(chain_req, next,
2383                            &ioc->scsi_lookup[i].chain_list, tracker_list) {
2384                                list_del_init(&chain_req->tracker_list);
2385                                list_add(&chain_req->tracker_list,
2386                                    &ioc->free_chain_list);
2387                        }
2388                }
2389                ioc->scsi_lookup[i].cb_idx = 0xFF;
2390                ioc->scsi_lookup[i].scmd = NULL;
2391                ioc->scsi_lookup[i].direct_io = 0;
2392                list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
2393                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2394
2395                /*
2396                 * See _wait_for_commands_to_complete() call with regards
2397                 * to this code.
2398                 */
2399                if (ioc->shost_recovery && ioc->pending_io_count) {
2400                        if (ioc->pending_io_count == 1)
2401                                wake_up(&ioc->reset_wq);
2402                        ioc->pending_io_count--;
2403                }
2404                return;
2405        } else if (smid < ioc->internal_smid) {
2406                /* hi-priority */
2407                i = smid - ioc->hi_priority_smid;
2408                ioc->hpr_lookup[i].cb_idx = 0xFF;
2409                list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
2410        } else if (smid <= ioc->hba_queue_depth) {
2411                /* internal queue */
2412                i = smid - ioc->internal_smid;
2413                ioc->internal_lookup[i].cb_idx = 0xFF;
2414                list_add(&ioc->internal_lookup[i].tracker_list,
2415                    &ioc->internal_free_list);
2416        }
2417        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2418}
2419
2420/**
2421 * _base_writeq - 64 bit write to MMIO
2422 * @ioc: per adapter object
2423 * @b: data payload
2424 * @addr: address in MMIO space
2425 * @writeq_lock: spin lock
2426 *
2427 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
2428 * care of 32 bit environment where its not quarenteed to send the entire word
2429 * in one transfer.
2430 */
2431#if defined(writeq) && defined(CONFIG_64BIT)
2432static inline void
2433_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2434{
2435        writeq(cpu_to_le64(b), addr);
2436}
2437#else
2438static inline void
2439_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2440{
2441        unsigned long flags;
2442        __u64 data_out = cpu_to_le64(b);
2443
2444        spin_lock_irqsave(writeq_lock, flags);
2445        writel((u32)(data_out), addr);
2446        writel((u32)(data_out >> 32), (addr + 4));
2447        spin_unlock_irqrestore(writeq_lock, flags);
2448}
2449#endif
2450
2451/**
2452 * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
2453 * @ioc: per adapter object
2454 * @smid: system request message index
2455 * @handle: device handle
2456 *
2457 * Return nothing.
2458 */
2459void
2460mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2461{
2462        Mpi2RequestDescriptorUnion_t descriptor;
2463        u64 *request = (u64 *)&descriptor;
2464
2465
2466        descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2467        descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
2468        descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2469        descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2470        descriptor.SCSIIO.LMID = 0;
2471        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2472            &ioc->scsi_lookup_lock);
2473}
2474
2475/**
2476 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
2477 * @ioc: per adapter object
2478 * @smid: system request message index
2479 * @handle: device handle
2480 *
2481 * Return nothing.
2482 */
2483void
2484mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2485        u16 handle)
2486{
2487        Mpi2RequestDescriptorUnion_t descriptor;
2488        u64 *request = (u64 *)&descriptor;
2489
2490        descriptor.SCSIIO.RequestFlags =
2491            MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2492        descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2493        descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2494        descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2495        descriptor.SCSIIO.LMID = 0;
2496        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2497            &ioc->scsi_lookup_lock);
2498}
2499
2500/**
2501 * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
2502 * @ioc: per adapter object
2503 * @smid: system request message index
2504 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
2505 * Return nothing.
2506 */
2507void
2508mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2509        u16 msix_task)
2510{
2511        Mpi2RequestDescriptorUnion_t descriptor;
2512        u64 *request = (u64 *)&descriptor;
2513
2514        descriptor.HighPriority.RequestFlags =
2515            MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2516        descriptor.HighPriority.MSIxIndex =  msix_task;
2517        descriptor.HighPriority.SMID = cpu_to_le16(smid);
2518        descriptor.HighPriority.LMID = 0;
2519        descriptor.HighPriority.Reserved1 = 0;
2520        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2521            &ioc->scsi_lookup_lock);
2522}
2523
2524/**
2525 * mpt3sas_base_put_smid_default - Default, primarily used for config pages
2526 * @ioc: per adapter object
2527 * @smid: system request message index
2528 *
2529 * Return nothing.
2530 */
2531void
2532mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2533{
2534        Mpi2RequestDescriptorUnion_t descriptor;
2535        u64 *request = (u64 *)&descriptor;
2536
2537        descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2538        descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
2539        descriptor.Default.SMID = cpu_to_le16(smid);
2540        descriptor.Default.LMID = 0;
2541        descriptor.Default.DescriptorTypeDependent = 0;
2542        _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2543            &ioc->scsi_lookup_lock);
2544}
2545
2546/**
2547 * _base_display_OEMs_branding - Display branding string
2548 * @ioc: per adapter object
2549 *
2550 * Return nothing.
2551 */
2552static void
2553_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
2554{
2555        if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
2556                return;
2557
2558        switch (ioc->pdev->subsystem_vendor) {
2559        case PCI_VENDOR_ID_INTEL:
2560                switch (ioc->pdev->device) {
2561                case MPI2_MFGPAGE_DEVID_SAS2008:
2562                        switch (ioc->pdev->subsystem_device) {
2563                        case MPT2SAS_INTEL_RMS2LL080_SSDID:
2564                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2565                                    MPT2SAS_INTEL_RMS2LL080_BRANDING);
2566                                break;
2567                        case MPT2SAS_INTEL_RMS2LL040_SSDID:
2568                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2569                                    MPT2SAS_INTEL_RMS2LL040_BRANDING);
2570                                break;
2571                        case MPT2SAS_INTEL_SSD910_SSDID:
2572                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2573                                    MPT2SAS_INTEL_SSD910_BRANDING);
2574                                break;
2575                        default:
2576                                pr_info(MPT3SAS_FMT
2577                                 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2578                                 ioc->name, ioc->pdev->subsystem_device);
2579                                break;
2580                        }
2581                case MPI2_MFGPAGE_DEVID_SAS2308_2:
2582                        switch (ioc->pdev->subsystem_device) {
2583                        case MPT2SAS_INTEL_RS25GB008_SSDID:
2584                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2585                                    MPT2SAS_INTEL_RS25GB008_BRANDING);
2586                                break;
2587                        case MPT2SAS_INTEL_RMS25JB080_SSDID:
2588                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2589                                    MPT2SAS_INTEL_RMS25JB080_BRANDING);
2590                                break;
2591                        case MPT2SAS_INTEL_RMS25JB040_SSDID:
2592                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2593                                    MPT2SAS_INTEL_RMS25JB040_BRANDING);
2594                                break;
2595                        case MPT2SAS_INTEL_RMS25KB080_SSDID:
2596                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2597                                    MPT2SAS_INTEL_RMS25KB080_BRANDING);
2598                                break;
2599                        case MPT2SAS_INTEL_RMS25KB040_SSDID:
2600                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2601                                    MPT2SAS_INTEL_RMS25KB040_BRANDING);
2602                                break;
2603                        case MPT2SAS_INTEL_RMS25LB040_SSDID:
2604                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2605                                    MPT2SAS_INTEL_RMS25LB040_BRANDING);
2606                                break;
2607                        case MPT2SAS_INTEL_RMS25LB080_SSDID:
2608                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2609                                    MPT2SAS_INTEL_RMS25LB080_BRANDING);
2610                                break;
2611                        default:
2612                                pr_info(MPT3SAS_FMT
2613                                 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2614                                 ioc->name, ioc->pdev->subsystem_device);
2615                                break;
2616                        }
2617                case MPI25_MFGPAGE_DEVID_SAS3008:
2618                        switch (ioc->pdev->subsystem_device) {
2619                        case MPT3SAS_INTEL_RMS3JC080_SSDID:
2620                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2621                                        MPT3SAS_INTEL_RMS3JC080_BRANDING);
2622                                break;
2623
2624                        case MPT3SAS_INTEL_RS3GC008_SSDID:
2625                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2626                                        MPT3SAS_INTEL_RS3GC008_BRANDING);
2627                                break;
2628                        case MPT3SAS_INTEL_RS3FC044_SSDID:
2629                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2630                                        MPT3SAS_INTEL_RS3FC044_BRANDING);
2631                                break;
2632                        case MPT3SAS_INTEL_RS3UC080_SSDID:
2633                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2634                                        MPT3SAS_INTEL_RS3UC080_BRANDING);
2635                                break;
2636                        default:
2637                                pr_info(MPT3SAS_FMT
2638                                 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2639                                 ioc->name, ioc->pdev->subsystem_device);
2640                                break;
2641                        }
2642                        break;
2643                default:
2644                        pr_info(MPT3SAS_FMT
2645                         "Intel(R) Controller: Subsystem ID: 0x%X\n",
2646                         ioc->name, ioc->pdev->subsystem_device);
2647                        break;
2648                }
2649                break;
2650        case PCI_VENDOR_ID_DELL:
2651                switch (ioc->pdev->device) {
2652                case MPI2_MFGPAGE_DEVID_SAS2008:
2653                        switch (ioc->pdev->subsystem_device) {
2654                        case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
2655                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2656                                 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
2657                                break;
2658                        case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
2659                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2660                                 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
2661                                break;
2662                        case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
2663                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2664                                 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
2665                                break;
2666                        case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
2667                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2668                                 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
2669                                break;
2670                        case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
2671                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2672                                 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
2673                                break;
2674                        case MPT2SAS_DELL_PERC_H200_SSDID:
2675                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2676                                 MPT2SAS_DELL_PERC_H200_BRANDING);
2677                                break;
2678                        case MPT2SAS_DELL_6GBPS_SAS_SSDID:
2679                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2680                                 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
2681                                break;
2682                        default:
2683                                pr_info(MPT3SAS_FMT
2684                                   "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
2685                                   ioc->name, ioc->pdev->subsystem_device);
2686                                break;
2687                        }
2688                        break;
2689                case MPI25_MFGPAGE_DEVID_SAS3008:
2690                        switch (ioc->pdev->subsystem_device) {
2691                        case MPT3SAS_DELL_12G_HBA_SSDID:
2692                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2693                                        MPT3SAS_DELL_12G_HBA_BRANDING);
2694                                break;
2695                        default:
2696                                pr_info(MPT3SAS_FMT
2697                                   "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
2698                                   ioc->name, ioc->pdev->subsystem_device);
2699                                break;
2700                        }
2701                        break;
2702                default:
2703                        pr_info(MPT3SAS_FMT
2704                           "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
2705                           ioc->pdev->subsystem_device);
2706                        break;
2707                }
2708                break;
2709        case PCI_VENDOR_ID_CISCO:
2710                switch (ioc->pdev->device) {
2711                case MPI25_MFGPAGE_DEVID_SAS3008:
2712                        switch (ioc->pdev->subsystem_device) {
2713                        case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
2714                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2715                                        MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
2716                                break;
2717                        case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
2718                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2719                                        MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
2720                                break;
2721                        case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2722                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2723                                        MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2724                                break;
2725                        default:
2726                                pr_info(MPT3SAS_FMT
2727                                  "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2728                                  ioc->name, ioc->pdev->subsystem_device);
2729                                break;
2730                        }
2731                        break;
2732                case MPI25_MFGPAGE_DEVID_SAS3108_1:
2733                        switch (ioc->pdev->subsystem_device) {
2734                        case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2735                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2736                                MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2737                                break;
2738                        case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
2739                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2740                                MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
2741                                );
2742                                break;
2743                        default:
2744                                pr_info(MPT3SAS_FMT
2745                                 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2746                                 ioc->name, ioc->pdev->subsystem_device);
2747                                break;
2748                        }
2749                        break;
2750                default:
2751                        pr_info(MPT3SAS_FMT
2752                           "Cisco SAS HBA: Subsystem ID: 0x%X\n",
2753                           ioc->name, ioc->pdev->subsystem_device);
2754                        break;
2755                }
2756                break;
2757        case MPT2SAS_HP_3PAR_SSVID:
2758                switch (ioc->pdev->device) {
2759                case MPI2_MFGPAGE_DEVID_SAS2004:
2760                        switch (ioc->pdev->subsystem_device) {
2761                        case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2762                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2763                                    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2764                                break;
2765                        default:
2766                                pr_info(MPT3SAS_FMT
2767                                   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2768                                   ioc->name, ioc->pdev->subsystem_device);
2769                                break;
2770                        }
2771                case MPI2_MFGPAGE_DEVID_SAS2308_2:
2772                        switch (ioc->pdev->subsystem_device) {
2773                        case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2774                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2775                                    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2776                                break;
2777                        case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2778                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2779                                    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2780                                break;
2781                        case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2782                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2783                                 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2784                                break;
2785                        case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2786                                pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2787                                    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2788                                break;
2789                        default:
2790                                pr_info(MPT3SAS_FMT
2791                                   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2792                                   ioc->name, ioc->pdev->subsystem_device);
2793                                break;
2794                        }
2795                default:
2796                        pr_info(MPT3SAS_FMT
2797                           "HP SAS HBA: Subsystem ID: 0x%X\n",
2798                           ioc->name, ioc->pdev->subsystem_device);
2799                        break;
2800                }
2801        default:
2802                break;
2803        }
2804}
2805
2806/**
2807 * _base_display_ioc_capabilities - Disply IOC's capabilities.
2808 * @ioc: per adapter object
2809 *
2810 * Return nothing.
2811 */
2812static void
2813_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2814{
2815        int i = 0;
2816        char desc[16];
2817        u32 iounit_pg1_flags;
2818        u32 bios_version;
2819
2820        bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2821        strncpy(desc, ioc->manu_pg0.ChipName, 16);
2822        pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2823           "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2824            ioc->name, desc,
2825           (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2826           (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2827           (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2828           ioc->facts.FWVersion.Word & 0x000000FF,
2829           ioc->pdev->revision,
2830           (bios_version & 0xFF000000) >> 24,
2831           (bios_version & 0x00FF0000) >> 16,
2832           (bios_version & 0x0000FF00) >> 8,
2833            bios_version & 0x000000FF);
2834
2835        _base_display_OEMs_branding(ioc);
2836
2837        pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2838
2839        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2840                pr_info("Initiator");
2841                i++;
2842        }
2843
2844        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2845                pr_info("%sTarget", i ? "," : "");
2846                i++;
2847        }
2848
2849        i = 0;
2850        pr_info("), ");
2851        pr_info("Capabilities=(");
2852
2853        if (!ioc->hide_ir_msg) {
2854                if (ioc->facts.IOCCapabilities &
2855                    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2856                        pr_info("Raid");
2857                        i++;
2858                }
2859        }
2860
2861        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2862                pr_info("%sTLR", i ? "," : "");
2863                i++;
2864        }
2865
2866        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2867                pr_info("%sMulticast", i ? "," : "");
2868                i++;
2869        }
2870
2871        if (ioc->facts.IOCCapabilities &
2872            MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2873                pr_info("%sBIDI Target", i ? "," : "");
2874                i++;
2875        }
2876
2877        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2878                pr_info("%sEEDP", i ? "," : "");
2879                i++;
2880        }
2881
2882        if (ioc->facts.IOCCapabilities &
2883            MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2884                pr_info("%sSnapshot Buffer", i ? "," : "");
2885                i++;
2886        }
2887
2888        if (ioc->facts.IOCCapabilities &
2889            MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2890                pr_info("%sDiag Trace Buffer", i ? "," : "");
2891                i++;
2892        }
2893
2894        if (ioc->facts.IOCCapabilities &
2895            MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2896                pr_info("%sDiag Extended Buffer", i ? "," : "");
2897                i++;
2898        }
2899
2900        if (ioc->facts.IOCCapabilities &
2901            MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2902                pr_info("%sTask Set Full", i ? "," : "");
2903                i++;
2904        }
2905
2906        iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2907        if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2908                pr_info("%sNCQ", i ? "," : "");
2909                i++;
2910        }
2911
2912        pr_info(")\n");
2913}
2914
2915/**
2916 * mpt3sas_base_update_missing_delay - change the missing delay timers
2917 * @ioc: per adapter object
2918 * @device_missing_delay: amount of time till device is reported missing
2919 * @io_missing_delay: interval IO is returned when there is a missing device
2920 *
2921 * Return nothing.
2922 *
2923 * Passed on the command line, this function will modify the device missing
2924 * delay, as well as the io missing delay. This should be called at driver
2925 * load time.
2926 */
2927void
2928mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
2929        u16 device_missing_delay, u8 io_missing_delay)
2930{
2931        u16 dmd, dmd_new, dmd_orignal;
2932        u8 io_missing_delay_original;
2933        u16 sz;
2934        Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2935        Mpi2ConfigReply_t mpi_reply;
2936        u8 num_phys = 0;
2937        u16 ioc_status;
2938
2939        mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
2940        if (!num_phys)
2941                return;
2942
2943        sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2944            sizeof(Mpi2SasIOUnit1PhyData_t));
2945        sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2946        if (!sas_iounit_pg1) {
2947                pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2948                    ioc->name, __FILE__, __LINE__, __func__);
2949                goto out;
2950        }
2951        if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2952            sas_iounit_pg1, sz))) {
2953                pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2954                    ioc->name, __FILE__, __LINE__, __func__);
2955                goto out;
2956        }
2957        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2958            MPI2_IOCSTATUS_MASK;
2959        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2960                pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2961                    ioc->name, __FILE__, __LINE__, __func__);
2962                goto out;
2963        }
2964
2965        /* device missing delay */
2966        dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2967        if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2968                dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2969        else
2970                dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2971        dmd_orignal = dmd;
2972        if (device_missing_delay > 0x7F) {
2973                dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2974                    device_missing_delay;
2975                dmd = dmd / 16;
2976                dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2977        } else
2978                dmd = device_missing_delay;
2979        sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2980
2981        /* io missing delay */
2982        io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2983        sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2984
2985        if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2986            sz)) {
2987                if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2988                        dmd_new = (dmd &
2989                            MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2990                else
2991                        dmd_new =
2992                    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2993                pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
2994                        ioc->name, dmd_orignal, dmd_new);
2995                pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
2996                        ioc->name, io_missing_delay_original,
2997                    io_missing_delay);
2998                ioc->device_missing_delay = dmd_new;
2999                ioc->io_missing_delay = io_missing_delay;
3000        }
3001
3002out:
3003        kfree(sas_iounit_pg1);
3004}
3005/**
3006 * _base_static_config_pages - static start of day config pages
3007 * @ioc: per adapter object
3008 *
3009 * Return nothing.
3010 */
3011static void
3012_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
3013{
3014        Mpi2ConfigReply_t mpi_reply;
3015        u32 iounit_pg1_flags;
3016
3017        mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
3018        if (ioc->ir_firmware)
3019                mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
3020                    &ioc->manu_pg10);
3021
3022        /*
3023         * Ensure correct T10 PI operation if vendor left EEDPTagMode
3024         * flag unset in NVDATA.
3025         */
3026        mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
3027        if (ioc->manu_pg11.EEDPTagMode == 0) {
3028                pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
3029                    ioc->name);
3030                ioc->manu_pg11.EEDPTagMode &= ~0x3;
3031                ioc->manu_pg11.EEDPTagMode |= 0x1;
3032                mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
3033                    &ioc->manu_pg11);
3034        }
3035
3036        mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
3037        mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
3038        mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
3039        mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
3040        mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
3041        mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
3042        _base_display_ioc_capabilities(ioc);
3043
3044        /*
3045         * Enable task_set_full handling in iounit_pg1 when the
3046         * facts capabilities indicate that its supported.
3047         */
3048        iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
3049        if ((ioc->facts.IOCCapabilities &
3050            MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
3051                iounit_pg1_flags &=
3052                    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
3053        else
3054                iounit_pg1_flags |=
3055                    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
3056        ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
3057        mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
3058
3059        if (ioc->iounit_pg8.NumSensors)
3060                ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
3061}
3062
/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 * Tolerates partially-completed allocation (every pointer is checked
 * before freeing), so it can be used on the allocation error path.
 *
 * Return nothing.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	struct reply_post_struct *rps;

	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* Request message frames: one big coherent DMA region. */
	if (ioc->request) {
		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
		    ioc->request,  ioc->request_dma);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"request_pool(0x%p): free\n",
			ioc->name, ioc->request));
		ioc->request = NULL;
	}

	/* Sense buffers: free the element, then destroy its pci pool. */
	if (ioc->sense) {
		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		if (ioc->sense_dma_pool)
			pci_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"sense_pool(0x%p): free\n",
			ioc->name, ioc->sense));
		ioc->sense = NULL;
	}

	/* Reply message frames. */
	if (ioc->reply) {
		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		if (ioc->reply_dma_pool)
			pci_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_pool(0x%p): free\n",
			ioc->name, ioc->reply));
		ioc->reply = NULL;
	}

	/* Reply free queue. */
	if (ioc->reply_free) {
		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		if (ioc->reply_free_dma_pool)
			pci_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_free_pool(0x%p): free\n",
			ioc->name, ioc->reply_free));
		ioc->reply_free = NULL;
	}

	/*
	 * Reply post queues: with rdpq_array_enable there is one entry per
	 * reply queue, otherwise the do/while body runs exactly once for
	 * the single shared queue.
	 */
	if (ioc->reply_post) {
		do {
			rps = &ioc->reply_post[i];
			if (rps->reply_post_free) {
				pci_pool_free(
				    ioc->reply_post_free_dma_pool,
				    rps->reply_post_free,
				    rps->reply_post_free_dma);
				dexitprintk(ioc, pr_info(MPT3SAS_FMT
				    "reply_post_free_pool(0x%p): free\n",
				    ioc->name, rps->reply_post_free));
				rps->reply_post_free = NULL;
			}
		} while (ioc->rdpq_array_enable &&
			   (++i < ioc->reply_queue_count));

		if (ioc->reply_post_free_dma_pool)
			pci_pool_destroy(ioc->reply_post_free_dma_pool);
		kfree(ioc->reply_post);
	}

	/* Scratch buffer used for config-page transactions. */
	if (ioc->config_page) {
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
		    "config_page(0x%p): free\n", ioc->name,
		    ioc->config_page));
		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	/* SMID tracker tables (page allocations and kmalloc'd lookups). */
	if (ioc->scsi_lookup) {
		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
		ioc->scsi_lookup = NULL;
	}
	kfree(ioc->hpr_lookup);
	kfree(ioc->internal_lookup);
	/* Chain buffers: free every element before destroying the pool. */
	if (ioc->chain_lookup) {
		for (i = 0; i < ioc->chain_depth; i++) {
			if (ioc->chain_lookup[i].chain_buffer)
				pci_pool_free(ioc->chain_dma_pool,
				    ioc->chain_lookup[i].chain_buffer,
				    ioc->chain_lookup[i].chain_buffer_dma);
		}
		if (ioc->chain_dma_pool)
			pci_pool_destroy(ioc->chain_dma_pool);
		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
		ioc->chain_lookup = NULL;
	}
}
3168
3169/**
3170 * _base_allocate_memory_pools - allocate start of day memory pools
3171 * @ioc: per adapter object
3172 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3173 *
3174 * Returns 0 success, anything else error
3175 */
3176static int
3177_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
3178{
3179        struct mpt3sas_facts *facts;
3180        u16 max_sge_elements;
3181        u16 chains_needed_per_io;
3182        u32 sz, total_sz, reply_post_free_sz;
3183        u32 retry_sz;
3184        u16 max_request_credit;
3185        unsigned short sg_tablesize;
3186        u16 sge_size;
3187        int i;
3188
3189        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3190            __func__));
3191
3192
3193        retry_sz = 0;
3194        facts = &ioc->facts;
3195
3196        /* command line tunables for max sgl entries */
3197        if (max_sgl_entries != -1)
3198                sg_tablesize = max_sgl_entries;
3199        else {
3200                if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
3201                        sg_tablesize = MPT2SAS_SG_DEPTH;
3202                else
3203                        sg_tablesize = MPT3SAS_SG_DEPTH;
3204        }
3205
3206        if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
3207                sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
3208        else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
3209                sg_tablesize = min_t(unsigned short, sg_tablesize,
3210                                      SCSI_MAX_SG_CHAIN_SEGMENTS);
3211                pr_warn(MPT3SAS_FMT
3212                 "sg_tablesize(%u) is bigger than kernel"
3213                 " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
3214                 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
3215        }
3216        ioc->shost->sg_tablesize = sg_tablesize;
3217
3218        ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
3219                (facts->RequestCredit / 4));
3220        if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
3221                if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
3222                                INTERNAL_SCSIIO_CMDS_COUNT)) {
3223                        pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
3224                            Credits, it has just %d number of credits\n",
3225                            ioc->name, facts->RequestCredit);
3226                        return -ENOMEM;
3227                }
3228                ioc->internal_depth = 10;
3229        }
3230
3231        ioc->hi_priority_depth = ioc->internal_depth - (5);
3232        /* command line tunables  for max controller queue depth */
3233        if (max_queue_depth != -1 && max_queue_depth != 0) {
3234                max_request_credit = min_t(u16, max_queue_depth +
3235                        ioc->internal_depth, facts->RequestCredit);
3236                if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
3237                        max_request_credit =  MAX_HBA_QUEUE_DEPTH;
3238        } else
3239                max_request_credit = min_t(u16, facts->RequestCredit,
3240                    MAX_HBA_QUEUE_DEPTH);
3241
3242        /* Firmware maintains additional facts->HighPriorityCredit number of
3243         * credits for HiPriprity Request messages, so hba queue depth will be
3244         * sum of max_request_credit and high priority queue depth.
3245         */
3246        ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
3247
3248        /* request frame size */
3249        ioc->request_sz = facts->IOCRequestFrameSize * 4;
3250
3251        /* reply frame size */
3252        ioc->reply_sz = facts->ReplyFrameSize * 4;
3253
3254        /* chain segment size */
3255        if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3256                if (facts->IOCMaxChainSegmentSize)
3257                        ioc->chain_segment_sz =
3258                                        facts->IOCMaxChainSegmentSize *
3259                                        MAX_CHAIN_ELEMT_SZ;
3260                else
3261                /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
3262                        ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
3263                                                    MAX_CHAIN_ELEMT_SZ;
3264        } else
3265                ioc->chain_segment_sz = ioc->request_sz;
3266
3267        /* calculate the max scatter element size */
3268        sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
3269
3270 retry_allocation:
3271        total_sz = 0;
3272        /* calculate number of sg elements left over in the 1st frame */
3273        max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
3274            sizeof(Mpi2SGEIOUnion_t)) + sge_size);
3275        ioc->max_sges_in_main_message = max_sge_elements/sge_size;
3276
3277        /* now do the same for a chain buffer */
3278        max_sge_elements = ioc->chain_segment_sz - sge_size;
3279        ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
3280
3281        /*
3282         *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
3283         */
3284        chains_needed_per_io = ((ioc->shost->sg_tablesize -
3285           ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
3286            + 1;
3287        if (chains_needed_per_io > facts->MaxChainDepth) {
3288                chains_needed_per_io = facts->MaxChainDepth;
3289                ioc->shost->sg_tablesize = min_t(u16,
3290                ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
3291                * chains_needed_per_io), ioc->shost->sg_tablesize);
3292        }
3293        ioc->chains_needed_per_io = chains_needed_per_io;
3294
3295        /* reply free queue sizing - taking into account for 64 FW events */
3296        ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3297
3298        /* calculate reply descriptor post queue depth */
3299        ioc->reply_post_queue_depth = ioc->hba_queue_depth +
3300                                ioc->reply_free_queue_depth +  1 ;
3301        /* align the reply post queue on the next 16 count boundary */
3302        if (ioc->reply_post_queue_depth % 16)
3303                ioc->reply_post_queue_depth += 16 -
3304                (ioc->reply_post_queue_depth % 16);
3305
3306        if (ioc->reply_post_queue_depth >
3307            facts->MaxReplyDescriptorPostQueueDepth) {
3308                ioc->reply_post_queue_depth =
3309                                facts->MaxReplyDescriptorPostQueueDepth -
3310                    (facts->MaxReplyDescriptorPostQueueDepth % 16);
3311                ioc->hba_queue_depth =
3312                                ((ioc->reply_post_queue_depth - 64) / 2) - 1;
3313                ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
3314        }
3315
3316        dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
3317            "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
3318            "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
3319            ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
3320            ioc->chains_needed_per_io));
3321
3322        /* reply post queue, 16 byte align */
3323        reply_post_free_sz = ioc->reply_post_queue_depth *
3324            sizeof(Mpi2DefaultReplyDescriptor_t);
3325
3326        sz = reply_post_free_sz;
3327        if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
3328                sz *= ioc->reply_queue_count;
3329
3330        ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
3331            (ioc->reply_queue_count):1,
3332            sizeof(struct reply_post_struct), GFP_KERNEL);
3333
3334        if (!ioc->reply_post) {
3335                pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
3336                        ioc->name);
3337                goto out;
3338        }
3339        ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
3340            ioc->pdev, sz, 16, 0);
3341        if (!ioc->reply_post_free_dma_pool) {
3342                pr_err(MPT3SAS_FMT
3343                 "reply_post_free pool: pci_pool_create failed\n",
3344                 ioc->name);
3345                goto out;
3346        }
3347        i = 0;
3348        do {
3349                ioc->reply_post[i].reply_post_free =
3350                    pci_pool_alloc(ioc->reply_post_free_dma_pool,
3351                    GFP_KERNEL,
3352                    &ioc->reply_post[i].reply_post_free_dma);
3353                if (!ioc->reply_post[i].reply_post_free) {
3354                        pr_err(MPT3SAS_FMT
3355                        "reply_post_free pool: pci_pool_alloc failed\n",
3356                        ioc->name);
3357                        goto out;
3358                }
3359                memset(ioc->reply_post[i].reply_post_free, 0, sz);
3360                dinitprintk(ioc, pr_info(MPT3SAS_FMT
3361                    "reply post free pool (0x%p): depth(%d),"
3362                    "element_size(%d), pool_size(%d kB)\n", ioc->name,
3363                    ioc->reply_post[i].reply_post_free,
3364                    ioc->reply_post_queue_depth, 8, sz/1024));
3365                dinitprintk(ioc, pr_info(MPT3SAS_FMT
3366                    "reply_post_free_dma = (0x%llx)\n", ioc->name,
3367                    (unsigned long long)
3368                    ioc->reply_post[i].reply_post_free_dma));
3369                total_sz += sz;
3370        } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
3371
3372        if (ioc->dma_mask == 64) {
3373                if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
3374                        pr_warn(MPT3SAS_FMT
3375                            "no suitable consistent DMA mask for %s\n",
3376                            ioc->name, pci_name(ioc->pdev));
3377                        goto out;
3378                }
3379        }
3380
3381        ioc->scsiio_depth = ioc->hba_queue_depth -
3382            ioc->hi_priority_depth - ioc->internal_depth;
3383
3384        /* set the scsi host can_queue depth
3385         * with some internal commands that could be outstanding
3386         */
3387        ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
3388        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3389                "scsi host: can_queue depth (%d)\n",
3390                ioc->name, ioc->shost->can_queue));
3391
3392
3393        /* contiguous pool for request and chains, 16 byte align, one extra "
3394         * "frame for smid=0
3395         */
3396        ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
3397        sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
3398
3399        /* hi-priority queue */
3400        sz += (ioc->hi_priority_depth * ioc->request_sz);
3401
3402        /* internal queue */
3403        sz += (ioc->internal_depth * ioc->request_sz);
3404
3405        ioc->request_dma_sz = sz;
3406        ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
3407        if (!ioc->request) {
3408                pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3409                    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3410                    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
3411                    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3412                if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
3413                        goto out;
3414                retry_sz = 64;
3415                ioc->hba_queue_depth -= retry_sz;
3416                _base_release_memory_pools(ioc);
3417                goto retry_allocation;
3418        }
3419
3420        if (retry_sz)
3421                pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
3422                    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
3423                    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
3424                    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
3425
3426        /* hi-priority queue */
3427        ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
3428            ioc->request_sz);
3429        ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
3430            ioc->request_sz);
3431
3432        /* internal queue */
3433        ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
3434            ioc->request_sz);
3435        ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
3436            ioc->request_sz);
3437
3438        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3439                "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3440                ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
3441            (ioc->hba_queue_depth * ioc->request_sz)/1024));
3442
3443        dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
3444            ioc->name, (unsigned long long) ioc->request_dma));
3445        total_sz += sz;
3446
3447        sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
3448        ioc->scsi_lookup_pages = get_order(sz);
3449        ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
3450            GFP_KERNEL, ioc->scsi_lookup_pages);
3451        if (!ioc->scsi_lookup) {
3452                pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
3453                        ioc->name, (int)sz);
3454                goto out;
3455        }
3456
3457        dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
3458                ioc->name, ioc->request, ioc->scsiio_depth));
3459
3460        ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
3461        sz = ioc->chain_depth * sizeof(struct chain_tracker);
3462        ioc->chain_pages = get_order(sz);
3463        ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
3464            GFP_KERNEL, ioc->chain_pages);
3465        if (!ioc->chain_lookup) {
3466                pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
3467                        ioc->name);
3468                goto out;
3469        }
3470        ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
3471            ioc->chain_segment_sz, 16, 0);
3472        if (!ioc->chain_dma_pool) {
3473                pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
3474                        ioc->name);
3475                goto out;
3476        }
3477        for (i = 0; i < ioc->chain_depth; i++) {
3478                ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
3479                    ioc->chain_dma_pool , GFP_KERNEL,
3480                    &ioc->chain_lookup[i].chain_buffer_dma);
3481                if (!ioc->chain_lookup[i].chain_buffer) {
3482                        ioc->chain_depth = i;
3483                        goto chain_done;
3484                }
3485                total_sz += ioc->chain_segment_sz;
3486        }
3487 chain_done:
3488        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3489                "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
3490                ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
3491                ((ioc->chain_depth *  ioc->chain_segment_sz))/1024));
3492
3493        /* initialize hi-priority queue smid's */
3494        ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
3495            sizeof(struct request_tracker), GFP_KERNEL);
3496        if (!ioc->hpr_lookup) {
3497                pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
3498                    ioc->name);
3499                goto out;
3500        }
3501        ioc->hi_priority_smid = ioc->scsiio_depth + 1;
3502        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3503                "hi_priority(0x%p): depth(%d), start smid(%d)\n",
3504                ioc->name, ioc->hi_priority,
3505            ioc->hi_priority_depth, ioc->hi_priority_smid));
3506
3507        /* initialize internal queue smid's */
3508        ioc->internal_lookup = kcalloc(ioc->internal_depth,
3509            sizeof(struct request_tracker), GFP_KERNEL);
3510        if (!ioc->internal_lookup) {
3511                pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
3512                    ioc->name);
3513                goto out;
3514        }
3515        ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
3516        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3517                "internal(0x%p): depth(%d), start smid(%d)\n",
3518                ioc->name, ioc->internal,
3519            ioc->internal_depth, ioc->internal_smid));
3520
3521        /* sense buffers, 4 byte align */
3522        sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
3523        ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
3524            0);
3525        if (!ioc->sense_dma_pool) {
3526                pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
3527                    ioc->name);
3528                goto out;
3529        }
3530        ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
3531            &ioc->sense_dma);
3532        if (!ioc->sense) {
3533                pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
3534                    ioc->name);
3535                goto out;
3536        }
3537        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3538            "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
3539            "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
3540            SCSI_SENSE_BUFFERSIZE, sz/1024));
3541        dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
3542            ioc->name, (unsigned long long)ioc->sense_dma));
3543        total_sz += sz;
3544
3545        /* reply pool, 4 byte align */
3546        sz = ioc->reply_free_queue_depth * ioc->reply_sz;
3547        ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
3548            0);
3549        if (!ioc->reply_dma_pool) {
3550                pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
3551                    ioc->name);
3552                goto out;
3553        }
3554        ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
3555            &ioc->reply_dma);
3556        if (!ioc->reply) {
3557                pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
3558                    ioc->name);
3559                goto out;
3560        }
3561        ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
3562        ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
3563        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3564                "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
3565                ioc->name, ioc->reply,
3566            ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
3567        dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
3568            ioc->name, (unsigned long long)ioc->reply_dma));
3569        total_sz += sz;
3570
3571        /* reply free queue, 16 byte align */
3572        sz = ioc->reply_free_queue_depth * 4;
3573        ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
3574            ioc->pdev, sz, 16, 0);
3575        if (!ioc->reply_free_dma_pool) {
3576                pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
3577                        ioc->name);
3578                goto out;
3579        }
3580        ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
3581            &ioc->reply_free_dma);
3582        if (!ioc->reply_free) {
3583                pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
3584                        ioc->name);
3585                goto out;
3586        }
3587        memset(ioc->reply_free, 0, sz);
3588        dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
3589            "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
3590            ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
3591        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3592                "reply_free_dma (0x%llx)\n",
3593                ioc->name, (unsigned long long)ioc->reply_free_dma));
3594        total_sz += sz;
3595
3596        ioc->config_page_sz = 512;
3597        ioc->config_page = pci_alloc_consistent(ioc->pdev,
3598            ioc->config_page_sz, &ioc->config_page_dma);
3599        if (!ioc->config_page) {
3600                pr_err(MPT3SAS_FMT
3601                        "config page: pci_pool_alloc failed\n",
3602                        ioc->name);
3603                goto out;
3604        }
3605        dinitprintk(ioc, pr_info(MPT3SAS_FMT
3606                "config page(0x%p): size(%d)\n",
3607                ioc->name, ioc->config_page, ioc->config_page_sz));
3608        dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
3609                ioc->name, (unsigned long long)ioc->config_page_dma));
3610        total_sz += ioc->config_page_sz;
3611
3612        pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
3613            ioc->name, total_sz/1024);
3614        pr_info(MPT3SAS_FMT
3615                "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
3616            ioc->name, ioc->shost->can_queue, facts->RequestCredit);
3617        pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
3618            ioc->name, ioc->shost->sg_tablesize);
3619        return 0;
3620
3621 out:
3622        return -ENOMEM;
3623}
3624
3625/**
3626 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
3627 * @ioc: Pointer to MPT_ADAPTER structure
3628 * @cooked: Request raw or cooked IOC state
3629 *
3630 * Returns all IOC Doorbell register bits if cooked==0, else just the
3631 * Doorbell bits in MPI_IOC_STATE_MASK.
3632 */
3633u32
3634mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
3635{
3636        u32 s, sc;
3637
3638        s = readl(&ioc->chip->Doorbell);
3639        sc = s & MPI2_IOC_STATE_MASK;
3640        return cooked ? sc : s;
3641}
3642
3643/**
3644 * _base_wait_on_iocstate - waiting on a particular ioc state
3645 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
3646 * @timeout: timeout in second
3647 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3648 *
3649 * Returns 0 for success, non-zero for failure.
3650 */
3651static int
3652_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
3653        int sleep_flag)
3654{
3655        u32 count, cntdn;
3656        u32 current_state;
3657
3658        count = 0;
3659        cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3660        do {
3661                current_state = mpt3sas_base_get_iocstate(ioc, 1);
3662                if (current_state == ioc_state)
3663                        return 0;
3664                if (count && current_state == MPI2_IOC_STATE_FAULT)
3665                        break;
3666                if (sleep_flag == CAN_SLEEP)
3667                        usleep_range(1000, 1500);
3668                else
3669                        udelay(500);
3670                count++;
3671        } while (--cntdn);
3672
3673        return current_state;
3674}
3675
3676/**
3677 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
3678 * a write to the doorbell)
3679 * @ioc: per adapter object
3680 * @timeout: timeout in second
3681 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3682 *
3683 * Returns 0 for success, non-zero for failure.
3684 *
3685 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3686 */
3687static int
3688_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
3689
3690static int
3691_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3692        int sleep_flag)
3693{
3694        u32 cntdn, count;
3695        u32 int_status;
3696
3697        count = 0;
3698        cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3699        do {
3700                int_status = readl(&ioc->chip->HostInterruptStatus);
3701                if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3702                        dhsprintk(ioc, pr_info(MPT3SAS_FMT
3703                                "%s: successful count(%d), timeout(%d)\n",
3704                                ioc->name, __func__, count, timeout));
3705                        return 0;
3706                }
3707                if (sleep_flag == CAN_SLEEP)
3708                        usleep_range(1000, 1500);
3709                else
3710                        udelay(500);
3711                count++;
3712        } while (--cntdn);
3713
3714        pr_err(MPT3SAS_FMT
3715                "%s: failed due to timeout count(%d), int_status(%x)!\n",
3716                ioc->name, __func__, count, int_status);
3717        return -EFAULT;
3718}
3719
3720/**
3721 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3722 * @ioc: per adapter object
3723 * @timeout: timeout in second
3724 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3725 *
3726 * Returns 0 for success, non-zero for failure.
3727 *
3728 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3729 * doorbell.
3730 */
3731static int
3732_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
3733        int sleep_flag)
3734{
3735        u32 cntdn, count;
3736        u32 int_status;
3737        u32 doorbell;
3738
3739        count = 0;
3740        cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3741        do {
3742                int_status = readl(&ioc->chip->HostInterruptStatus);
3743                if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3744                        dhsprintk(ioc, pr_info(MPT3SAS_FMT
3745                                "%s: successful count(%d), timeout(%d)\n",
3746                                ioc->name, __func__, count, timeout));
3747                        return 0;
3748                } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3749                        doorbell = readl(&ioc->chip->Doorbell);
3750                        if ((doorbell & MPI2_IOC_STATE_MASK) ==
3751                            MPI2_IOC_STATE_FAULT) {
3752                                mpt3sas_base_fault_info(ioc , doorbell);
3753                                return -EFAULT;
3754                        }
3755                } else if (int_status == 0xFFFFFFFF)
3756                        goto out;
3757
3758                if (sleep_flag == CAN_SLEEP)
3759                        usleep_range(1000, 1500);
3760                else
3761                        udelay(500);
3762                count++;
3763        } while (--cntdn);
3764
3765 out:
3766        pr_err(MPT3SAS_FMT
3767         "%s: failed due to timeout count(%d), int_status(%x)!\n",
3768         ioc->name, __func__, count, int_status);
3769        return -EFAULT;
3770}
3771
3772/**
3773 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3774 * @ioc: per adapter object
3775 * @timeout: timeout in second
3776 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3777 *
3778 * Returns 0 for success, non-zero for failure.
3779 *
3780 */
3781static int
3782_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3783        int sleep_flag)
3784{
3785        u32 cntdn, count;
3786        u32 doorbell_reg;
3787
3788        count = 0;
3789        cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3790        do {
3791                doorbell_reg = readl(&ioc->chip->Doorbell);
3792                if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3793                        dhsprintk(ioc, pr_info(MPT3SAS_FMT
3794                                "%s: successful count(%d), timeout(%d)\n",
3795                                ioc->name, __func__, count, timeout));
3796                        return 0;
3797                }
3798                if (sleep_flag == CAN_SLEEP)
3799                        usleep_range(1000, 1500);
3800                else
3801                        udelay(500);
3802                count++;
3803        } while (--cntdn);
3804
3805        pr_err(MPT3SAS_FMT
3806                "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3807                ioc->name, __func__, count, doorbell_reg);
3808        return -EFAULT;
3809}
3810
3811/**
3812 * _base_send_ioc_reset - send doorbell reset
3813 * @ioc: per adapter object
3814 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3815 * @timeout: timeout in second
3816 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3817 *
3818 * Returns 0 for success, non-zero for failure.
3819 */
3820static int
3821_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3822        int sleep_flag)
3823{
3824        u32 ioc_state;
3825        int r = 0;
3826
3827        if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3828                pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
3829                    ioc->name, __func__);
3830                return -EFAULT;
3831        }
3832
3833        if (!(ioc->facts.IOCCapabilities &
3834           MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3835                return -EFAULT;
3836
3837        pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
3838
3839        writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3840            &ioc->chip->Doorbell);
3841        if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3842                r = -EFAULT;
3843                goto out;
3844        }
3845        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3846            timeout, sleep_flag);
3847        if (ioc_state) {
3848                pr_err(MPT3SAS_FMT
3849                        "%s: failed going to ready state (ioc_state=0x%x)\n",
3850                        ioc->name, __func__, ioc_state);
3851                r = -EFAULT;
3852                goto out;
3853        }
3854 out:
3855        pr_info(MPT3SAS_FMT "message unit reset: %s\n",
3856            ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3857        return r;
3858}
3859
3860/**
3861 * _base_handshake_req_reply_wait - send request thru doorbell interface
3862 * @ioc: per adapter object
3863 * @request_bytes: request length
3864 * @request: pointer having request payload
3865 * @reply_bytes: reply length
3866 * @reply: pointer to reply payload
3867 * @timeout: timeout in second
3868 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3869 *
3870 * Returns 0 for success, non-zero for failure.
3871 */
3872static int
3873_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3874        u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3875{
3876        MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3877        int i;
3878        u8 failed;
3879        u16 dummy;
3880        __le32 *mfp;
3881
3882        /* make sure doorbell is not in use */
3883        if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3884                pr_err(MPT3SAS_FMT
3885                        "doorbell is in use (line=%d)\n",
3886                        ioc->name, __LINE__);
3887                return -EFAULT;
3888        }
3889
3890        /* clear pending doorbell interrupts from previous state changes */
3891        if (readl(&ioc->chip->HostInterruptStatus) &
3892            MPI2_HIS_IOC2SYS_DB_STATUS)
3893                writel(0, &ioc->chip->HostInterruptStatus);
3894
3895        /* send message to ioc */
3896        writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3897            ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3898            &ioc->chip->Doorbell);
3899
3900        if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3901                pr_err(MPT3SAS_FMT
3902                        "doorbell handshake int failed (line=%d)\n",
3903                        ioc->name, __LINE__);
3904                return -EFAULT;
3905        }
3906        writel(0, &ioc->chip->HostInterruptStatus);
3907
3908        if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3909                pr_err(MPT3SAS_FMT
3910                        "doorbell handshake ack failed (line=%d)\n",
3911                        ioc->name, __LINE__);
3912                return -EFAULT;
3913        }
3914
3915        /* send message 32-bits at a time */
3916        for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3917                writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3918                if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3919                        failed = 1;
3920        }
3921
3922        if (failed) {
3923                pr_err(MPT3SAS_FMT
3924                        "doorbell handshake sending request failed (line=%d)\n",
3925                        ioc->name, __LINE__);
3926                return -EFAULT;
3927        }
3928
3929        /* now wait for the reply */
3930        if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3931                pr_err(MPT3SAS_FMT
3932                        "doorbell handshake int failed (line=%d)\n",
3933                        ioc->name, __LINE__);
3934                return -EFAULT;
3935        }
3936
3937        /* read the first two 16-bits, it gives the total length of the reply */
3938        reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3939            & MPI2_DOORBELL_DATA_MASK);
3940        writel(0, &ioc->chip->HostInterruptStatus);
3941        if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3942                pr_err(MPT3SAS_FMT
3943                        "doorbell handshake int failed (line=%d)\n",
3944                        ioc->name, __LINE__);
3945                return -EFAULT;
3946        }
3947        reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3948            & MPI2_DOORBELL_DATA_MASK);
3949        writel(0, &ioc->chip->HostInterruptStatus);
3950
3951        for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3952                if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3953                        pr_err(MPT3SAS_FMT
3954                                "doorbell handshake int failed (line=%d)\n",
3955                                ioc->name, __LINE__);
3956                        return -EFAULT;
3957                }
3958                if (i >=  reply_bytes/2) /* overflow case */
3959                        dummy = readl(&ioc->chip->Doorbell);
3960                else
3961                        reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3962                            & MPI2_DOORBELL_DATA_MASK);
3963                writel(0, &ioc->chip->HostInterruptStatus);
3964        }
3965
3966        _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3967        if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3968                dhsprintk(ioc, pr_info(MPT3SAS_FMT
3969                        "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
3970        }
3971        writel(0, &ioc->chip->HostInterruptStatus);
3972
3973        if (ioc->logging_level & MPT_DEBUG_INIT) {
3974                mfp = (__le32 *)reply;
3975                pr_info("\toffset:data\n");
3976                for (i = 0; i < reply_bytes/4; i++)
3977                        pr_info("\t[0x%02x]:%08x\n", i*4,
3978                            le32_to_cpu(mfp[i]));
3979        }
3980        return 0;
3981}
3982
3983/**
3984 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
3985 * @ioc: per adapter object
3986 * @mpi_reply: the reply payload from FW
3987 * @mpi_request: the request payload sent to FW
3988 *
3989 * The SAS IO Unit Control Request message allows the host to perform low-level
3990 * operations, such as resets on the PHYs of the IO Unit, also allows the host
3991 * to obtain the IOC assigned device handles for a device if it has other
3992 * identifying information about the device, in addition allows the host to
3993 * remove IOC resources associated with the device.
3994 *
3995 * Returns 0 for success, non-zero for failure.
3996 */
3997int
3998mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
3999        Mpi2SasIoUnitControlReply_t *mpi_reply,
4000        Mpi2SasIoUnitControlRequest_t *mpi_request)
4001{
4002        u16 smid;
4003        u32 ioc_state;
4004        unsigned long timeleft;
4005        bool issue_reset = false;
4006        int rc;
4007        void *request;
4008        u16 wait_state_count;
4009
4010        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4011            __func__));
4012
4013        mutex_lock(&ioc->base_cmds.mutex);
4014
4015        if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
4016                pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
4017                    ioc->name, __func__);
4018                rc = -EAGAIN;
4019                goto out;
4020        }
4021
4022        wait_state_count = 0;
4023        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4024        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4025                if (wait_state_count++ == 10) {
4026                        pr_err(MPT3SAS_FMT
4027                            "%s: failed due to ioc not operational\n",
4028                            ioc->name, __func__);
4029                        rc = -EFAULT;
4030                        goto out;
4031                }
4032                ssleep(1);
4033                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4034                pr_info(MPT3SAS_FMT
4035                        "%s: waiting for operational state(count=%d)\n",
4036                        ioc->name, __func__, wait_state_count);
4037        }
4038
4039        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4040        if (!smid) {
4041                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4042                    ioc->name, __func__);
4043                rc = -EAGAIN;
4044                goto out;
4045        }
4046
4047        rc = 0;
4048        ioc->base_cmds.status = MPT3_CMD_PENDING;
4049        request = mpt3sas_base_get_msg_frame(ioc, smid);
4050        ioc->base_cmds.smid = smid;
4051        memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
4052        if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4053            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
4054                ioc->ioc_link_reset_in_progress = 1;
4055        init_completion(&ioc->base_cmds.done);
4056        mpt3sas_base_put_smid_default(ioc, smid);
4057        timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
4058            msecs_to_jiffies(10000));
4059        if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4060            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
4061            ioc->ioc_link_reset_in_progress)
4062                ioc->ioc_link_reset_in_progress = 0;
4063        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4064                pr_err(MPT3SAS_FMT "%s: timeout\n",
4065                    ioc->name, __func__);
4066                _debug_dump_mf(mpi_request,
4067                    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
4068                if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
4069                        issue_reset = true;
4070                goto issue_host_reset;
4071        }
4072        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4073                memcpy(mpi_reply, ioc->base_cmds.reply,
4074                    sizeof(Mpi2SasIoUnitControlReply_t));
4075        else
4076                memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
4077        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4078        goto out;
4079
4080 issue_host_reset:
4081        if (issue_reset)
4082                mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
4083                    FORCE_BIG_HAMMER);
4084        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4085        rc = -EFAULT;
4086 out:
4087        mutex_unlock(&ioc->base_cmds.mutex);
4088        return rc;
4089}
4090
4091/**
4092 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
4093 * @ioc: per adapter object
4094 * @mpi_reply: the reply payload from FW
4095 * @mpi_request: the request payload sent to FW
4096 *
4097 * The SCSI Enclosure Processor request message causes the IOC to
4098 * communicate with SES devices to control LED status signals.
4099 *
4100 * Returns 0 for success, non-zero for failure.
4101 */
4102int
4103mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4104        Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
4105{
4106        u16 smid;
4107        u32 ioc_state;
4108        unsigned long timeleft;
4109        bool issue_reset = false;
4110        int rc;
4111        void *request;
4112        u16 wait_state_count;
4113
4114        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4115            __func__));
4116
4117        mutex_lock(&ioc->base_cmds.mutex);
4118
4119        if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
4120                pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
4121                    ioc->name, __func__);
4122                rc = -EAGAIN;
4123                goto out;
4124        }
4125
4126        wait_state_count = 0;
4127        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4128        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4129                if (wait_state_count++ == 10) {
4130                        pr_err(MPT3SAS_FMT
4131                            "%s: failed due to ioc not operational\n",
4132                            ioc->name, __func__);
4133                        rc = -EFAULT;
4134                        goto out;
4135                }
4136                ssleep(1);
4137                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4138                pr_info(MPT3SAS_FMT
4139                        "%s: waiting for operational state(count=%d)\n",
4140                        ioc->name,
4141                    __func__, wait_state_count);
4142        }
4143
4144        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4145        if (!smid) {
4146                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4147                    ioc->name, __func__);
4148                rc = -EAGAIN;
4149                goto out;
4150        }
4151
4152        rc = 0;
4153        ioc->base_cmds.status = MPT3_CMD_PENDING;
4154        request = mpt3sas_base_get_msg_frame(ioc, smid);
4155        ioc->base_cmds.smid = smid;
4156        memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
4157        init_completion(&ioc->base_cmds.done);
4158        mpt3sas_base_put_smid_default(ioc, smid);
4159        timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
4160            msecs_to_jiffies(10000));
4161        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4162                pr_err(MPT3SAS_FMT "%s: timeout\n",
4163                    ioc->name, __func__);
4164                _debug_dump_mf(mpi_request,
4165                    sizeof(Mpi2SepRequest_t)/4);
4166                if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
4167                        issue_reset = false;
4168                goto issue_host_reset;
4169        }
4170        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4171                memcpy(mpi_reply, ioc->base_cmds.reply,
4172                    sizeof(Mpi2SepReply_t));
4173        else
4174                memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
4175        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4176        goto out;
4177
4178 issue_host_reset:
4179        if (issue_reset)
4180                mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
4181                    FORCE_BIG_HAMMER);
4182        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4183        rc = -EFAULT;
4184 out:
4185        mutex_unlock(&ioc->base_cmds.mutex);
4186        return rc;
4187}
4188
4189/**
4190 * _base_get_port_facts - obtain port facts reply and save in ioc
4191 * @ioc: per adapter object
4192 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4193 *
4194 * Returns 0 for success, non-zero for failure.
4195 */
4196static int
4197_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
4198{
4199        Mpi2PortFactsRequest_t mpi_request;
4200        Mpi2PortFactsReply_t mpi_reply;
4201        struct mpt3sas_port_facts *pfacts;
4202        int mpi_reply_sz, mpi_request_sz, r;
4203
4204        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4205            __func__));
4206
4207        mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
4208        mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
4209        memset(&mpi_request, 0, mpi_request_sz);
4210        mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
4211        mpi_request.PortNumber = port;
4212        r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4213            (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
4214
4215        if (r != 0) {
4216                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4217                    ioc->name, __func__, r);
4218                return r;
4219        }
4220
4221        pfacts = &ioc->pfacts[port];
4222        memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
4223        pfacts->PortNumber = mpi_reply.PortNumber;
4224        pfacts->VP_ID = mpi_reply.VP_ID;
4225        pfacts->VF_ID = mpi_reply.VF_ID;
4226        pfacts->MaxPostedCmdBuffers =
4227            le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
4228
4229        return 0;
4230}
4231
4232/**
4233 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
4234 * @ioc: per adapter object
4235 * @timeout:
4236 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4237 *
4238 * Returns 0 for success, non-zero for failure.
4239 */
4240static int
4241_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
4242        int sleep_flag)
4243{
4244        u32 ioc_state;
4245        int rc;
4246
4247        dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
4248            __func__));
4249
4250        if (ioc->pci_error_recovery) {
4251                dfailprintk(ioc, printk(MPT3SAS_FMT
4252                    "%s: host in pci error recovery\n", ioc->name, __func__));
4253                return -EFAULT;
4254        }
4255
4256        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4257        dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4258            ioc->name, __func__, ioc_state));
4259
4260        if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
4261            (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4262                return 0;
4263
4264        if (ioc_state & MPI2_DOORBELL_USED) {
4265                dhsprintk(ioc, printk(MPT3SAS_FMT
4266                    "unexpected doorbell active!\n", ioc->name));
4267                goto issue_diag_reset;
4268        }
4269
4270        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4271                mpt3sas_base_fault_info(ioc, ioc_state &
4272                    MPI2_DOORBELL_DATA_MASK);
4273                goto issue_diag_reset;
4274        }
4275
4276        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
4277            timeout, sleep_flag);
4278        if (ioc_state) {
4279                dfailprintk(ioc, printk(MPT3SAS_FMT
4280                    "%s: failed going to ready state (ioc_state=0x%x)\n",
4281                    ioc->name, __func__, ioc_state));
4282                return -EFAULT;
4283        }
4284
4285 issue_diag_reset:
4286        rc = _base_diag_reset(ioc, sleep_flag);
4287        return rc;
4288}
4289
4290/**
4291 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
4292 * @ioc: per adapter object
4293 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4294 *
4295 * Returns 0 for success, non-zero for failure.
4296 */
4297static int
4298_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4299{
4300        Mpi2IOCFactsRequest_t mpi_request;
4301        Mpi2IOCFactsReply_t mpi_reply;
4302        struct mpt3sas_facts *facts;
4303        int mpi_reply_sz, mpi_request_sz, r;
4304
4305        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4306            __func__));
4307
4308        r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
4309        if (r) {
4310                dfailprintk(ioc, printk(MPT3SAS_FMT
4311                    "%s: failed getting to correct state\n",
4312                    ioc->name, __func__));
4313                return r;
4314        }
4315        mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
4316        mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
4317        memset(&mpi_request, 0, mpi_request_sz);
4318        mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
4319        r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4320            (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
4321
4322        if (r != 0) {
4323                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4324                    ioc->name, __func__, r);
4325                return r;
4326        }
4327
4328        facts = &ioc->facts;
4329        memset(facts, 0, sizeof(struct mpt3sas_facts));
4330        facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
4331        facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
4332        facts->VP_ID = mpi_reply.VP_ID;
4333        facts->VF_ID = mpi_reply.VF_ID;
4334        facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
4335        facts->MaxChainDepth = mpi_reply.MaxChainDepth;
4336        facts->WhoInit = mpi_reply.WhoInit;
4337        facts->NumberOfPorts = mpi_reply.NumberOfPorts;
4338        facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
4339        facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
4340        facts->MaxReplyDescriptorPostQueueDepth =
4341            le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
4342        facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
4343        facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
4344        if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
4345                ioc->ir_firmware = 1;
4346        if ((facts->IOCCapabilities &
4347              MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
4348                ioc->rdpq_array_capable = 1;
4349        facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
4350        facts->IOCRequestFrameSize =
4351            le16_to_cpu(mpi_reply.IOCRequestFrameSize);
4352        if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4353                facts->IOCMaxChainSegmentSize =
4354                        le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
4355        }
4356        facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
4357        facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
4358        ioc->shost->max_id = -1;
4359        facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
4360        facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
4361        facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
4362        facts->HighPriorityCredit =
4363            le16_to_cpu(mpi_reply.HighPriorityCredit);
4364        facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
4365        facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
4366
4367        dinitprintk(ioc, pr_info(MPT3SAS_FMT
4368                "hba queue depth(%d), max chains per io(%d)\n",
4369                ioc->name, facts->RequestCredit,
4370            facts->MaxChainDepth));
4371        dinitprintk(ioc, pr_info(MPT3SAS_FMT
4372                "request frame size(%d), reply frame size(%d)\n", ioc->name,
4373            facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
4374        return 0;
4375}
4376
4377/**
4378 * _base_send_ioc_init - send ioc_init to firmware
4379 * @ioc: per adapter object
4380 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4381 *
4382 * Returns 0 for success, non-zero for failure.
4383 */
4384static int
4385_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4386{
4387        Mpi2IOCInitRequest_t mpi_request;
4388        Mpi2IOCInitReply_t mpi_reply;
4389        int i, r = 0;
4390        struct timeval current_time;
4391        u16 ioc_status;
4392        u32 reply_post_free_array_sz = 0;
4393        Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
4394        dma_addr_t reply_post_free_array_dma;
4395
4396        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4397            __func__));
4398
4399        memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
4400        mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
4401        mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
4402        mpi_request.VF_ID = 0; /* TODO */
4403        mpi_request.VP_ID = 0;
4404        mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
4405        mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
4406
4407        if (_base_is_controller_msix_enabled(ioc))
4408                mpi_request.HostMSIxVectors = ioc->reply_queue_count;
4409        mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
4410        mpi_request.ReplyDescriptorPostQueueDepth =
4411            cpu_to_le16(ioc->reply_post_queue_depth);
4412        mpi_request.ReplyFreeQueueDepth =
4413            cpu_to_le16(ioc->reply_free_queue_depth);
4414
4415        mpi_request.SenseBufferAddressHigh =
4416            cpu_to_le32((u64)ioc->sense_dma >> 32);
4417        mpi_request.SystemReplyAddressHigh =
4418            cpu_to_le32((u64)ioc->reply_dma >> 32);
4419        mpi_request.SystemRequestFrameBaseAddress =
4420            cpu_to_le64((u64)ioc->request_dma);
4421        mpi_request.ReplyFreeQueueAddress =
4422            cpu_to_le64((u64)ioc->reply_free_dma);
4423
4424        if (ioc->rdpq_array_enable) {
4425                reply_post_free_array_sz = ioc->reply_queue_count *
4426                    sizeof(Mpi2IOCInitRDPQArrayEntry);
4427                reply_post_free_array = pci_alloc_consistent(ioc->pdev,
4428                        reply_post_free_array_sz, &reply_post_free_array_dma);
4429                if (!reply_post_free_array) {
4430                        pr_err(MPT3SAS_FMT
4431                        "reply_post_free_array: pci_alloc_consistent failed\n",
4432                        ioc->name);
4433                        r = -ENOMEM;
4434                        goto out;
4435                }
4436                memset(reply_post_free_array, 0, reply_post_free_array_sz);
4437                for (i = 0; i < ioc->reply_queue_count; i++)
4438                        reply_post_free_array[i].RDPQBaseAddress =
4439                            cpu_to_le64(
4440                                (u64)ioc->reply_post[i].reply_post_free_dma);
4441                mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
4442                mpi_request.ReplyDescriptorPostQueueAddress =
4443                    cpu_to_le64((u64)reply_post_free_array_dma);
4444        } else {
4445                mpi_request.ReplyDescriptorPostQueueAddress =
4446                    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
4447        }
4448
4449        /* This time stamp specifies number of milliseconds
4450         * since epoch ~ midnight January 1, 1970.
4451         */
4452        do_gettimeofday(&current_time);
4453        mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
4454            (current_time.tv_usec / 1000));
4455
4456        if (ioc->logging_level & MPT_DEBUG_INIT) {
4457                __le32 *mfp;
4458                int i;
4459
4460                mfp = (__le32 *)&mpi_request;
4461                pr_info("\toffset:data\n");
4462                for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
4463                        pr_info("\t[0x%02x]:%08x\n", i*4,
4464                            le32_to_cpu(mfp[i]));
4465        }
4466
4467        r = _base_handshake_req_reply_wait(ioc,
4468            sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
4469            sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
4470            sleep_flag);
4471
4472        if (r != 0) {
4473                pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4474                    ioc->name, __func__, r);
4475                goto out;
4476        }
4477
4478        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
4479        if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
4480            mpi_reply.IOCLogInfo) {
4481                pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
4482                r = -EIO;
4483        }
4484
4485out:
4486        if (reply_post_free_array)
4487                pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
4488                                    reply_post_free_array,
4489                                    reply_post_free_array_dma);
4490        return r;
4491}
4492
4493/**
4494 * mpt3sas_port_enable_done - command completion routine for port enable
4495 * @ioc: per adapter object
4496 * @smid: system request message index
4497 * @msix_index: MSIX table index supplied by the OS
4498 * @reply: reply message frame(lower 32bit addr)
4499 *
4500 * Return 1 meaning mf should be freed from _base_interrupt
4501 *        0 means the mf is freed from this function.
4502 */
4503u8
4504mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4505        u32 reply)
4506{
4507        MPI2DefaultReply_t *mpi_reply;
4508        u16 ioc_status;
4509
4510        if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
4511                return 1;
4512
4513        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4514        if (!mpi_reply)
4515                return 1;
4516
4517        if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
4518                return 1;
4519
4520        ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
4521        ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
4522        ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
4523        memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
4524        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4525        if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
4526                ioc->port_enable_failed = 1;
4527
4528        if (ioc->is_driver_loading) {
4529                if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4530                        mpt3sas_port_enable_complete(ioc);
4531                        return 1;
4532                } else {
4533                        ioc->start_scan_failed = ioc_status;
4534                        ioc->start_scan = 0;
4535                        return 1;
4536                }
4537        }
4538        complete(&ioc->port_enable_cmds.done);
4539        return 1;
4540}
4541
4542/**
4543 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
4544 * @ioc: per adapter object
4545 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4546 *
4547 * Returns 0 for success, non-zero for failure.
4548 */
4549static int
4550_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4551{
4552        Mpi2PortEnableRequest_t *mpi_request;
4553        Mpi2PortEnableReply_t *mpi_reply;
4554        unsigned long timeleft;
4555        int r = 0;
4556        u16 smid;
4557        u16 ioc_status;
4558
4559        pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4560
4561        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4562                pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4563                    ioc->name, __func__);
4564                return -EAGAIN;
4565        }
4566
4567        smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4568        if (!smid) {
4569                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4570                    ioc->name, __func__);
4571                return -EAGAIN;
4572        }
4573
4574        ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4575        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4576        ioc->port_enable_cmds.smid = smid;
4577        memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4578        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4579
4580        init_completion(&ioc->port_enable_cmds.done);
4581        mpt3sas_base_put_smid_default(ioc, smid);
4582        timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
4583            300*HZ);
4584        if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
4585                pr_err(MPT3SAS_FMT "%s: timeout\n",
4586                    ioc->name, __func__);
4587                _debug_dump_mf(mpi_request,
4588                    sizeof(Mpi2PortEnableRequest_t)/4);
4589                if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
4590                        r = -EFAULT;
4591                else
4592                        r = -ETIME;
4593                goto out;
4594        }
4595
4596        mpi_reply = ioc->port_enable_cmds.reply;
4597        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4598        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4599                pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
4600                    ioc->name, __func__, ioc_status);
4601                r = -EFAULT;
4602                goto out;
4603        }
4604
4605 out:
4606        ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4607        pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
4608            "SUCCESS" : "FAILED"));
4609        return r;
4610}
4611
4612/**
4613 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
4614 * @ioc: per adapter object
4615 *
4616 * Returns 0 for success, non-zero for failure.
4617 */
4618int
4619mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
4620{
4621        Mpi2PortEnableRequest_t *mpi_request;
4622        u16 smid;
4623
4624        pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4625
4626        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4627                pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4628                    ioc->name, __func__);
4629                return -EAGAIN;
4630        }
4631
4632        smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4633        if (!smid) {
4634                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4635                    ioc->name, __func__);
4636                return -EAGAIN;
4637        }
4638
4639        ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4640        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4641        ioc->port_enable_cmds.smid = smid;
4642        memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4643        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4644
4645        mpt3sas_base_put_smid_default(ioc, smid);
4646        return 0;
4647}
4648
4649/**
4650 * _base_determine_wait_on_discovery - desposition
4651 * @ioc: per adapter object
4652 *
4653 * Decide whether to wait on discovery to complete. Used to either
4654 * locate boot device, or report volumes ahead of physical devices.
4655 *
4656 * Returns 1 for wait, 0 for don't wait
4657 */
4658static int
4659_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
4660{
4661        /* We wait for discovery to complete if IR firmware is loaded.
4662         * The sas topology events arrive before PD events, so we need time to
4663         * turn on the bit in ioc->pd_handles to indicate PD
4664         * Also, it maybe required to report Volumes ahead of physical
4665         * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
4666         */
4667        if (ioc->ir_firmware)
4668                return 1;
4669
4670        /* if no Bios, then we don't need to wait */
4671        if (!ioc->bios_pg3.BiosVersion)
4672                return 0;
4673
4674        /* Bios is present, then we drop down here.
4675         *
4676         * If there any entries in the Bios Page 2, then we wait
4677         * for discovery to complete.
4678         */
4679
4680        /* Current Boot Device */
4681        if ((ioc->bios_pg2.CurrentBootDeviceForm &
4682            MPI2_BIOSPAGE2_FORM_MASK) ==
4683            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4684        /* Request Boot Device */
4685           (ioc->bios_pg2.ReqBootDeviceForm &
4686            MPI2_BIOSPAGE2_FORM_MASK) ==
4687            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4688        /* Alternate Request Boot Device */
4689           (ioc->bios_pg2.ReqAltBootDeviceForm &
4690            MPI2_BIOSPAGE2_FORM_MASK) ==
4691            MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
4692                return 0;
4693
4694        return 1;
4695}
4696
4697/**
4698 * _base_unmask_events - turn on notification for this event
4699 * @ioc: per adapter object
4700 * @event: firmware event
4701 *
4702 * The mask is stored in ioc->event_masks.
4703 */
4704static void
4705_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
4706{
4707        u32 desired_event;
4708
4709        if (event >= 128)
4710                return;
4711
4712        desired_event = (1 << (event % 32));
4713
4714        if (event < 32)
4715                ioc->event_masks[0] &= ~desired_event;
4716        else if (event < 64)
4717                ioc->event_masks[1] &= ~desired_event;
4718        else if (event < 96)
4719                ioc->event_masks[2] &= ~desired_event;
4720        else if (event < 128)
4721                ioc->event_masks[3] &= ~desired_event;
4722}
4723
4724/**
4725 * _base_event_notification - send event notification
4726 * @ioc: per adapter object
4727 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4728 *
4729 * Returns 0 for success, non-zero for failure.
4730 */
4731static int
4732_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4733{
4734        Mpi2EventNotificationRequest_t *mpi_request;
4735        unsigned long timeleft;
4736        u16 smid;
4737        int r = 0;
4738        int i;
4739
4740        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4741            __func__));
4742
4743        if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4744                pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4745                    ioc->name, __func__);
4746                return -EAGAIN;
4747        }
4748
4749        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4750        if (!smid) {
4751                pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4752                    ioc->name, __func__);
4753                return -EAGAIN;
4754        }
4755        ioc->base_cmds.status = MPT3_CMD_PENDING;
4756        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4757        ioc->base_cmds.smid = smid;
4758        memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
4759        mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
4760        mpi_request->VF_ID = 0; /* TODO */
4761        mpi_request->VP_ID = 0;
4762        for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4763                mpi_request->EventMasks[i] =
4764                    cpu_to_le32(ioc->event_masks[i]);
4765        init_completion(&ioc->base_cmds.done);
4766        mpt3sas_base_put_smid_default(ioc, smid);
4767        timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
4768        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4769                pr_err(MPT3SAS_FMT "%s: timeout\n",
4770                    ioc->name, __func__);
4771                _debug_dump_mf(mpi_request,
4772                    sizeof(Mpi2EventNotificationRequest_t)/4);
4773                if (ioc->base_cmds.status & MPT3_CMD_RESET)
4774                        r = -EFAULT;
4775                else
4776                        r = -ETIME;
4777        } else
4778                dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
4779                    ioc->name, __func__));
4780        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4781        return r;
4782}
4783
4784/**
4785 * mpt3sas_base_validate_event_type - validating event types
4786 * @ioc: per adapter object
4787 * @event: firmware event
4788 *
4789 * This will turn on firmware event notification when application
4790 * ask for that event. We don't mask events that are already enabled.
4791 */
4792void
4793mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4794{
4795        int i, j;
4796        u32 event_mask, desired_event;
4797        u8 send_update_to_fw;
4798
4799        for (i = 0, send_update_to_fw = 0; i <
4800            MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4801                event_mask = ~event_type[i];
4802                desired_event = 1;
4803                for (j = 0; j < 32; j++) {
4804                        if (!(event_mask & desired_event) &&
4805                            (ioc->event_masks[i] & desired_event)) {
4806                                ioc->event_masks[i] &= ~desired_event;
4807                                send_update_to_fw = 1;
4808                        }
4809                        desired_event = (desired_event << 1);
4810                }
4811        }
4812
4813        if (!send_update_to_fw)
4814                return;
4815
4816        mutex_lock(&ioc->base_cmds.mutex);
4817        _base_event_notification(ioc, CAN_SLEEP);
4818        mutex_unlock(&ioc->base_cmds.mutex);
4819}
4820
4821/**
4822 * _base_diag_reset - the "big hammer" start of day reset
4823 * @ioc: per adapter object
4824 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4825 *
4826 * Returns 0 for success, non-zero for failure.
4827 */
4828static int
4829_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4830{
4831        u32 host_diagnostic;
4832        u32 ioc_state;
4833        u32 count;
4834        u32 hcb_size;
4835
4836        pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
4837
4838        drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
4839            ioc->name));
4840
4841        count = 0;
4842        do {
4843                /* Write magic sequence to WriteSequence register
4844                 * Loop until in diagnostic mode
4845                 */
4846                drsprintk(ioc, pr_info(MPT3SAS_FMT
4847                        "write magic sequence\n", ioc->name));
4848                writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4849                writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4850                writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4851                writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4852                writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4853                writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4854                writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4855
4856                /* wait 100 msec */
4857                if (sleep_flag == CAN_SLEEP)
4858                        msleep(100);
4859                else
4860                        mdelay(100);
4861
4862                if (count++ > 20)
4863                        goto out;
4864
4865                host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4866                drsprintk(ioc, pr_info(MPT3SAS_FMT
4867                        "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
4868                    ioc->name, count, host_diagnostic));
4869
4870        } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4871
4872        hcb_size = readl(&ioc->chip->HCBSize);
4873
4874        drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
4875            ioc->name));
4876        writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4877             &ioc->chip->HostDiagnostic);
4878
4879        /*This delay allows the chip PCIe hardware time to finish reset tasks*/
4880        if (sleep_flag == CAN_SLEEP)
4881                msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4882        else
4883                mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4884
4885        /* Approximately 300 second max wait */
4886        for (count = 0; count < (300000000 /
4887                MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
4888
4889                host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4890
4891                if (host_diagnostic == 0xFFFFFFFF)
4892                        goto out;
4893                if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4894                        break;
4895
4896                /* Wait to pass the second read delay window */
4897                if (sleep_flag == CAN_SLEEP)
4898                        msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4899                                                                / 1000);
4900                else
4901                        mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4902                                                                / 1000);
4903        }
4904
4905        if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4906
4907                drsprintk(ioc, pr_info(MPT3SAS_FMT
4908                "restart the adapter assuming the HCB Address points to good F/W\n",
4909                    ioc->name));
4910                host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4911                host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4912                writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4913
4914                drsprintk(ioc, pr_info(MPT3SAS_FMT
4915                    "re-enable the HCDW\n", ioc->name));
4916                writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4917                    &ioc->chip->HCBSize);
4918        }
4919
4920        drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
4921            ioc->name));
4922        writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4923            &ioc->chip->HostDiagnostic);
4924
4925        drsprintk(ioc, pr_info(MPT3SAS_FMT
4926                "disable writes to the diagnostic register\n", ioc->name));
4927        writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4928
4929        drsprintk(ioc, pr_info(MPT3SAS_FMT
4930                "Wait for FW to go to the READY state\n", ioc->name));
4931        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
4932            sleep_flag);
4933        if (ioc_state) {
4934                pr_err(MPT3SAS_FMT
4935                        "%s: failed going to ready state (ioc_state=0x%x)\n",
4936                        ioc->name, __func__, ioc_state);
4937                goto out;
4938        }
4939
4940        pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
4941        return 0;
4942
4943 out:
4944        pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
4945        return -EFAULT;
4946}
4947
4948/**
4949 * _base_make_ioc_ready - put controller in READY state
4950 * @ioc: per adapter object
4951 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4952 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4953 *
4954 * Returns 0 for success, non-zero for failure.
4955 */
4956static int
4957_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4958        enum reset_type type)
4959{
4960        u32 ioc_state;
4961        int rc;
4962        int count;
4963
4964        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4965            __func__));
4966
4967        if (ioc->pci_error_recovery)
4968                return 0;
4969
4970        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4971        dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4972            ioc->name, __func__, ioc_state));
4973
4974        /* if in RESET state, it should move to READY state shortly */
4975        count = 0;
4976        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
4977                while ((ioc_state & MPI2_IOC_STATE_MASK) !=
4978                    MPI2_IOC_STATE_READY) {
4979                        if (count++ == 10) {
4980                                pr_err(MPT3SAS_FMT
4981                                        "%s: failed going to ready state (ioc_state=0x%x)\n",
4982                                    ioc->name, __func__, ioc_state);
4983                                return -EFAULT;
4984                        }
4985                        if (sleep_flag == CAN_SLEEP)
4986                                ssleep(1);
4987                        else
4988                                mdelay(1000);
4989                        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4990                }
4991        }
4992
4993        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4994                return 0;
4995
4996        if (ioc_state & MPI2_DOORBELL_USED) {
4997                dhsprintk(ioc, pr_info(MPT3SAS_FMT
4998                        "unexpected doorbell active!\n",
4999                        ioc->name));
5000                goto issue_diag_reset;
5001        }
5002
5003        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
5004                mpt3sas_base_fault_info(ioc, ioc_state &
5005                    MPI2_DOORBELL_DATA_MASK);
5006                goto issue_diag_reset;
5007        }
5008
5009        if (type == FORCE_BIG_HAMMER)
5010                goto issue_diag_reset;
5011
5012        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
5013                if (!(_base_send_ioc_reset(ioc,
5014                    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
5015                        return 0;
5016        }
5017
5018 issue_diag_reset:
5019        rc = _base_diag_reset(ioc, CAN_SLEEP);
5020        return rc;
5021}
5022
5023/**
5024 * _base_make_ioc_operational - put controller in OPERATIONAL state
5025 * @ioc: per adapter object
5026 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5027 *
5028 * Returns 0 for success, non-zero for failure.
5029 */
5030static int
5031_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5032{
5033        int r, i, index;
5034        unsigned long   flags;
5035        u32 reply_address;
5036        u16 smid;
5037        struct _tr_list *delayed_tr, *delayed_tr_next;
5038        struct _sc_list *delayed_sc, *delayed_sc_next;
5039        struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
5040        u8 hide_flag;
5041        struct adapter_reply_queue *reply_q;
5042        Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
5043
5044        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5045            __func__));
5046
5047        /* clean the delayed target reset list */
5048        list_for_each_entry_safe(delayed_tr, delayed_tr_next,
5049            &ioc->delayed_tr_list, list) {
5050                list_del(&delayed_tr->list);
5051                kfree(delayed_tr);
5052        }
5053
5054
5055        list_for_each_entry_safe(delayed_tr, delayed_tr_next,
5056            &ioc->delayed_tr_volume_list, list) {
5057                list_del(&delayed_tr->list);
5058                kfree(delayed_tr);
5059        }
5060
5061        list_for_each_entry_safe(delayed_sc, delayed_sc_next,
5062            &ioc->delayed_sc_list, list) {
5063                list_del(&delayed_sc->list);
5064                kfree(delayed_sc);
5065        }
5066
5067        list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
5068            &ioc->delayed_event_ack_list, list) {
5069                list_del(&delayed_event_ack->list);
5070                kfree(delayed_event_ack);
5071        }
5072
5073        /* initialize the scsi lookup free list */
5074        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5075        INIT_LIST_HEAD(&ioc->free_list);
5076        smid = 1;
5077        for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
5078                INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
5079                ioc->scsi_lookup[i].cb_idx = 0xFF;
5080                ioc->scsi_lookup[i].smid = smid;
5081                ioc->scsi_lookup[i].scmd = NULL;
5082                ioc->scsi_lookup[i].direct_io = 0;
5083                list_add_tail(&ioc->scsi_lookup[i].tracker_list,
5084                    &ioc->free_list);
5085        }
5086
5087        /* hi-priority queue */
5088        INIT_LIST_HEAD(&ioc->hpr_free_list);
5089        smid = ioc->hi_priority_smid;
5090        for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
5091                ioc->hpr_lookup[i].cb_idx = 0xFF;
5092                ioc->hpr_lookup[i].smid = smid;
5093                list_add_tail(&ioc->hpr_lookup[i].tracker_list,
5094                    &ioc->hpr_free_list);
5095        }
5096
5097        /* internal queue */
5098        INIT_LIST_HEAD(&ioc->internal_free_list);
5099        smid = ioc->internal_smid;
5100        for (i = 0; i < ioc->internal_depth; i++, smid++) {
5101                ioc->internal_lookup[i].cb_idx = 0xFF;
5102                ioc->internal_lookup[i].smid = smid;
5103                list_add_tail(&ioc->internal_lookup[i].tracker_list,
5104                    &ioc->internal_free_list);
5105        }
5106
5107        /* chain pool */
5108        INIT_LIST_HEAD(&ioc->free_chain_list);
5109        for (i = 0; i < ioc->chain_depth; i++)
5110                list_add_tail(&ioc->chain_lookup[i].tracker_list,
5111                    &ioc->free_chain_list);
5112
5113        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5114
5115        /* initialize Reply Free Queue */
5116        for (i = 0, reply_address = (u32)ioc->reply_dma ;
5117            i < ioc->reply_free_queue_depth ; i++, reply_address +=
5118            ioc->reply_sz)
5119                ioc->reply_free[i] = cpu_to_le32(reply_address);
5120
5121        /* initialize reply queues */
5122        if (ioc->is_driver_loading)
5123                _base_assign_reply_queues(ioc);
5124
5125        /* initialize Reply Post Free Queue */
5126        index = 0;
5127        reply_post_free_contig = ioc->reply_post[0].reply_post_free;
5128        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5129                /*
5130                 * If RDPQ is enabled, switch to the next allocation.
5131                 * Otherwise advance within the contiguous region.
5132                 */
5133                if (ioc->rdpq_array_enable) {
5134                        reply_q->reply_post_free =
5135                                ioc->reply_post[index++].reply_post_free;
5136                } else {
5137                        reply_q->reply_post_free = reply_post_free_contig;
5138                        reply_post_free_contig += ioc->reply_post_queue_depth;
5139                }
5140
5141                reply_q->reply_post_host_index = 0;
5142                for (i = 0; i < ioc->reply_post_queue_depth; i++)
5143                        reply_q->reply_post_free[i].Words =
5144                            cpu_to_le64(ULLONG_MAX);
5145                if (!_base_is_controller_msix_enabled(ioc))
5146                        goto skip_init_reply_post_free_queue;
5147        }
5148 skip_init_reply_post_free_queue:
5149
5150        r = _base_send_ioc_init(ioc, sleep_flag);
5151        if (r)
5152                return r;
5153
5154        /* initialize reply free host index */
5155        ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
5156        writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
5157
5158        /* initialize reply post host index */
5159        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5160                if (ioc->msix96_vector)
5161                        writel((reply_q->msix_index & 7)<<
5162                           MPI2_RPHI_MSIX_INDEX_SHIFT,
5163                           ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
5164                else
5165                        writel(reply_q->msix_index <<
5166                                MPI2_RPHI_MSIX_INDEX_SHIFT,
5167                                &ioc->chip->ReplyPostHostIndex);
5168
5169                if (!_base_is_controller_msix_enabled(ioc))
5170                        goto skip_init_reply_post_host_index;
5171        }
5172
5173 skip_init_reply_post_host_index:
5174
5175        _base_unmask_interrupts(ioc);
5176        r = _base_event_notification(ioc, sleep_flag);
5177        if (r)
5178                return r;
5179
5180        if (sleep_flag == CAN_SLEEP)
5181                _base_static_config_pages(ioc);
5182
5183
5184        if (ioc->is_driver_loading) {
5185
5186                if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
5187                    == 0x80) {
5188                        hide_flag = (u8) (
5189                            le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
5190                            MFG_PAGE10_HIDE_SSDS_MASK);
5191                        if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
5192                                ioc->mfg_pg10_hide_flag = hide_flag;
5193                }
5194
5195                ioc->wait_for_discovery_to_complete =
5196                    _base_determine_wait_on_discovery(ioc);
5197
5198                return r; /* scan_start and scan_finished support */
5199        }
5200
5201        r = _base_send_port_enable(ioc, sleep_flag);
5202        if (r)
5203                return r;
5204
5205        return r;
5206}
5207
5208/**
5209 * mpt3sas_base_free_resources - free resources controller resources
5210 * @ioc: per adapter object
5211 *
5212 * Return nothing.
5213 */
5214void
5215mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
5216{
5217        dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5218            __func__));
5219
5220        /* synchronizing freeing resource with pci_access_mutex lock */
5221        mutex_lock(&ioc->pci_access_mutex);
5222        if (ioc->chip_phys && ioc->chip) {
5223                _base_mask_interrupts(ioc);
5224                ioc->shost_recovery = 1;
5225                _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
5226                ioc->shost_recovery = 0;
5227        }
5228
5229        mpt3sas_base_unmap_resources(ioc);
5230        mutex_unlock(&ioc->pci_access_mutex);
5231        return;
5232}
5233
5234/**
5235 * mpt3sas_base_attach - attach controller instance
5236 * @ioc: per adapter object
5237 *
5238 * Returns 0 for success, non-zero for failure.
5239 */
5240int
5241mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5242{
5243        int r, i;
5244        int cpu_id, last_cpu_id = 0;
5245
5246        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5247            __func__));
5248
5249        /* setup cpu_msix_table */
5250        ioc->cpu_count = num_online_cpus();
5251        for_each_online_cpu(cpu_id)
5252                last_cpu_id = cpu_id;
5253        ioc->cpu_msix_table_sz = last_cpu_id + 1;
5254        ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
5255        ioc->reply_queue_count = 1;
5256        if (!ioc->cpu_msix_table) {
5257                dfailprintk(ioc, pr_info(MPT3SAS_FMT
5258                        "allocation for cpu_msix_table failed!!!\n",
5259                        ioc->name));
5260                r = -ENOMEM;
5261                goto out_free_resources;
5262        }
5263
5264        if (ioc->is_warpdrive) {
5265                ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
5266                    sizeof(resource_size_t *), GFP_KERNEL);
5267                if (!ioc->reply_post_host_index) {
5268                        dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
5269                                "for cpu_msix_table failed!!!\n", ioc->name));
5270                        r = -ENOMEM;
5271                        goto out_free_resources;
5272                }
5273        }
5274
5275        ioc->rdpq_array_enable_assigned = 0;
5276        ioc->dma_mask = 0;
5277        r = mpt3sas_base_map_resources(ioc);
5278        if (r)
5279                goto out_free_resources;
5280
5281        if (ioc->is_warpdrive) {
5282                ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
5283                    &ioc->chip->ReplyPostHostIndex;
5284
5285                for (i = 1; i < ioc->cpu_msix_table_sz; i++)
5286                        ioc->reply_post_host_index[i] =
5287                        (resource_size_t __iomem *)
5288                        ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
5289                        * 4)));
5290        }
5291
5292        pci_set_drvdata(ioc->pdev, ioc->shost);
5293        r = _base_get_ioc_facts(ioc, CAN_SLEEP);
5294        if (r)
5295                goto out_free_resources;
5296
5297        switch (ioc->hba_mpi_version_belonged) {
5298        case MPI2_VERSION:
5299                ioc->build_sg_scmd = &_base_build_sg_scmd;
5300                ioc->build_sg = &_base_build_sg;
5301                ioc->build_zero_len_sge = &_base_build_zero_len_sge;
5302                break;
5303        case MPI25_VERSION:
5304        case MPI26_VERSION:
5305                /*
5306                 * In SAS3.0,
5307                 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
5308                 * Target Status - all require the IEEE formated scatter gather
5309                 * elements.
5310                 */
5311                ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
5312                ioc->build_sg = &_base_build_sg_ieee;
5313                ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
5314                ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
5315                break;
5316        }
5317
5318        /*
5319         * These function pointers for other requests that don't
5320         * the require IEEE scatter gather elements.
5321         *
5322         * For example Configuration Pages and SAS IOUNIT Control don't.
5323         */
5324        ioc->build_sg_mpi = &_base_build_sg;
5325        ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
5326
5327        r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
5328        if (r)
5329                goto out_free_resources;
5330
5331        ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
5332            sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
5333        if (!ioc->pfacts) {
5334                r = -ENOMEM;
5335                goto out_free_resources;
5336        }
5337
5338        for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
5339                r = _base_get_port_facts(ioc, i, CAN_SLEEP);
5340                if (r)
5341                        goto out_free_resources;
5342        }
5343
5344        r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
5345        if (r)
5346                goto out_free_resources;
5347
5348        init_waitqueue_head(&ioc->reset_wq);
5349
5350        /* allocate memory pd handle bitmask list */
5351        ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
5352        if (ioc->facts.MaxDevHandle % 8)
5353                ioc->pd_handles_sz++;
5354        ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
5355            GFP_KERNEL);
5356        if (!ioc->pd_handles) {
5357                r = -ENOMEM;
5358                goto out_free_resources;
5359        }
5360        ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
5361            GFP_KERNEL);
5362        if (!ioc->blocking_handles) {
5363                r = -ENOMEM;
5364                goto out_free_resources;
5365        }
5366
5367        ioc->fwfault_debug = mpt3sas_fwfault_debug;
5368
5369        /* base internal command bits */
5370        mutex_init(&ioc->base_cmds.mutex);
5371        ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5372        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5373
5374        /* port_enable command bits */
5375        ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5376        ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5377
5378        /* transport internal command bits */
5379        ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5380        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
5381        mutex_init(&ioc->transport_cmds.mutex);
5382
5383        /* scsih internal command bits */
5384        ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5385        ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
5386        mutex_init(&ioc->scsih_cmds.mutex);
5387
5388        /* task management internal command bits */
5389        ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5390        ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
5391        mutex_init(&ioc->tm_cmds.mutex);
5392
5393        /* config page internal command bits */
5394        ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5395        ioc->config_cmds.status = MPT3_CMD_NOT_USED;
5396        mutex_init(&ioc->config_cmds.mutex);
5397
5398        /* ctl module internal command bits */
5399        ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5400        ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5401        ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
5402        mutex_init(&ioc->ctl_cmds.mutex);
5403
5404        if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
5405            !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
5406            !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
5407            !ioc->ctl_cmds.sense) {
5408                r = -ENOMEM;
5409                goto out_free_resources;
5410        }
5411
5412        for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5413                ioc->event_masks[i] = -1;
5414
5415        /* here we enable the events we care about */
5416        _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
5417        _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
5418        _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
5419        _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5420        _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
5421        _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
5422        _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
5423        _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
5424        _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
5425        _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
5426        _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
5427
5428        r = _base_make_ioc_operational(ioc, CAN_SLEEP);
5429        if (r)
5430                goto out_free_resources;
5431
5432        ioc->non_operational_loop = 0;
5433        return 0;
5434
5435 out_free_resources:
5436
5437        ioc->remove_host = 1;
5438
5439        mpt3sas_base_free_resources(ioc);
5440        _base_release_memory_pools(ioc);
5441        pci_set_drvdata(ioc->pdev, NULL);
5442        kfree(ioc->cpu_msix_table);
5443        if (ioc->is_warpdrive)
5444                kfree(ioc->reply_post_host_index);
5445        kfree(ioc->pd_handles);
5446        kfree(ioc->blocking_handles);
5447        kfree(ioc->tm_cmds.reply);
5448        kfree(ioc->transport_cmds.reply);
5449        kfree(ioc->scsih_cmds.reply);
5450        kfree(ioc->config_cmds.reply);
5451        kfree(ioc->base_cmds.reply);
5452        kfree(ioc->port_enable_cmds.reply);
5453        kfree(ioc->ctl_cmds.reply);
5454        kfree(ioc->ctl_cmds.sense);
5455        kfree(ioc->pfacts);
5456        ioc->ctl_cmds.reply = NULL;
5457        ioc->base_cmds.reply = NULL;
5458        ioc->tm_cmds.reply = NULL;
5459        ioc->scsih_cmds.reply = NULL;
5460        ioc->transport_cmds.reply = NULL;
5461        ioc->config_cmds.reply = NULL;
5462        ioc->pfacts = NULL;
5463        return r;
5464}
5465
5466
5467/**
5468 * mpt3sas_base_detach - remove controller instance
5469 * @ioc: per adapter object
5470 *
5471 * Return nothing.
5472 */
5473void
5474mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
5475{
5476        dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5477            __func__));
5478
5479        mpt3sas_base_stop_watchdog(ioc);
5480        mpt3sas_base_free_resources(ioc);
5481        _base_release_memory_pools(ioc);
5482        pci_set_drvdata(ioc->pdev, NULL);
5483        kfree(ioc->cpu_msix_table);
5484        if (ioc->is_warpdrive)
5485                kfree(ioc->reply_post_host_index);
5486        kfree(ioc->pd_handles);
5487        kfree(ioc->blocking_handles);
5488        kfree(ioc->pfacts);
5489        kfree(ioc->ctl_cmds.reply);
5490        kfree(ioc->ctl_cmds.sense);
5491        kfree(ioc->base_cmds.reply);
5492        kfree(ioc->port_enable_cmds.reply);
5493        kfree(ioc->tm_cmds.reply);
5494        kfree(ioc->transport_cmds.reply);
5495        kfree(ioc->scsih_cmds.reply);
5496        kfree(ioc->config_cmds.reply);
5497}
5498
5499/**
5500 * _base_reset_handler - reset callback handler (for base)
5501 * @ioc: per adapter object
5502 * @reset_phase: phase
5503 *
5504 * The handler for doing any required cleanup or initialization.
5505 *
5506 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
5507 * MPT3_IOC_DONE_RESET
5508 *
5509 * Return nothing.
5510 */
5511static void
5512_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
5513{
5514        mpt3sas_scsih_reset_handler(ioc, reset_phase);
5515        mpt3sas_ctl_reset_handler(ioc, reset_phase);
5516        switch (reset_phase) {
5517        case MPT3_IOC_PRE_RESET:
5518                dtmprintk(ioc, pr_info(MPT3SAS_FMT
5519                "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
5520                break;
5521        case MPT3_IOC_AFTER_RESET:
5522                dtmprintk(ioc, pr_info(MPT3SAS_FMT
5523                "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
5524                if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
5525                        ioc->transport_cmds.status |= MPT3_CMD_RESET;
5526                        mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
5527                        complete(&ioc->transport_cmds.done);
5528                }
5529                if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
5530                        ioc->base_cmds.status |= MPT3_CMD_RESET;
5531                        mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
5532                        complete(&ioc->base_cmds.done);
5533                }
5534                if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5535                        ioc->port_enable_failed = 1;
5536                        ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
5537                        mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
5538                        if (ioc->is_driver_loading) {
5539                                ioc->start_scan_failed =
5540                                    MPI2_IOCSTATUS_INTERNAL_ERROR;
5541                                ioc->start_scan = 0;
5542                                ioc->port_enable_cmds.status =
5543                                    MPT3_CMD_NOT_USED;
5544                        } else
5545                                complete(&ioc->port_enable_cmds.done);
5546                }
5547                if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
5548                        ioc->config_cmds.status |= MPT3_CMD_RESET;
5549                        mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
5550                        ioc->config_cmds.smid = USHRT_MAX;
5551                        complete(&ioc->config_cmds.done);
5552                }
5553                break;
5554        case MPT3_IOC_DONE_RESET:
5555                dtmprintk(ioc, pr_info(MPT3SAS_FMT
5556                        "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
5557                break;
5558        }
5559}
5560
5561/**
5562 * _wait_for_commands_to_complete - reset controller
5563 * @ioc: Pointer to MPT_ADAPTER structure
5564 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5565 *
5566 * This function waiting(3s) for all pending commands to complete
5567 * prior to putting controller in reset.
5568 */
5569static void
5570_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5571{
5572        u32 ioc_state;
5573        unsigned long flags;
5574        u16 i;
5575
5576        ioc->pending_io_count = 0;
5577        if (sleep_flag != CAN_SLEEP)
5578                return;
5579
5580        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5581        if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
5582                return;
5583
5584        /* pending command count */
5585        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5586        for (i = 0; i < ioc->scsiio_depth; i++)
5587                if (ioc->scsi_lookup[i].cb_idx != 0xFF)
5588                        ioc->pending_io_count++;
5589        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5590
5591        if (!ioc->pending_io_count)
5592                return;
5593
5594        /* wait for pending commands to complete */
5595        wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
5596}
5597
5598/**
5599 * mpt3sas_base_hard_reset_handler - reset controller
5600 * @ioc: Pointer to MPT_ADAPTER structure
5601 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5602 * @type: FORCE_BIG_HAMMER or SOFT_RESET
5603 *
5604 * Returns 0 for success, non-zero for failure.
5605 */
5606int
5607mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5608        enum reset_type type)
5609{
5610        int r;
5611        unsigned long flags;
5612        u32 ioc_state;
5613        u8 is_fault = 0, is_trigger = 0;
5614
5615        dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
5616            __func__));
5617
5618        if (ioc->pci_error_recovery) {
5619                pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
5620                    ioc->name, __func__);
5621                r = 0;
5622                goto out_unlocked;
5623        }
5624
5625        if (mpt3sas_fwfault_debug)
5626                mpt3sas_halt_firmware(ioc);
5627
5628        /* TODO - What we really should be doing is pulling
5629         * out all the code associated with NO_SLEEP; its never used.
5630         * That is legacy code from mpt fusion driver, ported over.
5631         * I will leave this BUG_ON here for now till its been resolved.
5632         */
5633        BUG_ON(sleep_flag == NO_SLEEP);
5634
5635        /* wait for an active reset in progress to complete */
5636        if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
5637                do {
5638                        ssleep(1);
5639                } while (ioc->shost_recovery == 1);
5640                dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5641                    __func__));
5642                return ioc->ioc_reset_in_progress_status;
5643        }
5644
5645        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5646        ioc->shost_recovery = 1;
5647        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5648
5649        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5650            MPT3_DIAG_BUFFER_IS_REGISTERED) &&
5651            (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
5652            MPT3_DIAG_BUFFER_IS_RELEASED))) {
5653                is_trigger = 1;
5654                ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5655                if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
5656                        is_fault = 1;
5657        }
5658        _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
5659        _wait_for_commands_to_complete(ioc, sleep_flag);
5660        _base_mask_interrupts(ioc);
5661        r = _base_make_ioc_ready(ioc, sleep_flag, type);
5662        if (r)
5663                goto out;
5664        _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
5665
5666        /* If this hard reset is called while port enable is active, then
5667         * there is no reason to call make_ioc_operational
5668         */
5669        if (ioc->is_driver_loading && ioc->port_enable_failed) {
5670                ioc->remove_host = 1;
5671                r = -EFAULT;
5672                goto out;
5673        }
5674        r = _base_get_ioc_facts(ioc, CAN_SLEEP);
5675        if (r)
5676                goto out;
5677
5678        if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
5679                panic("%s: Issue occurred with flashing controller firmware."
5680                      "Please reboot the system and ensure that the correct"
5681                      " firmware version is running\n", ioc->name);
5682
5683        r = _base_make_ioc_operational(ioc, sleep_flag);
5684        if (!r)
5685                _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
5686
5687 out:
5688        dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
5689            ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
5690
5691        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5692        ioc->ioc_reset_in_progress_status = r;
5693        ioc->shost_recovery = 0;
5694        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5695        ioc->ioc_reset_count++;
5696        mutex_unlock(&ioc->reset_in_progress_mutex);
5697
5698 out_unlocked:
5699        if ((r == 0) && is_trigger) {
5700                if (is_fault)
5701                        mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
5702                else
5703                        mpt3sas_trigger_master(ioc,
5704                            MASTER_TRIGGER_ADAPTER_RESET);
5705        }
5706        dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
5707            __func__));
5708        return r;
5709}
5710