linux/drivers/scsi/mpt3sas/mpt3sas_ctl.c
   1/*
   2 * Management Module Support for MPT (Message Passing Technology) based
   3 * controllers
   4 *
   5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
   6 * Copyright (C) 2012-2014  LSI Corporation
   7 * Copyright (C) 2013-2014 Avago Technologies
   8 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public License
  12 * as published by the Free Software Foundation; either version 2
  13 * of the License, or (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 *
  20 * NO WARRANTY
  21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  25 * solely responsible for determining the appropriateness of using and
  26 * distributing the Program and assumes all risks associated with its
  27 * exercise of rights under this Agreement, including but not limited to
  28 * the risks and costs of program errors, damage to or loss of data,
  29 * programs or equipment, and unavailability or interruption of operations.
  30
  31 * DISCLAIMER OF LIABILITY
  32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  39
  40 * You should have received a copy of the GNU General Public License
  41 * along with this program; if not, write to the Free Software
  42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
  43 * USA.
  44 */
  45
  46#include <linux/kernel.h>
  47#include <linux/module.h>
  48#include <linux/errno.h>
  49#include <linux/init.h>
  50#include <linux/slab.h>
  51#include <linux/types.h>
  52#include <linux/pci.h>
  53#include <linux/delay.h>
  54#include <linux/compat.h>
  55#include <linux/poll.h>
  56
  57#include <linux/io.h>
  58#include <linux/uaccess.h>
  59
  60#include "mpt3sas_base.h"
  61#include "mpt3sas_ctl.h"
  62
  63
  64static struct fasync_struct *async_queue;
  65static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
  66
  67
/**
 * enum block_state - blocking state
 * @NON_BLOCKING: non blocking
 * @BLOCKING: blocking
 *
 * These states are for ioctls that need to wait for a response
 * from firmware, and therefore may need to sleep.
 */
enum block_state {
        NON_BLOCKING,
        BLOCKING,
};
  80
/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string passed from the calling function
 * @mpi_reply: reply message frame
 * Context: none.
 *
 * Function for displaying debug info helpful when debugging issues
 * in this module.
 */
  92static void
  93_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  94        char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
  95{
  96        Mpi2ConfigRequest_t *mpi_request;
  97        char *desc = NULL;
  98
  99        if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
 100                return;
 101
 102        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 103        switch (mpi_request->Function) {
 104        case MPI2_FUNCTION_SCSI_IO_REQUEST:
 105        {
 106                Mpi2SCSIIORequest_t *scsi_request =
 107                    (Mpi2SCSIIORequest_t *)mpi_request;
 108
 109                snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
 110                    "scsi_io, cmd(0x%02x), cdb_len(%d)",
 111                    scsi_request->CDB.CDB32[0],
 112                    le16_to_cpu(scsi_request->IoFlags) & 0xF);
 113                desc = ioc->tmp_string;
 114                break;
 115        }
 116        case MPI2_FUNCTION_SCSI_TASK_MGMT:
 117                desc = "task_mgmt";
 118                break;
 119        case MPI2_FUNCTION_IOC_INIT:
 120                desc = "ioc_init";
 121                break;
 122        case MPI2_FUNCTION_IOC_FACTS:
 123                desc = "ioc_facts";
 124                break;
 125        case MPI2_FUNCTION_CONFIG:
 126        {
 127                Mpi2ConfigRequest_t *config_request =
 128                    (Mpi2ConfigRequest_t *)mpi_request;
 129
 130                snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
 131                    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
 132                    (config_request->Header.PageType &
 133                     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
 134                    config_request->Header.PageNumber);
 135                desc = ioc->tmp_string;
 136                break;
 137        }
 138        case MPI2_FUNCTION_PORT_FACTS:
 139                desc = "port_facts";
 140                break;
 141        case MPI2_FUNCTION_PORT_ENABLE:
 142                desc = "port_enable";
 143                break;
 144        case MPI2_FUNCTION_EVENT_NOTIFICATION:
 145                desc = "event_notification";
 146                break;
 147        case MPI2_FUNCTION_FW_DOWNLOAD:
 148                desc = "fw_download";
 149                break;
 150        case MPI2_FUNCTION_FW_UPLOAD:
 151                desc = "fw_upload";
 152                break;
 153        case MPI2_FUNCTION_RAID_ACTION:
 154                desc = "raid_action";
 155                break;
 156        case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
 157        {
 158                Mpi2SCSIIORequest_t *scsi_request =
 159                    (Mpi2SCSIIORequest_t *)mpi_request;
 160
 161                snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
 162                    "raid_pass, cmd(0x%02x), cdb_len(%d)",
 163                    scsi_request->CDB.CDB32[0],
 164                    le16_to_cpu(scsi_request->IoFlags) & 0xF);
 165                desc = ioc->tmp_string;
 166                break;
 167        }
 168        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
 169                desc = "sas_iounit_cntl";
 170                break;
 171        case MPI2_FUNCTION_SATA_PASSTHROUGH:
 172                desc = "sata_pass";
 173                break;
 174        case MPI2_FUNCTION_DIAG_BUFFER_POST:
 175                desc = "diag_buffer_post";
 176                break;
 177        case MPI2_FUNCTION_DIAG_RELEASE:
 178                desc = "diag_release";
 179                break;
 180        case MPI2_FUNCTION_SMP_PASSTHROUGH:
 181                desc = "smp_passthrough";
 182                break;
 183        case MPI2_FUNCTION_TOOLBOX:
 184                desc = "toolbox";
 185                break;
 186        case MPI2_FUNCTION_NVME_ENCAPSULATED:
 187                desc = "nvme_encapsulated";
 188                break;
 189        }
 190
 191        if (!desc)
 192                return;
 193
 194        ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);
 195
 196        if (!mpi_reply)
 197                return;
 198
 199        if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
 200                ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
 201                         le16_to_cpu(mpi_reply->IOCStatus),
 202                         le32_to_cpu(mpi_reply->IOCLogInfo));
 203
 204        if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 205            mpi_request->Function ==
 206            MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
 207                Mpi2SCSIIOReply_t *scsi_reply =
 208                    (Mpi2SCSIIOReply_t *)mpi_reply;
 209                struct _sas_device *sas_device = NULL;
 210                struct _pcie_device *pcie_device = NULL;
 211
 212                sas_device = mpt3sas_get_sdev_by_handle(ioc,
 213                    le16_to_cpu(scsi_reply->DevHandle));
 214                if (sas_device) {
 215                        ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
 216                                 (u64)sas_device->sas_address,
 217                                 sas_device->phy);
 218                        ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
 219                                 (u64)sas_device->enclosure_logical_id,
 220                                 sas_device->slot);
 221                        sas_device_put(sas_device);
 222                }
 223                if (!sas_device) {
 224                        pcie_device = mpt3sas_get_pdev_by_handle(ioc,
 225                                le16_to_cpu(scsi_reply->DevHandle));
 226                        if (pcie_device) {
 227                                ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
 228                                         (unsigned long long)pcie_device->wwid,
 229                                         pcie_device->port_num);
 230                                if (pcie_device->enclosure_handle != 0)
 231                                        ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
 232                                                 (u64)pcie_device->enclosure_logical_id,
 233                                                 pcie_device->slot);
 234                                pcie_device_put(pcie_device);
 235                        }
 236                }
 237                if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
 238                        ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
 239                                 scsi_reply->SCSIState,
 240                                 scsi_reply->SCSIStatus);
 241        }
 242}
 243
/**
 * mpt3sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32-bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *         0 meaning the mf is freed from this function.
 */
 257u8
 258mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 259        u32 reply)
 260{
 261        MPI2DefaultReply_t *mpi_reply;
 262        Mpi2SCSIIOReply_t *scsiio_reply;
 263        Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
 264        const void *sense_data;
 265        u32 sz;
 266
 267        if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
 268                return 1;
 269        if (ioc->ctl_cmds.smid != smid)
 270                return 1;
 271        ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
 272        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 273        if (mpi_reply) {
 274                memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
 275                ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
 276                /* get sense data */
 277                if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 278                    mpi_reply->Function ==
 279                    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
 280                        scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
 281                        if (scsiio_reply->SCSIState &
 282                            MPI2_SCSI_STATE_AUTOSENSE_VALID) {
 283                                sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
 284                                    le32_to_cpu(scsiio_reply->SenseCount));
 285                                sense_data = mpt3sas_base_get_sense_buffer(ioc,
 286                                    smid);
 287                                memcpy(ioc->ctl_cmds.sense, sense_data, sz);
 288                        }
 289                }
 290                /*
 291                 * Get Error Response data for NVMe device. The ctl_cmds.sense
 292                 * buffer is used to store the Error Response data.
 293                 */
 294                if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
 295                        nvme_error_reply =
 296                            (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
 297                        sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
 298                            le16_to_cpu(nvme_error_reply->ErrorResponseCount));
 299                        sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
 300                        memcpy(ioc->ctl_cmds.sense, sense_data, sz);
 301                }
 302        }
 303
 304        _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
 305        ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
 306        complete(&ioc->ctl_cmds.done);
 307        return 1;
 308}
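
/*
 * Note: the reply frame and the sense/NVMe error data captured above in
 * ioc->ctl_cmds are what _ctl_do_mpt_command() later copies back to the
 * user-supplied reply_frame_buf_ptr and sense_data_ptr buffers.
 */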
 309
/**
 * _ctl_check_event_type - determines when an event needs logging
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The bitmask in ioc->event_type[] indicates which events should be
 * saved in the driver event_log.  This bitmask is set by the application.
 *
 * Return: 1 when the event should be captured, 0 when there is no match.
 */
 320static int
 321_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
 322{
 323        u16 i;
 324        u32 desired_event;
 325
 326        if (event >= 128 || !event || !ioc->event_log)
 327                return 0;
 328
 329        desired_event = (1 << (event % 32));
 330        if (!desired_event)
 331                desired_event = 1;
 332        i = event / 32;
 333        return desired_event & ioc->event_type[i];
 334}
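
/*
 * Worked example (illustrative only): for an event code of 0x21 the
 * lookup above uses word i = 0x21 / 32 = 1 and mask 1 << (0x21 % 32) =
 * 0x2, so the event is logged only when bit 1 of ioc->event_type[1]
 * has been set by the application.
 */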
 335
 336/**
 337 * mpt3sas_ctl_add_to_event_log - add event
 338 * @ioc: per adapter object
 339 * @mpi_reply: reply message frame
 340 */
 341void
 342mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
 343        Mpi2EventNotificationReply_t *mpi_reply)
 344{
 345        struct MPT3_IOCTL_EVENTS *event_log;
 346        u16 event;
 347        int i;
 348        u32 sz, event_data_sz;
 349        u8 send_aen = 0;
 350
 351        if (!ioc->event_log)
 352                return;
 353
 354        event = le16_to_cpu(mpi_reply->Event);
 355
 356        if (_ctl_check_event_type(ioc, event)) {
 357
 358                /* insert entry into circular event_log */
 359                i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
 360                event_log = ioc->event_log;
 361                event_log[i].event = event;
 362                event_log[i].context = ioc->event_context++;
 363
 364                event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
 365                sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
 366                memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
 367                memcpy(event_log[i].data, mpi_reply->EventData, sz);
 368                send_aen = 1;
 369        }
 370
        /* The aen_event_read_flag stays set until the application
         * has read the event log.
         * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
         */
 375        if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
 376            (send_aen && !ioc->aen_event_read_flag)) {
 377                ioc->aen_event_read_flag = 1;
 378                wake_up_interruptible(&ctl_poll_wait);
 379                if (async_queue)
 380                        kill_fasync(&async_queue, SIGIO, POLL_IN);
 381        }
 382}
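
/*
 * Typical consumer flow (sketch, not part of the driver): a user-space
 * tool enables logging with the MPT3EVENTENABLE opcode, sleeps in
 * poll()/SIGIO, and then drains this circular log with the
 * MPT3EVENTREPORT opcode, which clears aen_event_read_flag so that the
 * next captured event re-arms the notification above.
 */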
 383
/**
 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32-bit addr)
 * Context: interrupt.
 *
 * This function copies the firmware event into the driver's ctl
 * event_log (see mpt3sas_ctl_add_to_event_log) and notifies any
 * application waiting on poll()/SIGIO.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *         0 meaning the mf is freed from this function.
 */
 397u8
 398mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
 399        u32 reply)
 400{
 401        Mpi2EventNotificationReply_t *mpi_reply;
 402
 403        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 404        if (mpi_reply)
 405                mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
 406        return 1;
 407}
 408
/**
 * _ctl_verify_adapter - validates ioc_number passed from application
 * @ioc_number: ioc number (ioc->id) supplied by the application
 * @iocpp: The ioc pointer is returned in this.
 * @mpi_version: will be MPI2_VERSION for the mpt2ctl ioctl device and
 * MPI25_VERSION | MPI26_VERSION for the mpt3ctl ioctl device.
 *
 * Return: (-1) means error, else ioc_number.
 */
 418static int
 419_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
 420                                                        int mpi_version)
 421{
 422        struct MPT3SAS_ADAPTER *ioc;
 423        int version = 0;
 424        /* global ioc lock to protect controller on list operations */
 425        spin_lock(&gioc_lock);
 426        list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
 427                if (ioc->id != ioc_number)
 428                        continue;
                /* Check whether this ioctl command came from the right
                 * ioctl device; if not, continue the search.
                 */
                version = ioc->hba_mpi_version_belonged;
                /* MPI25_VERSION and MPI26_VERSION use the same ioctl
                 * device.
                 */
 436                if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
 437                        if ((version == MPI25_VERSION) ||
 438                                (version == MPI26_VERSION))
 439                                goto out;
 440                        else
 441                                continue;
 442                } else {
 443                        if (version != mpi_version)
 444                                continue;
 445                }
 446out:
 447                spin_unlock(&gioc_lock);
 448                *iocpp = ioc;
 449                return ioc_number;
 450        }
 451        spin_unlock(&gioc_lock);
 452        *iocpp = NULL;
 453        return -1;
 454}
 455
 456/**
 457 * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
 458 * @ioc: per adapter object
 459 *
 460 * The handler for doing any required cleanup or initialization.
 461 */
 462void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 463{
 464        int i;
 465        u8 issue_reset;
 466
 467        dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 468        for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
 469                if (!(ioc->diag_buffer_status[i] &
 470                      MPT3_DIAG_BUFFER_IS_REGISTERED))
 471                        continue;
 472                if ((ioc->diag_buffer_status[i] &
 473                     MPT3_DIAG_BUFFER_IS_RELEASED))
 474                        continue;
 475
 476                /*
 477                 * add a log message to indicate the release
 478                 */
                ioc_info(ioc,
                    "%s: Releasing the trace buffer due to adapter reset.\n",
                    __func__);
 482                ioc->htb_rel.buffer_rel_condition =
 483                    MPT3_DIAG_BUFFER_REL_TRIGGER;
 484                mpt3sas_send_diag_release(ioc, i, &issue_reset);
 485        }
 486}
 487
 488/**
 489 * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
 490 * @ioc: per adapter object
 491 *
 492 * The handler for doing any required cleanup or initialization.
 493 */
 494void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
 495{
 496        dtmprintk(ioc,
 497            ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
 498        if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
 499                ioc->ctl_cmds.status |= MPT3_CMD_RESET;
 500                mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
 501                complete(&ioc->ctl_cmds.done);
 502        }
 503}
 504
 505/**
 506 * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
 507 * @ioc: per adapter object
 508 *
 509 * The handler for doing any required cleanup or initialization.
 510 */
 511void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
 512{
 513        int i;
 514
 515        dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 516
 517        for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
 518                if (!(ioc->diag_buffer_status[i] &
 519                      MPT3_DIAG_BUFFER_IS_REGISTERED))
 520                        continue;
 521                if ((ioc->diag_buffer_status[i] &
 522                     MPT3_DIAG_BUFFER_IS_RELEASED))
 523                        continue;
 524                ioc->diag_buffer_status[i] |=
 525                        MPT3_DIAG_BUFFER_IS_DIAG_RESET;
 526        }
 527}
 528
/**
 * _ctl_fasync - fasync handler
 * @fd: file descriptor
 * @filep: file pointer
 * @mode: fasync on/off flag
 *
 * Called when an application requests fasync (SIGIO) notification.
 */
 537static int
 538_ctl_fasync(int fd, struct file *filep, int mode)
 539{
 540        return fasync_helper(fd, filep, mode, &async_queue);
 541}
 542
/**
 * _ctl_poll - poll handler
 * @filep: file pointer
 * @wait: poll table
 *
 * Return: EPOLLIN | EPOLLRDNORM when any adapter has an unread AEN
 * event pending, otherwise 0.
 */
 549static __poll_t
 550_ctl_poll(struct file *filep, poll_table *wait)
 551{
 552        struct MPT3SAS_ADAPTER *ioc;
 553
 554        poll_wait(filep, &ctl_poll_wait, wait);
 555
 556        /* global ioc lock to protect controller on list operations */
 557        spin_lock(&gioc_lock);
 558        list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
 559                if (ioc->aen_event_read_flag) {
 560                        spin_unlock(&gioc_lock);
 561                        return EPOLLIN | EPOLLRDNORM;
 562                }
 563        }
 564        spin_unlock(&gioc_lock);
 565        return 0;
 566}
 567
/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg: (struct mpt3_ioctl_command)
 * @tm_request: pointer to mf from user space
 *
 * Return: 0 when a matching smid is found, 1 otherwise.
 * On failure, the reply frame is filled in before returning.
 */
 577static int
 578_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
 579        Mpi2SCSITaskManagementRequest_t *tm_request)
 580{
 581        u8 found = 0;
 582        u16 smid;
 583        u16 handle;
 584        struct scsi_cmnd *scmd;
 585        struct MPT3SAS_DEVICE *priv_data;
 586        Mpi2SCSITaskManagementReply_t *tm_reply;
 587        u32 sz;
 588        u32 lun;
 589        char *desc = NULL;
 590
 591        if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
 592                desc = "abort_task";
 593        else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
 594                desc = "query_task";
 595        else
 596                return 0;
 597
 598        lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
 599
 600        handle = le16_to_cpu(tm_request->DevHandle);
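        /*
         * Walk the SCSI IO trackers from the highest smid down and look
         * for an outstanding command matching this device handle and LUN;
         * its smid is used as (or checked against) the requested TaskMID.
         */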
 601        for (smid = ioc->scsiio_depth; smid && !found; smid--) {
 602                struct scsiio_tracker *st;
 603
 604                scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 605                if (!scmd)
 606                        continue;
 607                if (lun != scmd->device->lun)
 608                        continue;
 609                priv_data = scmd->device->hostdata;
 610                if (priv_data->sas_target == NULL)
 611                        continue;
 612                if (priv_data->sas_target->handle != handle)
 613                        continue;
 614                st = scsi_cmd_priv(scmd);
 615
 616                /*
 617                 * If the given TaskMID from the user space is zero, then the
 618                 * first outstanding smid will be picked up.  Otherwise,
 619                 * targeted smid will be the one.
 620                 */
 621                if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
 622                        tm_request->TaskMID = cpu_to_le16(st->smid);
 623                        found = 1;
 624                }
 625        }
 626
 627        if (!found) {
 628                dctlprintk(ioc,
 629                           ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
 630                                    desc, le16_to_cpu(tm_request->DevHandle),
 631                                    lun));
 632                tm_reply = ioc->ctl_cmds.reply;
 633                tm_reply->DevHandle = tm_request->DevHandle;
 634                tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 635                tm_reply->TaskType = tm_request->TaskType;
 636                tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
 637                tm_reply->VP_ID = tm_request->VP_ID;
 638                tm_reply->VF_ID = tm_request->VF_ID;
 639                sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
 640                if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
 641                    sz))
 642                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
 643                            __LINE__, __func__);
 644                return 1;
 645        }
 646
 647        dctlprintk(ioc,
 648                   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
 649                            desc, le16_to_cpu(tm_request->DevHandle), lun,
 650                            le16_to_cpu(tm_request->TaskMID)));
 651        return 0;
 652}
 653
 654/**
 655 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
 656 * @ioc: per adapter object
 657 * @karg: (struct mpt3_ioctl_command)
 658 * @mf: pointer to mf in user space
 659 */
 660static long
 661_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 662        void __user *mf)
 663{
 664        MPI2RequestHeader_t *mpi_request = NULL, *request;
 665        MPI2DefaultReply_t *mpi_reply;
 666        Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
 667        struct _pcie_device *pcie_device = NULL;
 668        u16 smid;
 669        unsigned long timeout;
 670        u8 issue_reset;
 671        u32 sz, sz_arg;
 672        void *psge;
 673        void *data_out = NULL;
 674        dma_addr_t data_out_dma = 0;
 675        size_t data_out_sz = 0;
 676        void *data_in = NULL;
 677        dma_addr_t data_in_dma = 0;
 678        size_t data_in_sz = 0;
 679        long ret;
 680        u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
 681
 682        issue_reset = 0;
 683
 684        if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
 685                ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 686                ret = -EAGAIN;
 687                goto out;
 688        }
 689
 690        ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
 691        if (ret)
 692                goto out;
 693
 694        mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
 695        if (!mpi_request) {
                ioc_err(ioc, "%s: failed obtaining memory for mpi_request\n",
                        __func__);
 698                ret = -ENOMEM;
 699                goto out;
 700        }
 701
 702        /* Check for overflow and wraparound */
 703        if (karg.data_sge_offset * 4 > ioc->request_sz ||
 704            karg.data_sge_offset > (UINT_MAX / 4)) {
 705                ret = -EINVAL;
 706                goto out;
 707        }
 708
 709        /* copy in request message frame from user */
 710        if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
 711                pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
 712                    __func__);
 713                ret = -EFAULT;
 714                goto out;
 715        }
 716
 717        if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
 718                smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
 719                if (!smid) {
 720                        ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 721                        ret = -EAGAIN;
 722                        goto out;
 723                }
 724        } else {
 725                /* Use first reserved smid for passthrough ioctls */
 726                smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
 727        }
 728
 729        ret = 0;
 730        ioc->ctl_cmds.status = MPT3_CMD_PENDING;
 731        memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
 732        request = mpt3sas_base_get_msg_frame(ioc, smid);
 733        memset(request, 0, ioc->request_sz);
 734        memcpy(request, mpi_request, karg.data_sge_offset*4);
 735        ioc->ctl_cmds.smid = smid;
 736        data_out_sz = karg.data_out_size;
 737        data_in_sz = karg.data_in_size;
 738
 739        if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 740            mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
 741            mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
 742            mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
 743            mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
 744
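                /*
                 * For these pass-through functions, FunctionDependent1 in
                 * the request frame carries the target device handle;
                 * validate it before the request is built and posted.
                 */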
 745                device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
 746                if (!device_handle || (device_handle >
 747                    ioc->facts.MaxDevHandle)) {
 748                        ret = -EINVAL;
 749                        mpt3sas_base_free_smid(ioc, smid);
 750                        goto out;
 751                }
 752        }
 753
 754        /* obtain dma-able memory for data transfer */
 755        if (data_out_sz) /* WRITE */ {
 756                data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
 757                                &data_out_dma, GFP_KERNEL);
 758                if (!data_out) {
 759                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
 760                            __LINE__, __func__);
 761                        ret = -ENOMEM;
 762                        mpt3sas_base_free_smid(ioc, smid);
 763                        goto out;
 764                }
 765                if (copy_from_user(data_out, karg.data_out_buf_ptr,
 766                        data_out_sz)) {
 767                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
 768                            __LINE__, __func__);
 769                        ret =  -EFAULT;
 770                        mpt3sas_base_free_smid(ioc, smid);
 771                        goto out;
 772                }
 773        }
 774
 775        if (data_in_sz) /* READ */ {
 776                data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
 777                                &data_in_dma, GFP_KERNEL);
 778                if (!data_in) {
 779                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
 780                            __LINE__, __func__);
 781                        ret = -ENOMEM;
 782                        mpt3sas_base_free_smid(ioc, smid);
 783                        goto out;
 784                }
 785        }
 786
 787        psge = (void *)request + (karg.data_sge_offset*4);
 788
 789        /* send command to firmware */
 790        _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
 791
 792        init_completion(&ioc->ctl_cmds.done);
 793        switch (mpi_request->Function) {
 794        case MPI2_FUNCTION_NVME_ENCAPSULATED:
 795        {
 796                nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
 797                if (!ioc->pcie_sg_lookup) {
 798                        dtmprintk(ioc, ioc_info(ioc,
 799                            "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
 800                            ));
 801
 802                        if (ioc->logging_level & MPT_DEBUG_TM)
 803                                _debug_dump_mf(nvme_encap_request,
 804                                    ioc->request_sz/4);
 805                        mpt3sas_base_free_smid(ioc, smid);
 806                        ret = -EINVAL;
 807                        goto out;
 808                }
 809                /*
 810                 * Get the Physical Address of the sense buffer.
 811                 * Use Error Response buffer address field to hold the sense
 812                 * buffer address.
 813                 * Clear the internal sense buffer, which will potentially hold
 814                 * the Completion Queue Entry on return, or 0 if no Entry.
 815                 * Build the PRPs and set direction bits.
 816                 * Send the request.
 817                 */
 818                nvme_encap_request->ErrorResponseBaseAddress =
 819                    cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
 820                nvme_encap_request->ErrorResponseBaseAddress |=
 821                   cpu_to_le64(le32_to_cpu(
 822                   mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
 823                nvme_encap_request->ErrorResponseAllocationLength =
 824                                        cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
 825                memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
 826                ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
 827                    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
 828                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
 829                        dtmprintk(ioc,
 830                                  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
 831                                           device_handle));
 832                        mpt3sas_base_free_smid(ioc, smid);
 833                        ret = -EINVAL;
 834                        goto out;
 835                }
 836                mpt3sas_base_put_smid_nvme_encap(ioc, smid);
 837                break;
 838        }
 839        case MPI2_FUNCTION_SCSI_IO_REQUEST:
 840        case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
 841        {
 842                Mpi2SCSIIORequest_t *scsiio_request =
 843                    (Mpi2SCSIIORequest_t *)request;
 844                scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
 845                scsiio_request->SenseBufferLowAddress =
 846                    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
 847                memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
 848                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
 849                        dtmprintk(ioc,
                                  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
 851                                           device_handle));
 852                        mpt3sas_base_free_smid(ioc, smid);
 853                        ret = -EINVAL;
 854                        goto out;
 855                }
 856                ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
 857                    data_in_dma, data_in_sz);
 858                if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
 859                        ioc->put_smid_scsi_io(ioc, smid, device_handle);
 860                else
 861                        ioc->put_smid_default(ioc, smid);
 862                break;
 863        }
 864        case MPI2_FUNCTION_SCSI_TASK_MGMT:
 865        {
 866                Mpi2SCSITaskManagementRequest_t *tm_request =
 867                    (Mpi2SCSITaskManagementRequest_t *)request;
 868
 869                dtmprintk(ioc,
 870                          ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
 871                                   le16_to_cpu(tm_request->DevHandle),
 872                                   tm_request->TaskType));
 873                ioc->got_task_abort_from_ioctl = 1;
 874                if (tm_request->TaskType ==
 875                    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
 876                    tm_request->TaskType ==
 877                    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
 878                        if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
 879                                mpt3sas_base_free_smid(ioc, smid);
 880                                ioc->got_task_abort_from_ioctl = 0;
 881                                goto out;
 882                        }
 883                }
 884                ioc->got_task_abort_from_ioctl = 0;
 885
 886                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
 887                        dtmprintk(ioc,
                                  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
 889                                           device_handle));
 890                        mpt3sas_base_free_smid(ioc, smid);
 891                        ret = -EINVAL;
 892                        goto out;
 893                }
 894                mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
 895                    tm_request->DevHandle));
 896                ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
 897                    data_in_dma, data_in_sz);
 898                ioc->put_smid_hi_priority(ioc, smid, 0);
 899                break;
 900        }
 901        case MPI2_FUNCTION_SMP_PASSTHROUGH:
 902        {
 903                Mpi2SmpPassthroughRequest_t *smp_request =
 904                    (Mpi2SmpPassthroughRequest_t *)mpi_request;
 905                u8 *data;
 906
 907                if (!ioc->multipath_on_hba) {
 908                        /* ioc determines which port to use */
 909                        smp_request->PhysicalPort = 0xFF;
 910                }
 911                if (smp_request->PassthroughFlags &
 912                    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
 913                        data = (u8 *)&smp_request->SGL;
 914                else {
 915                        if (unlikely(data_out == NULL)) {
 916                                pr_err("failure at %s:%d/%s()!\n",
 917                                    __FILE__, __LINE__, __func__);
 918                                mpt3sas_base_free_smid(ioc, smid);
 919                                ret = -EINVAL;
 920                                goto out;
 921                        }
 922                        data = data_out;
 923                }
 924
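                /*
                 * data[1] is the SMP function and data[10] the phy
                 * operation: a PHY CONTROL request (0x91) asking for
                 * LINK RESET (1) or HARD RESET (2) takes the link down
                 * briefly, so flag the reset and suppress loginfo noise
                 * while it completes.
                 */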
 925                if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
 926                        ioc->ioc_link_reset_in_progress = 1;
 927                        ioc->ignore_loginfos = 1;
 928                }
 929                ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 930                    data_in_sz);
 931                ioc->put_smid_default(ioc, smid);
 932                break;
 933        }
 934        case MPI2_FUNCTION_SATA_PASSTHROUGH:
 935        {
 936                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
 937                        dtmprintk(ioc,
                                  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
 939                                           device_handle));
 940                        mpt3sas_base_free_smid(ioc, smid);
 941                        ret = -EINVAL;
 942                        goto out;
 943                }
 944                ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 945                    data_in_sz);
 946                ioc->put_smid_default(ioc, smid);
 947                break;
 948        }
 949        case MPI2_FUNCTION_FW_DOWNLOAD:
 950        case MPI2_FUNCTION_FW_UPLOAD:
 951        {
 952                ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 953                    data_in_sz);
 954                ioc->put_smid_default(ioc, smid);
 955                break;
 956        }
 957        case MPI2_FUNCTION_TOOLBOX:
 958        {
 959                Mpi2ToolboxCleanRequest_t *toolbox_request =
 960                        (Mpi2ToolboxCleanRequest_t *)mpi_request;
 961
 962                if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
 963                    || (toolbox_request->Tool ==
 964                    MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN))
 965                        ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
 966                                data_in_dma, data_in_sz);
 967                else if (toolbox_request->Tool ==
 968                                MPI2_TOOLBOX_MEMORY_MOVE_TOOL) {
 969                        Mpi2ToolboxMemMoveRequest_t *mem_move_request =
 970                                        (Mpi2ToolboxMemMoveRequest_t *)request;
 971                        Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL;
 972
 973                        ioc->build_sg_mpi(ioc, psge, data_out_dma,
 974                                        data_out_sz, data_in_dma, data_in_sz);
 975                        if (data_out_sz && !data_in_sz) {
 976                                dst =
 977                                    (Mpi2SGESimple64_t *)&mem_move_request->SGL;
 978                                src = (void *)dst + ioc->sge_size;
 979
 980                                memcpy(&tmp, src, ioc->sge_size);
 981                                memcpy(src, dst, ioc->sge_size);
 982                                memcpy(dst, &tmp, ioc->sge_size);
 983                        }
 984                        if (ioc->logging_level & MPT_DEBUG_TM) {
 985                                ioc_info(ioc,
 986                                  "Mpi2ToolboxMemMoveRequest_t request msg\n");
 987                                _debug_dump_mf(mem_move_request,
 988                                                        ioc->request_sz/4);
 989                        }
 990                } else
 991                        ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
 992                            data_in_dma, data_in_sz);
 993                ioc->put_smid_default(ioc, smid);
 994                break;
 995        }
 996        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
 997        {
 998                Mpi2SasIoUnitControlRequest_t *sasiounit_request =
 999                    (Mpi2SasIoUnitControlRequest_t *)mpi_request;
1000
1001                if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
1002                    || sasiounit_request->Operation ==
1003                    MPI2_SAS_OP_PHY_LINK_RESET) {
1004                        ioc->ioc_link_reset_in_progress = 1;
1005                        ioc->ignore_loginfos = 1;
1006                }
1007                /* drop to default case for posting the request */
1008        }
1009                fallthrough;
1010        default:
1011                ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
1012                    data_in_dma, data_in_sz);
1013                ioc->put_smid_default(ioc, smid);
1014                break;
1015        }
1016
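        /*
         * Clamp the caller-supplied timeout (seconds) to at least
         * MPT3_IOCTL_DEFAULT_TIMEOUT before waiting for completion.
         */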
1017        if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
1018                timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
1019        else
1020                timeout = karg.timeout;
1021        wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
1022        if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
1023                Mpi2SCSITaskManagementRequest_t *tm_request =
1024                    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
1025                mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
1026                    tm_request->DevHandle));
1027                mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
1028        } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
1029            mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
1030                ioc->ioc_link_reset_in_progress) {
1031                ioc->ioc_link_reset_in_progress = 0;
1032                ioc->ignore_loginfos = 0;
1033        }
1034        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1035                mpt3sas_check_cmd_timeout(ioc,
1036                    ioc->ctl_cmds.status, mpi_request,
1037                    karg.data_sge_offset, issue_reset);
1038                goto issue_host_reset;
1039        }
1040
1041        mpi_reply = ioc->ctl_cmds.reply;
1042
1043        if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
1044            (ioc->logging_level & MPT_DEBUG_TM)) {
1045                Mpi2SCSITaskManagementReply_t *tm_reply =
1046                    (Mpi2SCSITaskManagementReply_t *)mpi_reply;
1047
1048                ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
1049                         le16_to_cpu(tm_reply->IOCStatus),
1050                         le32_to_cpu(tm_reply->IOCLogInfo),
1051                         le32_to_cpu(tm_reply->TerminationCount));
1052        }
1053
        /* copy out data to user */
1055        if (data_in_sz) {
1056                if (copy_to_user(karg.data_in_buf_ptr, data_in,
1057                    data_in_sz)) {
1058                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
1059                            __LINE__, __func__);
1060                        ret = -ENODATA;
1061                        goto out;
1062                }
1063        }
1064
1065        /* copy out reply message frame to user */
1066        if (karg.max_reply_bytes) {
1067                sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
1068                if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
1069                    sz)) {
1070                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
1071                            __LINE__, __func__);
1072                        ret = -ENODATA;
1073                        goto out;
1074                }
1075        }
1076
1077        /* copy out sense/NVMe Error Response to user */
1078        if (karg.max_sense_bytes && (mpi_request->Function ==
1079            MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1080            MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
1081            MPI2_FUNCTION_NVME_ENCAPSULATED)) {
1082                if (karg.sense_data_ptr == NULL) {
1083                        ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
1084                        goto out;
1085                }
1086                sz_arg = (mpi_request->Function ==
1087                MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
1088                                                        SCSI_SENSE_BUFFERSIZE;
1089                sz = min_t(u32, karg.max_sense_bytes, sz_arg);
1090                if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1091                    sz)) {
1092                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
1093                                __LINE__, __func__);
1094                        ret = -ENODATA;
1095                        goto out;
1096                }
1097        }
1098
1099 issue_host_reset:
1100        if (issue_reset) {
1101                ret = -ENODATA;
1102                if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
1103                    mpi_request->Function ==
1104                    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
1105                    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
1106                        ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
1107                                 le16_to_cpu(mpi_request->FunctionDependent1));
1108                        mpt3sas_halt_firmware(ioc);
1109                        pcie_device = mpt3sas_get_pdev_by_handle(ioc,
1110                                le16_to_cpu(mpi_request->FunctionDependent1));
1111                        if (pcie_device && (!ioc->tm_custom_handling) &&
1112                            (!(mpt3sas_scsih_is_pcie_scsi_device(
1113                            pcie_device->device_info))))
1114                                mpt3sas_scsih_issue_locked_tm(ioc,
1115                                  le16_to_cpu(mpi_request->FunctionDependent1),
1116                                  0, 0, 0,
1117                                  MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1118                                  0, pcie_device->reset_timeout,
1119                        MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
1120                        else
1121                                mpt3sas_scsih_issue_locked_tm(ioc,
1122                                  le16_to_cpu(mpi_request->FunctionDependent1),
1123                                  0, 0, 0,
1124                                  MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1125                                  0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
1126                } else
1127                        mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1128        }
1129
1130 out:
1131        if (pcie_device)
1132                pcie_device_put(pcie_device);
1133
1134        /* free memory associated with sg buffers */
1135        if (data_in)
1136                dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
1137                    data_in_dma);
1138
1139        if (data_out)
1140                dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
1141                    data_out_dma);
1142
1143        kfree(mpi_request);
1144        ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1145        return ret;
1146}
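
/*
 * Minimal user-space sketch of driving this handler (illustrative only;
 * it assumes the MPT3COMMAND opcode and struct mpt3_ioctl_command layout
 * from mpt3sas_ctl.h, an already-opened control node descriptor fd, and a
 * prebuilt MPI request frame appended after the fixed fields; cmd_size,
 * sge_offset, reply_len, reply_buf, read_len and read_buf are placeholders):
 *
 *	struct mpt3_ioctl_command *cmd = calloc(1, cmd_size);
 *
 *	cmd->timeout = 30;                      // seconds
 *	cmd->data_sge_offset = sge_offset;      // offset in 4-byte words
 *	cmd->max_reply_bytes = reply_len;
 *	cmd->reply_frame_buf_ptr = reply_buf;
 *	cmd->data_in_size = read_len;           // READ: firmware -> host
 *	cmd->data_in_buf_ptr = read_buf;
 *	// copy the MPI request frame into the mf area of the payload
 *	if (ioctl(fd, MPT3COMMAND, cmd) != 0)
 *		perror("MPT3COMMAND");
 */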
1147
1148/**
1149 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
1150 * @ioc: per adapter object
1151 * @arg: user space buffer containing ioctl content
1152 */
1153static long
1154_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1155{
1156        struct mpt3_ioctl_iocinfo karg;
1157
1158        dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1159                                 __func__));
1160
        memset(&karg, 0, sizeof(karg));
1162        if (ioc->pfacts)
1163                karg.port_number = ioc->pfacts[0].PortNumber;
1164        karg.hw_rev = ioc->pdev->revision;
1165        karg.pci_id = ioc->pdev->device;
1166        karg.subsystem_device = ioc->pdev->subsystem_device;
1167        karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1168        karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1169        karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1170        karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1171        karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1172        karg.firmware_version = ioc->facts.FWVersion.Word;
1173        strcpy(karg.driver_version, ioc->driver_name);
1174        strcat(karg.driver_version, "-");
1175        switch  (ioc->hba_mpi_version_belonged) {
1176        case MPI2_VERSION:
1177                if (ioc->is_warpdrive)
1178                        karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1179                else
1180                        karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1181                strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
1182                break;
1183        case MPI25_VERSION:
1184        case MPI26_VERSION:
1185                if (ioc->is_gen35_ioc)
1186                        karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1187                else
1188                        karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1189                strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1190                break;
1191        }
1192        karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1193
1194        if (copy_to_user(arg, &karg, sizeof(karg))) {
1195                pr_err("failure at %s:%d/%s()!\n",
1196                    __FILE__, __LINE__, __func__);
1197                return -EFAULT;
1198        }
1199        return 0;
1200}
1201
1202/**
1203 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
1204 * @ioc: per adapter object
1205 * @arg: user space buffer containing ioctl content
1206 */
1207static long
1208_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1209{
1210        struct mpt3_ioctl_eventquery karg;
1211
1212        if (copy_from_user(&karg, arg, sizeof(karg))) {
1213                pr_err("failure at %s:%d/%s()!\n",
1214                    __FILE__, __LINE__, __func__);
1215                return -EFAULT;
1216        }
1217
1218        dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1219                                 __func__));
1220
1221        karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
1222        memcpy(karg.event_types, ioc->event_type,
1223            MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1224
1225        if (copy_to_user(arg, &karg, sizeof(karg))) {
1226                pr_err("failure at %s:%d/%s()!\n",
1227                    __FILE__, __LINE__, __func__);
1228                return -EFAULT;
1229        }
1230        return 0;
1231}
1232
1233/**
1234 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
1235 * @ioc: per adapter object
1236 * @arg: user space buffer containing ioctl content
1237 */
1238static long
1239_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1240{
1241        struct mpt3_ioctl_eventenable karg;
1242
1243        if (copy_from_user(&karg, arg, sizeof(karg))) {
1244                pr_err("failure at %s:%d/%s()!\n",
1245                    __FILE__, __LINE__, __func__);
1246                return -EFAULT;
1247        }
1248
1249        dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1250                                 __func__));
1251
1252        memcpy(ioc->event_type, karg.event_types,
1253            MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1254        mpt3sas_base_validate_event_type(ioc, ioc->event_type);
1255
1256        if (ioc->event_log)
1257                return 0;
1258        /* initialize event_log */
1259        ioc->event_context = 0;
1260        ioc->aen_event_read_flag = 0;
1261        ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
1262            sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
1263        if (!ioc->event_log) {
1264                pr_err("failure at %s:%d/%s()!\n",
1265                    __FILE__, __LINE__, __func__);
1266                return -ENOMEM;
1267        }
1268        return 0;
1269}
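
/*
 * Illustrative only: to capture every event type, an application can set
 * all mask bits before issuing the MPT3EVENTENABLE opcode (the ioctl
 * header inside struct mpt3_ioctl_eventenable is assumed to be filled in
 * per mpt3sas_ctl.h):
 *
 *	struct mpt3_ioctl_eventenable ena;
 *
 *	// fill the ioctl header first, then select all event types
 *	memset(ena.event_types, 0xFF, sizeof(ena.event_types));
 *	ioctl(fd, MPT3EVENTENABLE, &ena);
 */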
1270
1271/**
1272 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
1273 * @ioc: per adapter object
1274 * @arg: user space buffer containing ioctl content
1275 */
1276static long
1277_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1278{
1279        struct mpt3_ioctl_eventreport karg;
1280        u32 number_bytes, max_events, max;
1281        struct mpt3_ioctl_eventreport __user *uarg = arg;
1282
1283        if (copy_from_user(&karg, arg, sizeof(karg))) {
1284                pr_err("failure at %s:%d/%s()!\n",
1285                    __FILE__, __LINE__, __func__);
1286                return -EFAULT;
1287        }
1288
1289        dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1290                                 __func__));
1291
1292        number_bytes = karg.hdr.max_data_size -
1293            sizeof(struct mpt3_ioctl_header);
1294        max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
1295        max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
1296
        /* If not even one event can be returned, either the request
         * was malformed or the event log was never allocated.
         */
1300        if (!max || !ioc->event_log)
1301                return -ENODATA;
1302
1303        number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
1304        if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1305                pr_err("failure at %s:%d/%s()!\n",
1306                    __FILE__, __LINE__, __func__);
1307                return -EFAULT;
1308        }
1309
1310        /* reset flag so SIGIO can restart */
1311        ioc->aen_event_read_flag = 0;
1312        return 0;
1313}
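
/*
 * Illustrative user-space sketch (not part of the driver): pulling the cached
 * event log with MPT3EVENTREPORT.  The handler above sizes the copy from
 * hdr.max_data_size, so the application allocates the header plus room for
 * the events it wants.  Assumes /dev/mpt3ctl, mpt3sas_ctl.h and an open "fd"
 * as in the earlier sketches; includes and error handling are omitted.
 *
 *	size_t sz = sizeof(struct mpt3_ioctl_eventreport) +
 *	    MPT3SAS_CTL_EVENT_LOG_SIZE * sizeof(struct MPT3_IOCTL_EVENTS);
 *	struct mpt3_ioctl_eventreport *report = calloc(1, sz);
 *
 *	report->hdr.ioc_number = 0;
 *	report->hdr.max_data_size = sz;
 *	if (ioctl(fd, MPT3EVENTREPORT, report) < 0)
 *		perror("MPT3EVENTREPORT");
 */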
1314
1315/**
1316 * _ctl_do_reset - main handler for MPT3HARDRESET opcode
1317 * @ioc: per adapter object
1318 * @arg: user space buffer containing ioctl content
1319 */
1320static long
1321_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1322{
1323        struct mpt3_ioctl_diag_reset karg;
1324        int retval;
1325
1326        if (copy_from_user(&karg, arg, sizeof(karg))) {
1327                pr_err("failure at %s:%d/%s()!\n",
1328                    __FILE__, __LINE__, __func__);
1329                return -EFAULT;
1330        }
1331
1332        if (ioc->shost_recovery || ioc->pci_error_recovery ||
1333            ioc->is_driver_loading)
1334                return -EAGAIN;
1335
1336        dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1337                                 __func__));
1338
1339        ioc->reset_from_user = 1;
1340        retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1341        ioc_info(ioc,
1342            "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
1343        return 0;
1344}
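
/*
 * Illustrative user-space sketch (not part of the driver): forcing a diag
 * reset with MPT3HARDRESET.  Only the ioctl header is needed; the handler
 * above always returns 0 and reports success or failure in the kernel log.
 * Assumes /dev/mpt3ctl, mpt3sas_ctl.h and an open "fd" as in the earlier
 * sketches.
 *
 *	struct mpt3_ioctl_diag_reset reset;
 *
 *	memset(&reset, 0, sizeof(reset));
 *	reset.hdr.ioc_number = 0;
 *	if (ioctl(fd, MPT3HARDRESET, &reset) < 0)
 *		perror("MPT3HARDRESET");
 */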
1345
1346/**
1347 * _ctl_btdh_search_sas_device - searching for sas device
1348 * @ioc: per adapter object
1349 * @btdh: btdh ioctl payload
1350 */
1351static int
1352_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1353        struct mpt3_ioctl_btdh_mapping *btdh)
1354{
1355        struct _sas_device *sas_device;
1356        unsigned long flags;
1357        int rc = 0;
1358
1359        if (list_empty(&ioc->sas_device_list))
1360                return rc;
1361
1362        spin_lock_irqsave(&ioc->sas_device_lock, flags);
1363        list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1364                if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1365                    btdh->handle == sas_device->handle) {
1366                        btdh->bus = sas_device->channel;
1367                        btdh->id = sas_device->id;
1368                        rc = 1;
1369                        goto out;
1370                } else if (btdh->bus == sas_device->channel && btdh->id ==
1371                    sas_device->id && btdh->handle == 0xFFFF) {
1372                        btdh->handle = sas_device->handle;
1373                        rc = 1;
1374                        goto out;
1375                }
1376        }
1377 out:
1378        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1379        return rc;
1380}
1381
1382/**
1383 * _ctl_btdh_search_pcie_device - searching for pcie device
1384 * @ioc: per adapter object
1385 * @btdh: btdh ioctl payload
1386 */
1387static int
1388_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
1389        struct mpt3_ioctl_btdh_mapping *btdh)
1390{
1391        struct _pcie_device *pcie_device;
1392        unsigned long flags;
1393        int rc = 0;
1394
1395        if (list_empty(&ioc->pcie_device_list))
1396                return rc;
1397
1398        spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1399        list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1400                if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1401                           btdh->handle == pcie_device->handle) {
1402                        btdh->bus = pcie_device->channel;
1403                        btdh->id = pcie_device->id;
1404                        rc = 1;
1405                        goto out;
1406                } else if (btdh->bus == pcie_device->channel && btdh->id ==
1407                           pcie_device->id && btdh->handle == 0xFFFF) {
1408                        btdh->handle = pcie_device->handle;
1409                        rc = 1;
1410                        goto out;
1411                }
1412        }
1413 out:
1414        spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1415        return rc;
1416}
1417
1418/**
1419 * _ctl_btdh_search_raid_device - searching for raid device
1420 * @ioc: per adapter object
1421 * @btdh: btdh ioctl payload
1422 */
1423static int
1424_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
1425        struct mpt3_ioctl_btdh_mapping *btdh)
1426{
1427        struct _raid_device *raid_device;
1428        unsigned long flags;
1429        int rc = 0;
1430
1431        if (list_empty(&ioc->raid_device_list))
1432                return rc;
1433
1434        spin_lock_irqsave(&ioc->raid_device_lock, flags);
1435        list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1436                if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1437                    btdh->handle == raid_device->handle) {
1438                        btdh->bus = raid_device->channel;
1439                        btdh->id = raid_device->id;
1440                        rc = 1;
1441                        goto out;
1442                } else if (btdh->bus == raid_device->channel && btdh->id ==
1443                    raid_device->id && btdh->handle == 0xFFFF) {
1444                        btdh->handle = raid_device->handle;
1445                        rc = 1;
1446                        goto out;
1447                }
1448        }
1449 out:
1450        spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1451        return rc;
1452}
1453
1454/**
1455 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
1456 * @ioc: per adapter object
1457 * @arg: user space buffer containing ioctl content
1458 */
1459static long
1460_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1461{
1462        struct mpt3_ioctl_btdh_mapping karg;
1463        int rc;
1464
1465        if (copy_from_user(&karg, arg, sizeof(karg))) {
1466                pr_err("failure at %s:%d/%s()!\n",
1467                    __FILE__, __LINE__, __func__);
1468                return -EFAULT;
1469        }
1470
1471        dctlprintk(ioc, ioc_info(ioc, "%s\n",
1472                                 __func__));
1473
1474        rc = _ctl_btdh_search_sas_device(ioc, &karg);
1475        if (!rc)
1476                rc = _ctl_btdh_search_pcie_device(ioc, &karg);
1477        if (!rc)
1478                _ctl_btdh_search_raid_device(ioc, &karg);
1479
1480        if (copy_to_user(arg, &karg, sizeof(karg))) {
1481                pr_err("failure at %s:%d/%s()!\n",
1482                    __FILE__, __LINE__, __func__);
1483                return -EFAULT;
1484        }
1485        return 0;
1486}
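
/*
 * Illustrative user-space sketch (not part of the driver): translating a
 * firmware device handle into a bus/target pair with MPT3BTDHMAPPING.  As
 * the search helpers above show, unknown fields are passed as wildcards
 * (0xFFFFFFFF for bus/id, 0xFFFF for handle) and filled in on return.  The
 * handle value is hypothetical; "fd" is as in the earlier sketches.
 *
 *	struct mpt3_ioctl_btdh_mapping map;
 *
 *	memset(&map, 0, sizeof(map));
 *	map.hdr.ioc_number = 0;
 *	map.bus = 0xFFFFFFFF;		// unknown, filled in on return
 *	map.id = 0xFFFFFFFF;		// unknown, filled in on return
 *	map.handle = 0x0009;		// hypothetical firmware device handle
 *	if (ioctl(fd, MPT3BTDHMAPPING, &map) == 0)
 *		printf("bus %u target %u\n", map.bus, map.id);
 */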
1487
1488/**
1489 * _ctl_diag_capability - return diag buffer capability
1490 * @ioc: per adapter object
1491 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1492 *
1493 * returns 1 when diag buffer support is enabled in firmware
1494 */
1495static u8
1496_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
1497{
1498        u8 rc = 0;
1499
1500        switch (buffer_type) {
1501        case MPI2_DIAG_BUF_TYPE_TRACE:
1502                if (ioc->facts.IOCCapabilities &
1503                    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1504                        rc = 1;
1505                break;
1506        case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1507                if (ioc->facts.IOCCapabilities &
1508                    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1509                        rc = 1;
1510                break;
1511        case MPI2_DIAG_BUF_TYPE_EXTENDED:
1512                if (ioc->facts.IOCCapabilities &
1513                    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
1514                        rc = 1;
1515        }
1516
1517        return rc;
1518}
1519
1520/**
1521 * _ctl_diag_get_bufftype - return diag buffer type
1522 *              either TRACE, SNAPSHOT, or EXTENDED
1523 * @ioc: per adapter object
1524 * @unique_id: specifies the unique_id for the buffer
1525 *
1526 * returns MPT3_DIAG_UID_NOT_FOUND if the id is not found
1527 */
1528static u8
1529_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
1530{
1531        u8  index;
1532
1533        for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
1534                if (ioc->unique_id[index] == unique_id)
1535                        return index;
1536        }
1537
1538        return MPT3_DIAG_UID_NOT_FOUND;
1539}
1540
1541/**
1542 * _ctl_diag_register_2 - wrapper for registering diag buffer support
1543 * @ioc: per adapter object
1544 * @diag_register: the diag_register struct passed in from user space
1545 *
1546 */
1547static long
1548_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1549        struct mpt3_diag_register *diag_register)
1550{
1551        int rc, i;
1552        void *request_data = NULL;
1553        dma_addr_t request_data_dma;
1554        u32 request_data_sz = 0;
1555        Mpi2DiagBufferPostRequest_t *mpi_request;
1556        Mpi2DiagBufferPostReply_t *mpi_reply;
1557        u8 buffer_type;
1558        u16 smid;
1559        u16 ioc_status;
1560        u32 ioc_state;
1561        u8 issue_reset = 0;
1562
1563        dctlprintk(ioc, ioc_info(ioc, "%s\n",
1564                                 __func__));
1565
1566        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1567        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1568                ioc_err(ioc, "%s: failed due to ioc not operational\n",
1569                        __func__);
1570                rc = -EAGAIN;
1571                goto out;
1572        }
1573
1574        if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1575                ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
1576                rc = -EAGAIN;
1577                goto out;
1578        }
1579
1580        buffer_type = diag_register->buffer_type;
1581        if (!_ctl_diag_capability(ioc, buffer_type)) {
1582                ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
1583                        __func__, buffer_type);
1584                return -EPERM;
1585        }
1586
1587        if (diag_register->unique_id == 0) {
1588                ioc_err(ioc,
1589                    "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
1590                    diag_register->unique_id, buffer_type);
1591                return -EINVAL;
1592        }
1593
1594        if ((ioc->diag_buffer_status[buffer_type] &
1595            MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
1596            !(ioc->diag_buffer_status[buffer_type] &
1597            MPT3_DIAG_BUFFER_IS_RELEASED)) {
1598                ioc_err(ioc,
1599                    "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
1600                    __func__, buffer_type, ioc->unique_id[buffer_type]);
1601                return -EINVAL;
1602        }
1603
1604        if (ioc->diag_buffer_status[buffer_type] &
1605            MPT3_DIAG_BUFFER_IS_REGISTERED) {
1606                /*
1607                 * If the driver posted the buffer initially and an
1608                 * application now wants to register (own) that buffer
1609                 * without releasing it first, the application's Register
1610                 * command MUST specify the same buffer type and size (as
1611                 * obtained from the Query command); otherwise the Register
1612                 * command is failed. If the application has released the
1613                 * buffer but wants to re-register it, that is allowed as
1614                 * long as the Unique-Id/Size match.
1615                 */
1616
1617                if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
1618                    ioc->diag_buffer_sz[buffer_type] ==
1619                    diag_register->requested_buffer_size) {
1620
1621                        if (!(ioc->diag_buffer_status[buffer_type] &
1622                             MPT3_DIAG_BUFFER_IS_RELEASED)) {
1623                                dctlprintk(ioc, ioc_info(ioc,
1624                                    "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
1625                                    __func__, buffer_type,
1626                                    ioc->unique_id[buffer_type],
1627                                    diag_register->unique_id));
1628
1629                                /*
1630                                 * Application wants to own the buffer with
1631                                 * the same size.
1632                                 */
1633                                ioc->unique_id[buffer_type] =
1634                                    diag_register->unique_id;
1635                                rc = 0; /* success */
1636                                goto out;
1637                        }
1638                } else if (ioc->unique_id[buffer_type] !=
1639                    MPT3DIAGBUFFUNIQUEID) {
1640                        if (ioc->unique_id[buffer_type] !=
1641                            diag_register->unique_id ||
1642                            ioc->diag_buffer_sz[buffer_type] !=
1643                            diag_register->requested_buffer_size ||
1644                            !(ioc->diag_buffer_status[buffer_type] &
1645                            MPT3_DIAG_BUFFER_IS_RELEASED)) {
1646                                ioc_err(ioc,
1647                                    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
1648                                    __func__, buffer_type);
1649                                return -EINVAL;
1650                        }
1651                } else {
1652                        ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
1653                            __func__, buffer_type);
1654                        return -EINVAL;
1655                }
1656        } else if (ioc->diag_buffer_status[buffer_type] &
1657            MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
1658
1659                if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
1660                    ioc->diag_buffer_sz[buffer_type] !=
1661                    diag_register->requested_buffer_size) {
1662
1663                        ioc_err(ioc,
1664                            "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n",
1665                             __func__, buffer_type,
1666                            ioc->diag_buffer_sz[buffer_type]);
1667                        return -EINVAL;
1668                }
1669        }
1670
1671        if (diag_register->requested_buffer_size % 4)  {
1672                ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
1673                        __func__);
1674                return -EINVAL;
1675        }
1676
1677        smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1678        if (!smid) {
1679                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
1680                rc = -EAGAIN;
1681                goto out;
1682        }
1683
1684        rc = 0;
1685        ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1686        memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1687        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1688        ioc->ctl_cmds.smid = smid;
1689
1690        request_data = ioc->diag_buffer[buffer_type];
1691        request_data_sz = diag_register->requested_buffer_size;
1692        ioc->unique_id[buffer_type] = diag_register->unique_id;
1693        /* Reset ioc variables used for additional query commands */
1694        ioc->reset_from_user = 0;
1695        memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query));
1696        ioc->diag_buffer_status[buffer_type] &=
1697            MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
1698        memcpy(ioc->product_specific[buffer_type],
1699            diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
1700        ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
1701
1702        if (request_data) {
1703                request_data_dma = ioc->diag_buffer_dma[buffer_type];
1704                if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1705                        dma_free_coherent(&ioc->pdev->dev,
1706                                        ioc->diag_buffer_sz[buffer_type],
1707                                        request_data, request_data_dma);
1708                        request_data = NULL;
1709                }
1710        }
1711
1712        if (request_data == NULL) {
1713                ioc->diag_buffer_sz[buffer_type] = 0;
1714                ioc->diag_buffer_dma[buffer_type] = 0;
1715                request_data = dma_alloc_coherent(&ioc->pdev->dev,
1716                                request_data_sz, &request_data_dma, GFP_KERNEL);
1717                if (request_data == NULL) {
1718                        ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
1719                                __func__, request_data_sz);
1720                        mpt3sas_base_free_smid(ioc, smid);
1721                        rc = -ENOMEM;
1722                        goto out;
1723                }
1724                ioc->diag_buffer[buffer_type] = request_data;
1725                ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1726                ioc->diag_buffer_dma[buffer_type] = request_data_dma;
1727        }
1728
1729        mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1730        mpi_request->BufferType = diag_register->buffer_type;
1731        mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
1732        mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1733        mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1734        mpi_request->VF_ID = 0; /* TODO */
1735        mpi_request->VP_ID = 0;
1736
1737        dctlprintk(ioc,
1738                   ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
1739                            __func__, request_data,
1740                            (unsigned long long)request_data_dma,
1741                            le32_to_cpu(mpi_request->BufferLength)));
1742
1743        for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1744                mpi_request->ProductSpecific[i] =
1745                        cpu_to_le32(ioc->product_specific[buffer_type][i]);
1746
1747        init_completion(&ioc->ctl_cmds.done);
1748        ioc->put_smid_default(ioc, smid);
1749        wait_for_completion_timeout(&ioc->ctl_cmds.done,
1750            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1751
1752        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1753                mpt3sas_check_cmd_timeout(ioc,
1754                    ioc->ctl_cmds.status, mpi_request,
1755                    sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
1756                goto issue_host_reset;
1757        }
1758
1759        /* process the completed Reply Message Frame */
1760        if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1761                ioc_err(ioc, "%s: no reply message\n", __func__);
1762                rc = -EFAULT;
1763                goto out;
1764        }
1765
1766        mpi_reply = ioc->ctl_cmds.reply;
1767        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1768
1769        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1770                ioc->diag_buffer_status[buffer_type] |=
1771                        MPT3_DIAG_BUFFER_IS_REGISTERED;
1772                dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
1773        } else {
1774                ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1775                         __func__,
1776                         ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1777                rc = -EFAULT;
1778        }
1779
1780 issue_host_reset:
1781        if (issue_reset)
1782                mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1783
1784 out:
1785
1786        if (rc && request_data) {
1787                dma_free_coherent(&ioc->pdev->dev, request_data_sz,
1788                    request_data, request_data_dma);
1789                ioc->diag_buffer_status[buffer_type] &=
1790                    ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
1791        }
1792
1793        ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1794        return rc;
1795}
1796
1797/**
1798 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
1799 * @ioc: per adapter object
1800 * @bits_to_register: bitwise field; trace is bit 0, snapshot bit 1, extended bit 2
1801 *
1802 * This is called at driver load time when the diag_buffer_enable module
1803 * parameter is set.
1804 */
1805void
1806mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
1807{
1808        struct mpt3_diag_register diag_register;
1809        int ret_val;
1810        u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
1811        u32 min_trace_buff_size = 0;
1812        u32 decr_trace_buff_size = 0;
1813
1814        memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
1815
1816        if (bits_to_register & 1) {
1817                ioc_info(ioc, "registering trace buffer support\n");
1818                ioc->diag_trigger_master.MasterData =
1819                    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
1820                diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
1821                diag_register.unique_id =
1822                    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
1823                    (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
1824
1825                if (trace_buff_size != 0) {
1826                        diag_register.requested_buffer_size = trace_buff_size;
1827                        min_trace_buff_size =
1828                            ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
1829                        decr_trace_buff_size =
1830                            ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
1831
1832                        if (min_trace_buff_size > trace_buff_size) {
1833                                /* The buff size is not set correctly */
1834                                ioc_err(ioc,
1835                                    "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
1836                                     min_trace_buff_size>>10,
1837                                     trace_buff_size>>10);
1838                                ioc_err(ioc,
1839                                    "Using zero Min Trace Buff Size\n");
1840                                min_trace_buff_size = 0;
1841                        }
1842
1843                        if (decr_trace_buff_size == 0) {
1844                                /*
1845                                 * No decrement size was provided, so
1846                                 * retry directly at the minimum size.
1847                                 */
1848                                decr_trace_buff_size =
1849                                    trace_buff_size - min_trace_buff_size;
1850                        }
1851                } else {
1852                        /* register for 2MB buffers  */
1853                        diag_register.requested_buffer_size = 2 * (1024 * 1024);
1854                }
1855
1856                do {
1857                        ret_val = _ctl_diag_register_2(ioc,  &diag_register);
1858
1859                        if (ret_val == -ENOMEM && min_trace_buff_size &&
1860                            (trace_buff_size - decr_trace_buff_size) >=
1861                            min_trace_buff_size) {
1862                                /* adjust the buffer size */
1863                                trace_buff_size -= decr_trace_buff_size;
1864                                diag_register.requested_buffer_size =
1865                                    trace_buff_size;
1866                        } else
1867                                break;
1868                } while (true);
1869
1870                if (ret_val == -ENOMEM)
1871                        ioc_err(ioc,
1872                            "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
1873                            diag_register.requested_buffer_size>>10);
1874                else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
1875                    & MPT3_DIAG_BUFFER_IS_REGISTERED) {
1876                        ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
1877                            diag_register.requested_buffer_size>>10);
1878                        if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
1879                                ioc->diag_buffer_status[
1880                                    MPI2_DIAG_BUF_TYPE_TRACE] |=
1881                                    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
1882                }
1883        }
1884
1885        if (bits_to_register & 2) {
1886                ioc_info(ioc, "registering snapshot buffer support\n");
1887                diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
1888                /* register for 2MB buffers  */
1889                diag_register.requested_buffer_size = 2 * (1024 * 1024);
1890                diag_register.unique_id = 0x7075901;
1891                _ctl_diag_register_2(ioc,  &diag_register);
1892        }
1893
1894        if (bits_to_register & 4) {
1895                ioc_info(ioc, "registering extended buffer support\n");
1896                diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
1897                /* register for 2MB buffers  */
1898                diag_register.requested_buffer_size = 2 * (1024 * 1024);
1899                diag_register.unique_id = 0x7075901;
1900                _ctl_diag_register_2(ioc,  &diag_register);
1901        }
1902}
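
/*
 * Usage note (illustrative, not part of the driver): this path is normally
 * reached through the diag_buffer_enable module parameter, whose bits match
 * @bits_to_register above (trace=1, snapshot=2, extended=4), for example:
 *
 *	modprobe mpt3sas diag_buffer_enable=1	# trace buffer only
 *	modprobe mpt3sas diag_buffer_enable=3	# trace + snapshot buffers
 *
 * The trace buffer sizes are taken from Manufacturing Page 11 when the
 * firmware reports non-zero values there.
 */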
1903
1904/**
1905 * _ctl_diag_register - application register with driver
1906 * @ioc: per adapter object
1907 * @arg: user space buffer containing ioctl content
1908 *
1909 * This allows the driver to set up any buffers that will be needed by the
1910 * firmware to communicate with the driver.
1911 */
1912static long
1913_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1914{
1915        struct mpt3_diag_register karg;
1916        long rc;
1917
1918        if (copy_from_user(&karg, arg, sizeof(karg))) {
1919                pr_err("failure at %s:%d/%s()!\n",
1920                    __FILE__, __LINE__, __func__);
1921                return -EFAULT;
1922        }
1923
1924        rc = _ctl_diag_register_2(ioc, &karg);
1925
1926        if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
1927            MPT3_DIAG_BUFFER_IS_REGISTERED))
1928                ioc->diag_buffer_status[karg.buffer_type] |=
1929                    MPT3_DIAG_BUFFER_IS_APP_OWNED;
1930
1931        return rc;
1932}
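
/*
 * Illustrative user-space sketch (not part of the driver): registering a 2 MB
 * trace buffer with MPT3DIAGREGISTER.  The unique_id below is hypothetical;
 * it only has to be non-zero and is quoted back in later query, release and
 * read calls.  Assumes /dev/mpt3ctl, mpt3sas_ctl.h, the MPI2 definitions and
 * an open "fd" as in the earlier sketches.
 *
 *	struct mpt3_diag_register reg;
 *
 *	memset(&reg, 0, sizeof(reg));
 *	reg.hdr.ioc_number = 0;
 *	reg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
 *	reg.requested_buffer_size = 2 * 1024 * 1024;	// must be a multiple of 4
 *	reg.unique_id = 0x12345678;			// hypothetical ID
 *	if (ioctl(fd, MPT3DIAGREGISTER, &reg) < 0)
 *		perror("MPT3DIAGREGISTER");
 */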
1933
1934/**
1935 * _ctl_diag_unregister - application unregister with driver
1936 * @ioc: per adapter object
1937 * @arg: user space buffer containing ioctl content
1938 *
1939 * This allows the driver to clean up any memory allocated for diag
1940 * messages and to free up any resources.
1941 */
1942static long
1943_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1944{
1945        struct mpt3_diag_unregister karg;
1946        void *request_data;
1947        dma_addr_t request_data_dma;
1948        u32 request_data_sz;
1949        u8 buffer_type;
1950
1951        if (copy_from_user(&karg, arg, sizeof(karg))) {
1952                pr_err("failure at %s:%d/%s()!\n",
1953                    __FILE__, __LINE__, __func__);
1954                return -EFAULT;
1955        }
1956
1957        dctlprintk(ioc, ioc_info(ioc, "%s\n",
1958                                 __func__));
1959
1960        buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
1961        if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
1962                ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
1963                    __func__, karg.unique_id);
1964                return -EINVAL;
1965        }
1966
1967        if (!_ctl_diag_capability(ioc, buffer_type)) {
1968                ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
1969                        __func__, buffer_type);
1970                return -EPERM;
1971        }
1972
1973        if ((ioc->diag_buffer_status[buffer_type] &
1974            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1975                ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
1976                        __func__, buffer_type);
1977                return -EINVAL;
1978        }
1979        if ((ioc->diag_buffer_status[buffer_type] &
1980            MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
1981                ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
1982                        __func__, buffer_type);
1983                return -EINVAL;
1984        }
1985
1986        if (karg.unique_id != ioc->unique_id[buffer_type]) {
1987                ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
1988                        __func__, karg.unique_id);
1989                return -EINVAL;
1990        }
1991
1992        request_data = ioc->diag_buffer[buffer_type];
1993        if (!request_data) {
1994                ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
1995                        __func__, buffer_type);
1996                return -ENOMEM;
1997        }
1998
1999        if (ioc->diag_buffer_status[buffer_type] &
2000            MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
2001                ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
2002                ioc->diag_buffer_status[buffer_type] &=
2003                    ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
2004                ioc->diag_buffer_status[buffer_type] &=
2005                    ~MPT3_DIAG_BUFFER_IS_REGISTERED;
2006        } else {
2007                request_data_sz = ioc->diag_buffer_sz[buffer_type];
2008                request_data_dma = ioc->diag_buffer_dma[buffer_type];
2009                dma_free_coherent(&ioc->pdev->dev, request_data_sz,
2010                                request_data, request_data_dma);
2011                ioc->diag_buffer[buffer_type] = NULL;
2012                ioc->diag_buffer_status[buffer_type] = 0;
2013        }
2014        return 0;
2015}
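
/*
 * Illustrative user-space sketch (not part of the driver): unregistering a
 * buffer with MPT3DIAGUNREGISTER once it has been released and read out.
 * For application-allocated buffers this also frees the DMA memory; buffers
 * the driver allocated at load time are kept and only change ownership.
 * The unique_id is the hypothetical one used at registration; "fd" as above.
 *
 *	struct mpt3_diag_unregister unreg;
 *
 *	memset(&unreg, 0, sizeof(unreg));
 *	unreg.hdr.ioc_number = 0;
 *	unreg.unique_id = 0x12345678;
 *	if (ioctl(fd, MPT3DIAGUNREGISTER, &unreg) < 0)
 *		perror("MPT3DIAGUNREGISTER");
 */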
2016
2017/**
2018 * _ctl_diag_query - query relevant info associated with diag buffers
2019 * @ioc: per adapter object
2020 * @arg: user space buffer containing ioctl content
2021 *
2022 * The application sends only buffer_type and unique_id.  The driver
2023 * inspects unique_id first and, if it is valid, fills in all the info.  If
2024 * unique_id is 0x00, the driver returns the info for the specified buffer type.
2025 */
2026static long
2027_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2028{
2029        struct mpt3_diag_query karg;
2030        void *request_data;
2031        int i;
2032        u8 buffer_type;
2033
2034        if (copy_from_user(&karg, arg, sizeof(karg))) {
2035                pr_err("failure at %s:%d/%s()!\n",
2036                    __FILE__, __LINE__, __func__);
2037                return -EFAULT;
2038        }
2039
2040        dctlprintk(ioc, ioc_info(ioc, "%s\n",
2041                                 __func__));
2042
2043        karg.application_flags = 0;
2044        buffer_type = karg.buffer_type;
2045
2046        if (!_ctl_diag_capability(ioc, buffer_type)) {
2047                ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
2048                        __func__, buffer_type);
2049                return -EPERM;
2050        }
2051
2052        if (!(ioc->diag_buffer_status[buffer_type] &
2053            MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
2054                if ((ioc->diag_buffer_status[buffer_type] &
2055                    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2056                        ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
2057                                __func__, buffer_type);
2058                        return -EINVAL;
2059                }
2060        }
2061
2062        if (karg.unique_id) {
2063                if (karg.unique_id != ioc->unique_id[buffer_type]) {
2064                        ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
2065                                __func__, karg.unique_id);
2066                        return -EINVAL;
2067                }
2068        }
2069
2070        request_data = ioc->diag_buffer[buffer_type];
2071        if (!request_data) {
2072                ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
2073                        __func__, buffer_type);
2074                return -ENOMEM;
2075        }
2076
2077        if ((ioc->diag_buffer_status[buffer_type] &
2078            MPT3_DIAG_BUFFER_IS_REGISTERED))
2079                karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;
2080
2081        if (!(ioc->diag_buffer_status[buffer_type] &
2082             MPT3_DIAG_BUFFER_IS_RELEASED))
2083                karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
2084
2085        if (!(ioc->diag_buffer_status[buffer_type] &
2086            MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
2087                karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
2088
2089        if ((ioc->diag_buffer_status[buffer_type] &
2090            MPT3_DIAG_BUFFER_IS_APP_OWNED))
2091                karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;
2092
2093        for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2094                karg.product_specific[i] =
2095                    ioc->product_specific[buffer_type][i];
2096
2097        karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
2098        karg.driver_added_buffer_size = 0;
2099        karg.unique_id = ioc->unique_id[buffer_type];
2100        karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
2101
2102        if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
2103                ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
2104                        __func__, arg);
2105                return -EFAULT;
2106        }
2107        return 0;
2108}
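
/*
 * Illustrative user-space sketch (not part of the driver): querying the state
 * of a diag buffer with MPT3DIAGQUERY.  Passing unique_id = 0 reports on
 * whatever is registered for the given buffer_type; the returned unique_id
 * and total_buffer_size are what the release and read calls need.  "fd" as
 * in the earlier sketches.
 *
 *	struct mpt3_diag_query query;
 *
 *	memset(&query, 0, sizeof(query));
 *	query.hdr.ioc_number = 0;
 *	query.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
 *	query.unique_id = 0;			// query by buffer type
 *	if (ioctl(fd, MPT3DIAGQUERY, &query) == 0)
 *		printf("uid 0x%08x size %u flags 0x%x\n", query.unique_id,
 *		       query.total_buffer_size, query.application_flags);
 */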
2109
2110/**
2111 * mpt3sas_send_diag_release - Diag Release Message
2112 * @ioc: per adapter object
2113 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
2114 * @issue_reset: output parameter; set when the caller must issue a host reset
2115 *
2116 */
2117int
2118mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
2119        u8 *issue_reset)
2120{
2121        Mpi2DiagReleaseRequest_t *mpi_request;
2122        Mpi2DiagReleaseReply_t *mpi_reply;
2123        u16 smid;
2124        u16 ioc_status;
2125        u32 ioc_state;
2126        int rc;
2127        u8 reset_needed = 0;
2128
2129        dctlprintk(ioc, ioc_info(ioc, "%s\n",
2130                                 __func__));
2131
2132        rc = 0;
2133        *issue_reset = 0;
2134
2135
2136        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
2137        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
2138                if (ioc->diag_buffer_status[buffer_type] &
2139                    MPT3_DIAG_BUFFER_IS_REGISTERED)
2140                        ioc->diag_buffer_status[buffer_type] |=
2141                            MPT3_DIAG_BUFFER_IS_RELEASED;
2142                dctlprintk(ioc,
2143                           ioc_info(ioc, "%s: skipping due to FAULT state\n",
2144                                    __func__));
2145                rc = -EAGAIN;
2146                goto out;
2147        }
2148
2149        if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2150                ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
2151                rc = -EAGAIN;
2152                goto out;
2153        }
2154
2155        smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2156        if (!smid) {
2157                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2158                rc = -EAGAIN;
2159                goto out;
2160        }
2161
2162        ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2163        memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2164        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2165        ioc->ctl_cmds.smid = smid;
2166
2167        mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
2168        mpi_request->BufferType = buffer_type;
2169        mpi_request->VF_ID = 0; /* TODO */
2170        mpi_request->VP_ID = 0;
2171
2172        init_completion(&ioc->ctl_cmds.done);
2173        ioc->put_smid_default(ioc, smid);
2174        wait_for_completion_timeout(&ioc->ctl_cmds.done,
2175            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2176
2177        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2178                mpt3sas_check_cmd_timeout(ioc,
2179                    ioc->ctl_cmds.status, mpi_request,
2180                    sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
2181                *issue_reset = reset_needed;
2182                rc = -EFAULT;
2183                goto out;
2184        }
2185
2186        /* process the completed Reply Message Frame */
2187        if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2188                ioc_err(ioc, "%s: no reply message\n", __func__);
2189                rc = -EFAULT;
2190                goto out;
2191        }
2192
2193        mpi_reply = ioc->ctl_cmds.reply;
2194        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2195
2196        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2197                ioc->diag_buffer_status[buffer_type] |=
2198                    MPT3_DIAG_BUFFER_IS_RELEASED;
2199                dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
2200        } else {
2201                ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2202                         __func__,
2203                         ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2204                rc = -EFAULT;
2205        }
2206
2207 out:
2208        ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2209        return rc;
2210}
2211
2212/**
2213 * _ctl_diag_release - request to send Diag Release Message to firmware
2214 * @ioc: per adapter object
2215 * @arg: user space buffer containing ioctl content
2216 *
2217 * This allows ownership of the specified buffer to be returned to the driver,
2218 * so that an application can read the buffer without fear that firmware is
2219 * overwriting information in the buffer.
2220 */
2221static long
2222_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2223{
2224        struct mpt3_diag_release karg;
2225        void *request_data;
2226        int rc;
2227        u8 buffer_type;
2228        u8 issue_reset = 0;
2229
2230        if (copy_from_user(&karg, arg, sizeof(karg))) {
2231                pr_err("failure at %s:%d/%s()!\n",
2232                    __FILE__, __LINE__, __func__);
2233                return -EFAULT;
2234        }
2235
2236        dctlprintk(ioc, ioc_info(ioc, "%s\n",
2237                                 __func__));
2238
2239        buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
2240        if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
2241                ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
2242                    __func__, karg.unique_id);
2243                return -EINVAL;
2244        }
2245
2246        if (!_ctl_diag_capability(ioc, buffer_type)) {
2247                ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
2248                        __func__, buffer_type);
2249                return -EPERM;
2250        }
2251
2252        if ((ioc->diag_buffer_status[buffer_type] &
2253            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2254                ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
2255                        __func__, buffer_type);
2256                return -EINVAL;
2257        }
2258
2259        if (karg.unique_id != ioc->unique_id[buffer_type]) {
2260                ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
2261                        __func__, karg.unique_id);
2262                return -EINVAL;
2263        }
2264
2265        if (ioc->diag_buffer_status[buffer_type] &
2266            MPT3_DIAG_BUFFER_IS_RELEASED) {
2267                ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
2268                        __func__, buffer_type);
2269                return -EINVAL;
2270        }
2271
2272        request_data = ioc->diag_buffer[buffer_type];
2273
2274        if (!request_data) {
2275                ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
2276                        __func__, buffer_type);
2277                return -ENOMEM;
2278        }
2279
2280        /* buffers were released due to host reset */
2281        if ((ioc->diag_buffer_status[buffer_type] &
2282            MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
2283                ioc->diag_buffer_status[buffer_type] |=
2284                    MPT3_DIAG_BUFFER_IS_RELEASED;
2285                ioc->diag_buffer_status[buffer_type] &=
2286                    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
2287                ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
2288                        __func__, buffer_type);
2289                return 0;
2290        }
2291
2292        rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
2293
2294        if (issue_reset)
2295                mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2296
2297        return rc;
2298}
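
/*
 * Illustrative user-space sketch (not part of the driver): releasing a buffer
 * back to the host with MPT3DIAGRELEASE so it can be read while firmware is
 * no longer writing to it.  The unique_id is the hypothetical one used at
 * registration; "fd" as in the earlier sketches.
 *
 *	struct mpt3_diag_release rel;
 *
 *	memset(&rel, 0, sizeof(rel));
 *	rel.hdr.ioc_number = 0;
 *	rel.unique_id = 0x12345678;
 *	if (ioctl(fd, MPT3DIAGRELEASE, &rel) < 0)
 *		perror("MPT3DIAGRELEASE");
 */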
2299
2300/**
2301 * _ctl_diag_read_buffer - request for copy of the diag buffer
2302 * @ioc: per adapter object
2303 * @arg: user space buffer containing ioctl content
2304 */
2305static long
2306_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2307{
2308        struct mpt3_diag_read_buffer karg;
2309        struct mpt3_diag_read_buffer __user *uarg = arg;
2310        void *request_data, *diag_data;
2311        Mpi2DiagBufferPostRequest_t *mpi_request;
2312        Mpi2DiagBufferPostReply_t *mpi_reply;
2313        int rc, i;
2314        u8 buffer_type;
2315        unsigned long request_size, copy_size;
2316        u16 smid;
2317        u16 ioc_status;
2318        u8 issue_reset = 0;
2319
2320        if (copy_from_user(&karg, arg, sizeof(karg))) {
2321                pr_err("failure at %s:%d/%s()!\n",
2322                    __FILE__, __LINE__, __func__);
2323                return -EFAULT;
2324        }
2325
2326        dctlprintk(ioc, ioc_info(ioc, "%s\n",
2327                                 __func__));
2328
2329        buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
2330        if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
2331                ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
2332                    __func__, karg.unique_id);
2333                return -EINVAL;
2334        }
2335
2336        if (!_ctl_diag_capability(ioc, buffer_type)) {
2337                ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
2338                        __func__, buffer_type);
2339                return -EPERM;
2340        }
2341
2342        if (karg.unique_id != ioc->unique_id[buffer_type]) {
2343                ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
2344                        __func__, karg.unique_id);
2345                return -EINVAL;
2346        }
2347
2348        request_data = ioc->diag_buffer[buffer_type];
2349        if (!request_data) {
2350                ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
2351                        __func__, buffer_type);
2352                return -ENOMEM;
2353        }
2354
2355        request_size = ioc->diag_buffer_sz[buffer_type];
2356
2357        if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
2358                ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
2359                        __func__);
2360                return -EINVAL;
2361        }
2362
2363        if (karg.starting_offset > request_size)
2364                return -EINVAL;
2365
2366        diag_data = (void *)(request_data + karg.starting_offset);
2367        dctlprintk(ioc,
2368                   ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
2369                            __func__, diag_data, karg.starting_offset,
2370                            karg.bytes_to_read));
2371
2372        /* Truncate data on requests that are too large */
2373        if ((diag_data + karg.bytes_to_read < diag_data) ||
2374            (diag_data + karg.bytes_to_read > request_data + request_size))
2375                copy_size = request_size - karg.starting_offset;
2376        else
2377                copy_size = karg.bytes_to_read;
2378
2379        if (copy_to_user((void __user *)uarg->diagnostic_data,
2380            diag_data, copy_size)) {
2381                ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
2382                        __func__, diag_data);
2383                return -EFAULT;
2384        }
2385
2386        if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
2387                return 0;
2388
2389        dctlprintk(ioc,
2390                   ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
2391                            __func__, buffer_type));
2392        if ((ioc->diag_buffer_status[buffer_type] &
2393            MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
2394                dctlprintk(ioc,
2395                           ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
2396                                    __func__, buffer_type));
2397                return 0;
2398        }
2399        /* Get a free request frame and save the message context.
2400         */
2401
2402        if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2403                ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
2404                rc = -EAGAIN;
2405                goto out;
2406        }
2407
2408        smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2409        if (!smid) {
2410                ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2411                rc = -EAGAIN;
2412                goto out;
2413        }
2414
2415        rc = 0;
2416        ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2417        memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2418        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2419        ioc->ctl_cmds.smid = smid;
2420
2421        mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
2422        mpi_request->BufferType = buffer_type;
2423        mpi_request->BufferLength =
2424            cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
2425        mpi_request->BufferAddress =
2426            cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
2427        for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2428                mpi_request->ProductSpecific[i] =
2429                        cpu_to_le32(ioc->product_specific[buffer_type][i]);
2430        mpi_request->VF_ID = 0; /* TODO */
2431        mpi_request->VP_ID = 0;
2432
2433        init_completion(&ioc->ctl_cmds.done);
2434        ioc->put_smid_default(ioc, smid);
2435        wait_for_completion_timeout(&ioc->ctl_cmds.done,
2436            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2437
2438        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2439                mpt3sas_check_cmd_timeout(ioc,
2440                    ioc->ctl_cmds.status, mpi_request,
2441                    sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
2442                goto issue_host_reset;
2443        }
2444
2445        /* process the completed Reply Message Frame */
2446        if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2447                ioc_err(ioc, "%s: no reply message\n", __func__);
2448                rc = -EFAULT;
2449                goto out;
2450        }
2451
2452        mpi_reply = ioc->ctl_cmds.reply;
2453        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2454
2455        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2456                ioc->diag_buffer_status[buffer_type] |=
2457                    MPT3_DIAG_BUFFER_IS_REGISTERED;
2458                ioc->diag_buffer_status[buffer_type] &=
2459                    ~MPT3_DIAG_BUFFER_IS_RELEASED;
2460                dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
2461        } else {
2462                ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2463                         __func__, ioc_status,
2464                         le32_to_cpu(mpi_reply->IOCLogInfo));
2465                rc = -EFAULT;
2466        }
2467
2468 issue_host_reset:
2469        if (issue_reset)
2470                mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2471
2472 out:
2473
2474        ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2475        return rc;
2476}
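
/*
 * Illustrative user-space sketch (not part of the driver): copying out part
 * of a released buffer with MPT3DIAGREADBUFFER.  Both starting_offset and
 * bytes_to_read must be 4-byte aligned, and setting MPT3_FLAGS_REREGISTER on
 * the final read asks the driver to re-post the buffer to firmware.  The
 * unique_id is the hypothetical one used at registration; "fd" as above.
 *
 *	unsigned int chunk = 64 * 1024;
 *	struct mpt3_diag_read_buffer *rb = calloc(1, sizeof(*rb) + chunk);
 *
 *	rb->hdr.ioc_number = 0;
 *	rb->unique_id = 0x12345678;
 *	rb->starting_offset = 0;
 *	rb->bytes_to_read = chunk;
 *	rb->flags = 0;			// or MPT3_FLAGS_REREGISTER on the last read
 *	if (ioctl(fd, MPT3DIAGREADBUFFER, rb) == 0)
 *		fwrite(rb->diagnostic_data, 1, chunk, stdout);
 */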
2477
2478/**
2479 * _ctl_addnl_diag_query - query relevant info associated with diag buffers
2480 * @ioc: per adapter object
2481 * @arg: user space buffer containing ioctl content
2482 *
2483 * The application sends only unique_id.  The driver inspects
2484 * unique_id first and, if it is valid, fills in the details about the
2485 * cause of the diag buffer release.
2486 */
2487static long
2488_ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2489{
2490        struct mpt3_addnl_diag_query karg;
2491        u32 buffer_type = 0;
2492
2493        if (copy_from_user(&karg, arg, sizeof(karg))) {
2494                pr_err("%s: failure at %s:%d/%s()!\n",
2495                    ioc->name, __FILE__, __LINE__, __func__);
2496                return -EFAULT;
2497        }
2498        dctlprintk(ioc, ioc_info(ioc, "%s\n",  __func__));
2499        if (karg.unique_id == 0) {
2500                ioc_err(ioc, "%s: unique_id is(0x%08x)\n",
2501                    __func__, karg.unique_id);
2502                return -EPERM;
2503        }
2504        buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
2505        if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
2506                ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
2507                    __func__, karg.unique_id);
2508                return -EPERM;
2509        }
2510        memset(&karg.rel_query, 0, sizeof(karg.rel_query));
2511        if ((ioc->diag_buffer_status[buffer_type] &
2512            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2513                ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
2514                    __func__, buffer_type);
2515                goto out;
2516        }
2517        if ((ioc->diag_buffer_status[buffer_type] &
2518            MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
2519                ioc_err(ioc, "%s: buffer_type(0x%02x) is not released\n",
2520                    __func__, buffer_type);
2521                return -EPERM;
2522        }
2523        memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
2524out:
2525        if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
2526                ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
2527                    __func__, arg);
2528                return -EFAULT;
2529        }
2530        return 0;
2531}
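
/*
 * Illustrative user-space sketch (not part of the driver): fetching the
 * release context of a host trace buffer with MPT3ADDNLDIAGQUERY after the
 * buffer has been released; the interesting output is returned in the
 * rel_query member.  The unique_id is the hypothetical one used at
 * registration; "fd" as in the earlier sketches.
 *
 *	struct mpt3_addnl_diag_query addnl;
 *
 *	memset(&addnl, 0, sizeof(addnl));
 *	addnl.hdr.ioc_number = 0;
 *	addnl.unique_id = 0x12345678;
 *	if (ioctl(fd, MPT3ADDNLDIAGQUERY, &addnl) < 0)
 *		perror("MPT3ADDNLDIAGQUERY");
 */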
2532
2533#ifdef CONFIG_COMPAT
2534/**
2535 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2536 * @ioc: per adapter object
2537 * @cmd: ioctl opcode
2538 * @arg: (struct mpt3_ioctl_command32)
2539 *
2540 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
2541 */
2542static long
2543_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
2544        void __user *arg)
2545{
2546        struct mpt3_ioctl_command32 karg32;
2547        struct mpt3_ioctl_command32 __user *uarg;
2548        struct mpt3_ioctl_command karg;
2549
2550        if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
2551                return -EINVAL;
2552
2553        uarg = (struct mpt3_ioctl_command32 __user *) arg;
2554
2555        if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2556                pr_err("failure at %s:%d/%s()!\n",
2557                    __FILE__, __LINE__, __func__);
2558                return -EFAULT;
2559        }
2560
2561        memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
2562        karg.hdr.ioc_number = karg32.hdr.ioc_number;
2563        karg.hdr.port_number = karg32.hdr.port_number;
2564        karg.hdr.max_data_size = karg32.hdr.max_data_size;
2565        karg.timeout = karg32.timeout;
2566        karg.max_reply_bytes = karg32.max_reply_bytes;
2567        karg.data_in_size = karg32.data_in_size;
2568        karg.data_out_size = karg32.data_out_size;
2569        karg.max_sense_bytes = karg32.max_sense_bytes;
2570        karg.data_sge_offset = karg32.data_sge_offset;
2571        karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2572        karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2573        karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2574        karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2575        return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2576}
2577#endif
2578
2579/**
2580 * _ctl_ioctl_main - main ioctl entry point
2581 * @file:  (struct file)
2582 * @cmd:  ioctl opcode
2583 * @arg:  user space data buffer
2584 * @compat:  handles 32 bit applications in 64bit os
2585 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
2586 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
2587 */
2588static long
2589_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2590        u8 compat, u16 mpi_version)
2591{
2592        struct MPT3SAS_ADAPTER *ioc;
2593        struct mpt3_ioctl_header ioctl_header;
2594        enum block_state state;
2595        long ret = -ENOIOCTLCMD;
2596
2597        /* get IOCTL header */
2598        if (copy_from_user(&ioctl_header, (char __user *)arg,
2599            sizeof(struct mpt3_ioctl_header))) {
2600                pr_err("failure at %s:%d/%s()!\n",
2601                    __FILE__, __LINE__, __func__);
2602                return -EFAULT;
2603        }
2604
2605        if (_ctl_verify_adapter(ioctl_header.ioc_number,
2606                                &ioc, mpi_version) == -1 || !ioc)
2607                return -ENODEV;
2608
2609        /* pci_access_mutex lock acquired by ioctl path */
2610        mutex_lock(&ioc->pci_access_mutex);
2611
2612        if (ioc->shost_recovery || ioc->pci_error_recovery ||
2613            ioc->is_driver_loading || ioc->remove_host) {
2614                ret = -EAGAIN;
2615                goto out_unlock_pciaccess;
2616        }
2617
2618        state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2619        if (state == NON_BLOCKING) {
2620                if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
2621                        ret = -EAGAIN;
2622                        goto out_unlock_pciaccess;
2623                }
2624        } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2625                ret = -ERESTARTSYS;
2626                goto out_unlock_pciaccess;
2627        }
2628
2629
2630        switch (cmd) {
2631        case MPT3IOCINFO:
2632                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
2633                        ret = _ctl_getiocinfo(ioc, arg);
2634                break;
2635#ifdef CONFIG_COMPAT
2636        case MPT3COMMAND32:
2637#endif
2638        case MPT3COMMAND:
2639        {
2640                struct mpt3_ioctl_command __user *uarg;
2641                struct mpt3_ioctl_command karg;
2642
2643#ifdef CONFIG_COMPAT
2644                if (compat) {
2645                        ret = _ctl_compat_mpt_command(ioc, cmd, arg);
2646                        break;
2647                }
2648#endif
2649                if (copy_from_user(&karg, arg, sizeof(karg))) {
2650                        pr_err("failure at %s:%d/%s()!\n",
2651                            __FILE__, __LINE__, __func__);
2652                        ret = -EFAULT;
2653                        break;
2654                }
2655
2656                if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
2657                        ret = -EINVAL;
2658                        break;
2659                }
2660                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
2661                        uarg = arg;
2662                        ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2663                }
2664                break;
2665        }
2666        case MPT3EVENTQUERY:
2667                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
2668                        ret = _ctl_eventquery(ioc, arg);
2669                break;
2670        case MPT3EVENTENABLE:
2671                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
2672                        ret = _ctl_eventenable(ioc, arg);
2673                break;
2674        case MPT3EVENTREPORT:
2675                ret = _ctl_eventreport(ioc, arg);
2676                break;
2677        case MPT3HARDRESET:
2678                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
2679                        ret = _ctl_do_reset(ioc, arg);
2680                break;
2681        case MPT3BTDHMAPPING:
2682                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
2683                        ret = _ctl_btdh_mapping(ioc, arg);
2684                break;
2685        case MPT3DIAGREGISTER:
2686                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
2687                        ret = _ctl_diag_register(ioc, arg);
2688                break;
2689        case MPT3DIAGUNREGISTER:
2690                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
2691                        ret = _ctl_diag_unregister(ioc, arg);
2692                break;
2693        case MPT3DIAGQUERY:
2694                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
2695                        ret = _ctl_diag_query(ioc, arg);
2696                break;
2697        case MPT3DIAGRELEASE:
2698                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
2699                        ret = _ctl_diag_release(ioc, arg);
2700                break;
2701        case MPT3DIAGREADBUFFER:
2702                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
2703                        ret = _ctl_diag_read_buffer(ioc, arg);
2704                break;
2705        case MPT3ADDNLDIAGQUERY:
2706                if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query))
2707                        ret = _ctl_addnl_diag_query(ioc, arg);
2708                break;
2709        default:
2710                dctlprintk(ioc,
2711                           ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
2712                                    cmd));
2713                break;
2714        }
2715
2716        mutex_unlock(&ioc->ctl_cmds.mutex);
2717out_unlock_pciaccess:
2718        mutex_unlock(&ioc->pci_access_mutex);
2719        return ret;
2720}
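/*
 * Illustrative user-space sketch (not part of the driver): issuing the
 * MPT3IOCINFO request through this ioctl path.  It assumes the mpt3ctl
 * character device is available as /dev/mpt3ctl and that the ioctl
 * definitions from mpt3sas_ctl.h are visible to the application; all
 * fields other than hdr.ioc_number are left zeroed.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include "mpt3sas_ctl.h"
 *
 *	int main(void)
 *	{
 *		struct mpt3_ioctl_iocinfo info;
 *		int fd = open("/dev/mpt3ctl", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&info, 0, sizeof(info));
 *		info.hdr.ioc_number = 0;	// first adapter
 *		// _IOC_SIZE(MPT3IOCINFO) equals sizeof(info), so the size
 *		// check in _ctl_ioctl_main() passes.
 *		if (ioctl(fd, MPT3IOCINFO, &info) == 0)
 *			printf("ioc %u: iocinfo retrieved\n",
 *			       (unsigned int)info.hdr.ioc_number);
 *		close(fd);
 *		return 0;
 *	}
 */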
2721
2722/**
2723 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
2724 * @file: pointer to the file object
2725 * @cmd: ioctl opcode
2726 * @arg: user space data buffer
2727 */
2728static long
2729_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2730{
2731        long ret;
2732
2733        /* pass MPI25_VERSION | MPI26_VERSION value,
2734         * to indicate that this ioctl cmd
2735         * came from mpt3ctl ioctl device.
2736         */
2737        ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
2738                MPI25_VERSION | MPI26_VERSION);
2739        return ret;
2740}
2741
2742/**
2743 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
2744 * @file: pointer to the file object
2745 * @cmd: ioctl opcode
2746 * @arg: user space data buffer
2747 */
2748static long
2749_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2750{
2751        long ret;
2752
2753        /* pass MPI2_VERSION value, to indicate that this ioctl cmd
2754         * came from mpt2ctl ioctl device.
2755         */
2756        ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
2757        return ret;
2758}
2759#ifdef CONFIG_COMPAT
2760/**
2761 * _ctl_ioctl_compat - main ioctl entry point (compat)
2762 * @file: pointer to the file object
2763 * @cmd: ioctl opcode
2764 * @arg: user space data buffer
2765 *
2766 * This routine handles 32-bit applications running on a 64-bit OS.
2767 */
2768static long
2769_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2770{
2771        long ret;
2772
2773        ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
2774                MPI25_VERSION | MPI26_VERSION);
2775        return ret;
2776}
2777
2778/**
2779 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
2780 * @file: pointer to the file object
2781 * @cmd: ioctl opcode
2782 * @arg: user space data buffer
2783 *
2784 * This routine handles 32-bit applications running on a 64-bit OS.
2785 */
2786static long
2787_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2788{
2789        long ret;
2790
2791        ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
2792        return ret;
2793}
2794#endif
2795
2796/* scsi host attributes */
2797/**
2798 * version_fw_show - firmware version
2799 * @cdev: pointer to embedded class device
2800 * @attr: ?
2801 * @buf: the buffer returned
2802 *
2803 * A sysfs 'read-only' shost attribute.
2804 */
2805static ssize_t
2806version_fw_show(struct device *cdev, struct device_attribute *attr,
2807        char *buf)
2808{
2809        struct Scsi_Host *shost = class_to_shost(cdev);
2810        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2811
2812        return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2813            (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2814            (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2815            (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2816            ioc->facts.FWVersion.Word & 0x000000FF);
2817}
2818static DEVICE_ATTR_RO(version_fw);
2819
2820/**
2821 * version_bios_show - bios version
2822 * @cdev: pointer to embedded class device
2823 * @attr: ?
2824 * @buf: the buffer returned
2825 *
2826 * A sysfs 'read-only' shost attribute.
2827 */
2828static ssize_t
2829version_bios_show(struct device *cdev, struct device_attribute *attr,
2830        char *buf)
2831{
2832        struct Scsi_Host *shost = class_to_shost(cdev);
2833        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2834
2835        u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2836
2837        return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2838            (version & 0xFF000000) >> 24,
2839            (version & 0x00FF0000) >> 16,
2840            (version & 0x0000FF00) >> 8,
2841            version & 0x000000FF);
2842}
2843static DEVICE_ATTR_RO(version_bios);
2844
2845/**
2846 * version_mpi_show - MPI (message passing interface) version
2847 * @cdev: pointer to embedded class device
2848 * @attr: ?
2849 * @buf: the buffer returned
2850 *
2851 * A sysfs 'read-only' shost attribute.
2852 */
2853static ssize_t
2854version_mpi_show(struct device *cdev, struct device_attribute *attr,
2855        char *buf)
2856{
2857        struct Scsi_Host *shost = class_to_shost(cdev);
2858        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2859
2860        return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
2861            ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2862}
2863static DEVICE_ATTR_RO(version_mpi);
2864
2865/**
2866 * version_product_show - product name
2867 * @cdev: pointer to embedded class device
2868 * @attr: ?
2869 * @buf: the buffer returned
2870 *
2871 * A sysfs 'read-only' shost attribute.
2872 */
2873static ssize_t
2874version_product_show(struct device *cdev, struct device_attribute *attr,
2875        char *buf)
2876{
2877        struct Scsi_Host *shost = class_to_shost(cdev);
2878        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2879
2880        return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2881}
2882static DEVICE_ATTR_RO(version_product);
2883
2884/**
2885 * version_nvdata_persistent_show - nvdata persistent version
2886 * @cdev: pointer to embedded class device
2887 * @attr: ?
2888 * @buf: the buffer returned
2889 *
2890 * A sysfs 'read-only' shost attribute.
2891 */
2892static ssize_t
2893version_nvdata_persistent_show(struct device *cdev,
2894        struct device_attribute *attr, char *buf)
2895{
2896        struct Scsi_Host *shost = class_to_shost(cdev);
2897        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2898
2899        return snprintf(buf, PAGE_SIZE, "%08xh\n",
2900            le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2901}
2902static DEVICE_ATTR_RO(version_nvdata_persistent);
2903
2904/**
2905 * version_nvdata_default_show - nvdata default version
2906 * @cdev: pointer to embedded class device
2907 * @attr: ?
2908 * @buf: the buffer returned
2909 *
2910 * A sysfs 'read-only' shost attribute.
2911 */
2912static ssize_t
2913version_nvdata_default_show(struct device *cdev, struct device_attribute
2914        *attr, char *buf)
2915{
2916        struct Scsi_Host *shost = class_to_shost(cdev);
2917        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2918
2919        return snprintf(buf, PAGE_SIZE, "%08xh\n",
2920            le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2921}
2922static DEVICE_ATTR_RO(version_nvdata_default);
2923
2924/**
2925 * board_name_show - board name
2926 * @cdev: pointer to embedded class device
2927 * @attr: ?
2928 * @buf: the buffer returned
2929 *
2930 * A sysfs 'read-only' shost attribute.
2931 */
2932static ssize_t
2933board_name_show(struct device *cdev, struct device_attribute *attr,
2934        char *buf)
2935{
2936        struct Scsi_Host *shost = class_to_shost(cdev);
2937        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2938
2939        return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2940}
2941static DEVICE_ATTR_RO(board_name);
2942
2943/**
2944 * board_assembly_show - board assembly name
2945 * @cdev: pointer to embedded class device
2946 * @attr: ?
2947 * @buf: the buffer returned
2948 *
2949 * A sysfs 'read-only' shost attribute.
2950 */
2951static ssize_t
2952board_assembly_show(struct device *cdev, struct device_attribute *attr,
2953        char *buf)
2954{
2955        struct Scsi_Host *shost = class_to_shost(cdev);
2956        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2957
2958        return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2959}
2960static DEVICE_ATTR_RO(board_assembly);
2961
2962/**
2963 * board_tracer_show - board tracer number
2964 * @cdev: pointer to embedded class device
2965 * @attr: ?
2966 * @buf: the buffer returned
2967 *
2968 * A sysfs 'read-only' shost attribute.
2969 */
2970static ssize_t
2971board_tracer_show(struct device *cdev, struct device_attribute *attr,
2972        char *buf)
2973{
2974        struct Scsi_Host *shost = class_to_shost(cdev);
2975        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2976
2977        return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2978}
2979static DEVICE_ATTR_RO(board_tracer);
2980
2981/**
2982 * io_delay_show - io missing delay
2983 * @cdev: pointer to embedded class device
2984 * @attr: ?
2985 * @buf: the buffer returned
2986 *
2987 * This is part of the firmware implementation for debouncing
2988 * device removal events.
2989 *
2990 * A sysfs 'read-only' shost attribute.
2991 */
2992static ssize_t
2993io_delay_show(struct device *cdev, struct device_attribute *attr,
2994        char *buf)
2995{
2996        struct Scsi_Host *shost = class_to_shost(cdev);
2997        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2998
2999        return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
3000}
3001static DEVICE_ATTR_RO(io_delay);
3002
3003/**
3004 * device_delay_show - device missing delay
3005 * @cdev: pointer to embedded class device
3006 * @attr: ?
3007 * @buf: the buffer returned
3008 *
3009 * This is part of the firmware implementation for debouncing
3010 * device removal events.
3011 *
3012 * A sysfs 'read-only' shost attribute.
3013 */
3014static ssize_t
3015device_delay_show(struct device *cdev, struct device_attribute *attr,
3016        char *buf)
3017{
3018        struct Scsi_Host *shost = class_to_shost(cdev);
3019        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3020
3021        return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
3022}
3023static DEVICE_ATTR_RO(device_delay);
3024
3025/**
3026 * fw_queue_depth_show - global credits
3027 * @cdev: pointer to embedded class device
3028 * @attr: ?
3029 * @buf: the buffer returned
3030 *
3031 * This is the firmware queue depth limit (global credits).
3032 *
3033 * A sysfs 'read-only' shost attribute.
3034 */
3035static ssize_t
3036fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
3037        char *buf)
3038{
3039        struct Scsi_Host *shost = class_to_shost(cdev);
3040        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3041
3042        return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
3043}
3044static DEVICE_ATTR_RO(fw_queue_depth);
3045
3046/**
3047 * host_sas_address_show - sas address
3048 * @cdev: pointer to embedded class device
3049 * @attr: ?
3050 * @buf: the buffer returned
3051 *
3052 * This is the controller sas address
3053 *
3054 * A sysfs 'read-only' shost attribute.
3055 */
3056static ssize_t
3057host_sas_address_show(struct device *cdev, struct device_attribute *attr,
3058        char *buf)
3059
3060{
3061        struct Scsi_Host *shost = class_to_shost(cdev);
3062        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3063
3064        return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3065            (unsigned long long)ioc->sas_hba.sas_address);
3066}
3067static DEVICE_ATTR_RO(host_sas_address);
3068
3069/**
3070 * logging_level_show - logging level
3071 * @cdev: pointer to embedded class device
3072 * @attr: ?
3073 * @buf: the buffer returned
3074 *
3075 * A sysfs 'read/write' shost attribute.
3076 */
3077static ssize_t
3078logging_level_show(struct device *cdev, struct device_attribute *attr,
3079        char *buf)
3080{
3081        struct Scsi_Host *shost = class_to_shost(cdev);
3082        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3083
3084        return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
3085}
3086static ssize_t
3087logging_level_store(struct device *cdev, struct device_attribute *attr,
3088        const char *buf, size_t count)
3089{
3090        struct Scsi_Host *shost = class_to_shost(cdev);
3091        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3092        int val = 0;
3093
3094        if (sscanf(buf, "%x", &val) != 1)
3095                return -EINVAL;
3096
3097        ioc->logging_level = val;
3098        ioc_info(ioc, "logging_level=%08xh\n",
3099                 ioc->logging_level);
3100        return strlen(buf);
3101}
3102static DEVICE_ATTR_RW(logging_level);
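/*
 * Illustrative user-space sketch (not part of the driver): updating
 * logging_level through sysfs.  The store handler above parses the
 * value with sscanf(..., "%x", ...), so the string is interpreted as
 * hex.  The host0 name below is an assumption for the example.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen(
 *		    "/sys/class/scsi_host/host0/logging_level", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "3f8");	// sets ioc->logging_level to 0x3f8
 *		fclose(f);
 *		return 0;
 *	}
 */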
3103
3104/**
3105 * fwfault_debug_show - show/store fwfault_debug
3106 * @cdev: pointer to embedded class device
3107 * @attr: ?
3108 * @buf: the buffer returned
3109 *
3110 * mpt3sas_fwfault_debug is a command line option.
3111 * A sysfs 'read/write' shost attribute.
3112 */
3113static ssize_t
3114fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
3115        char *buf)
3116{
3117        struct Scsi_Host *shost = class_to_shost(cdev);
3118        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3119
3120        return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
3121}
3122static ssize_t
3123fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
3124        const char *buf, size_t count)
3125{
3126        struct Scsi_Host *shost = class_to_shost(cdev);
3127        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3128        int val = 0;
3129
3130        if (sscanf(buf, "%d", &val) != 1)
3131                return -EINVAL;
3132
3133        ioc->fwfault_debug = val;
3134        ioc_info(ioc, "fwfault_debug=%d\n",
3135                 ioc->fwfault_debug);
3136        return strlen(buf);
3137}
3138static DEVICE_ATTR_RW(fwfault_debug);
3139
3140/**
3141 * ioc_reset_count_show - ioc reset count
3142 * @cdev: pointer to embedded class device
3143 * @attr: ?
3144 * @buf: the buffer returned
3145 *
3146 * This is the number of times the IOC (adapter) has been reset.
3147 *
3148 * A sysfs 'read-only' shost attribute.
3149 */
3150static ssize_t
3151ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
3152        char *buf)
3153{
3154        struct Scsi_Host *shost = class_to_shost(cdev);
3155        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3156
3157        return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
3158}
3159static DEVICE_ATTR_RO(ioc_reset_count);
3160
3161/**
3162 * reply_queue_count_show - number of reply queues
3163 * @cdev: pointer to embedded class device
3164 * @attr: ?
3165 * @buf: the buffer returned
3166 *
3167 * This is the number of reply queues
3168 *
3169 * A sysfs 'read-only' shost attribute.
3170 */
3171static ssize_t
3172reply_queue_count_show(struct device *cdev,
3173        struct device_attribute *attr, char *buf)
3174{
3175        u8 reply_queue_count;
3176        struct Scsi_Host *shost = class_to_shost(cdev);
3177        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3178
3179        if ((ioc->facts.IOCCapabilities &
3180            MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
3181                reply_queue_count = ioc->reply_queue_count;
3182        else
3183                reply_queue_count = 1;
3184
3185        return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
3186}
3187static DEVICE_ATTR_RO(reply_queue_count);
3188
3189/**
3190 * BRM_status_show - Backup Rail Monitor Status
3191 * @cdev: pointer to embedded class device
3192 * @attr: ?
3193 * @buf: the buffer returned
3194 *
3195 * This reports the Backup Rail Monitor status (WarpDrive only).
3196 *
3197 * A sysfs 'read-only' shost attribute.
3198 */
3199static ssize_t
3200BRM_status_show(struct device *cdev, struct device_attribute *attr,
3201        char *buf)
3202{
3203        struct Scsi_Host *shost = class_to_shost(cdev);
3204        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3205        Mpi2IOUnitPage3_t io_unit_pg3;
3206        Mpi2ConfigReply_t mpi_reply;
3207        u16 backup_rail_monitor_status = 0;
3208        u16 ioc_status;
3209        int sz;
3210        ssize_t rc = 0;
3211
3212        if (!ioc->is_warpdrive) {
3213                ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
3214                        __func__);
3215                return 0;
3216        }
3217        /* pci_access_mutex lock acquired by sysfs show path */
3218        mutex_lock(&ioc->pci_access_mutex);
3219        if (ioc->pci_error_recovery || ioc->remove_host)
3220                goto out;
3221
3222        sz = sizeof(io_unit_pg3);
3223        memset(&io_unit_pg3, 0, sz);
3224
3225        if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) !=
3226            0) {
3227                ioc_err(ioc, "%s: failed reading iounit_pg3\n",
3228                        __func__);
3229                rc = -EINVAL;
3230                goto out;
3231        }
3232
3233        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3234        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3235                ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
3236                        __func__, ioc_status);
3237                rc = -EINVAL;
3238                goto out;
3239        }
3240
3241        if (io_unit_pg3.GPIOCount < 25) {
3242                ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n",
3243                        __func__, io_unit_pg3.GPIOCount);
3244                rc = -EINVAL;
3245                goto out;
3246        }
3247
3248        /* BRM status is in bit zero of GPIOVal[24] */
3249        backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]);
3250        rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
3251
3252 out:
3253        mutex_unlock(&ioc->pci_access_mutex);
3254        return rc;
3255}
3256static DEVICE_ATTR_RO(BRM_status);
3257
3258struct DIAG_BUFFER_START {
3259        __le32  Size;
3260        __le32  DiagVersion;
3261        u8      BufferType;
3262        u8      Reserved[3];
3263        __le32  Reserved1;
3264        __le32  Reserved2;
3265        __le32  Reserved3;
3266};
3267
3268/**
3269 * host_trace_buffer_size_show - host buffer size (trace only)
3270 * @cdev: pointer to embedded class device
3271 * @attr: ?
3272 * @buf: the buffer returned
3273 *
3274 * A sysfs 'read-only' shost attribute.
3275 */
3276static ssize_t
3277host_trace_buffer_size_show(struct device *cdev,
3278        struct device_attribute *attr, char *buf)
3279{
3280        struct Scsi_Host *shost = class_to_shost(cdev);
3281        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3282        u32 size = 0;
3283        struct DIAG_BUFFER_START *request_data;
3284
3285        if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3286                ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3287                        __func__);
3288                return 0;
3289        }
3290
3291        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3292            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3293                ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3294                        __func__);
3295                return 0;
3296        }
3297
3298        request_data = (struct DIAG_BUFFER_START *)
3299            ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
3300        if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
3301            le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
3302            le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
3303            le32_to_cpu(request_data->Reserved3) == 0x4742444c)
3304                size = le32_to_cpu(request_data->Size);
3305
3306        ioc->ring_buffer_sz = size;
3307        return snprintf(buf, PAGE_SIZE, "%d\n", size);
3308}
3309static DEVICE_ATTR_RO(host_trace_buffer_size);
3310
3311/**
3312 * host_trace_buffer_show - firmware ring buffer (trace only)
3313 * @cdev: pointer to embedded class device
3314 * @attr: ?
3315 * @buf: the buffer returned
3316 *
3317 * A sysfs 'read/write' shost attribute.
3318 *
3319 * Only up to 4k bytes of the ring buffer can be read per access.
3320 * To read beyond that, write the desired offset to this same
3321 * attribute; subsequent reads start from that offset.
3322 */
3323static ssize_t
3324host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
3325        char *buf)
3326{
3327        struct Scsi_Host *shost = class_to_shost(cdev);
3328        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3329        void *request_data;
3330        u32 size;
3331
3332        if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3333                ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3334                        __func__);
3335                return 0;
3336        }
3337
3338        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3339            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3340                ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3341                        __func__);
3342                return 0;
3343        }
3344
3345        if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
3346                return 0;
3347
3348        size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
3349        size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3350        request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
3351        memcpy(buf, request_data, size);
3352        return size;
3353}
3354
3355static ssize_t
3356host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
3357        const char *buf, size_t count)
3358{
3359        struct Scsi_Host *shost = class_to_shost(cdev);
3360        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3361        int val = 0;
3362
3363        if (sscanf(buf, "%d", &val) != 1)
3364                return -EINVAL;
3365
3366        ioc->ring_buffer_offset = val;
3367        return strlen(buf);
3368}
3369static DEVICE_ATTR_RW(host_trace_buffer);
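/*
 * Illustrative user-space sketch (not part of the driver): draining the
 * trace buffer through this attribute.  host_trace_buffer_size gives the
 * total size; each read of host_trace_buffer returns at most
 * PAGE_SIZE - 1 bytes starting at the previously written offset.  The
 * host0 name is an assumption for the example.
 *
 *	#include <stdio.h>
 *
 *	#define ATTR "/sys/class/scsi_host/host0/host_trace_buffer"
 *	#define SIZE_ATTR "/sys/class/scsi_host/host0/host_trace_buffer_size"
 *
 *	int main(void)
 *	{
 *		char chunk[4096];
 *		int total = 0, offset = 0;
 *		size_t n;
 *		FILE *f;
 *
 *		f = fopen(SIZE_ATTR, "r");
 *		if (!f || fscanf(f, "%d", &total) != 1)
 *			return 1;
 *		fclose(f);
 *
 *		while (offset < total) {
 *			f = fopen(ATTR, "w");	// move the read pointer
 *			if (!f)
 *				return 1;
 *			fprintf(f, "%d", offset);
 *			fclose(f);
 *
 *			f = fopen(ATTR, "r");	// read the next chunk
 *			if (!f)
 *				return 1;
 *			n = fread(chunk, 1, sizeof(chunk), f);
 *			fclose(f);
 *			if (!n)
 *				break;
 *			fwrite(chunk, 1, n, stdout);
 *			offset += n;
 *		}
 *		return 0;
 *	}
 */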
3370
3371
3372/*****************************************/
3373
3374/**
3375 * host_trace_buffer_enable_show - firmware ring buffer (trace only)
3376 * @cdev: pointer to embedded class device
3377 * @attr: ?
3378 * @buf: the buffer returned
3379 *
3380 * A sysfs 'read/write' shost attribute.
3381 *
3382 * This is a mechanism to post or release host trace buffers.
3383 */
3384static ssize_t
3385host_trace_buffer_enable_show(struct device *cdev,
3386        struct device_attribute *attr, char *buf)
3387{
3388        struct Scsi_Host *shost = class_to_shost(cdev);
3389        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3390
3391        if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
3392           ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3393            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
3394                return snprintf(buf, PAGE_SIZE, "off\n");
3395        else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3396            MPT3_DIAG_BUFFER_IS_RELEASED))
3397                return snprintf(buf, PAGE_SIZE, "release\n");
3398        else
3399                return snprintf(buf, PAGE_SIZE, "post\n");
3400}
3401
3402static ssize_t
3403host_trace_buffer_enable_store(struct device *cdev,
3404        struct device_attribute *attr, const char *buf, size_t count)
3405{
3406        struct Scsi_Host *shost = class_to_shost(cdev);
3407        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3408        char str[10] = "";
3409        struct mpt3_diag_register diag_register;
3410        u8 issue_reset = 0;
3411
3412        /* don't allow post/release to occur while recovery is active */
3413        if (ioc->shost_recovery || ioc->remove_host ||
3414            ioc->pci_error_recovery || ioc->is_driver_loading)
3415                return -EBUSY;
3416
3417        if (sscanf(buf, "%9s", str) != 1)
3418                return -EINVAL;
3419
3420        if (!strcmp(str, "post")) {
3421                /* exit out if host buffers are already posted */
3422                if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
3423                    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3424                    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
3425                    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3426                    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
3427                        goto out;
3428                memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
3429                ioc_info(ioc, "posting host trace buffers\n");
3430                diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
3431
3432                if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
3433                    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
3434                        /* post the same buffer allocated previously */
3435                        diag_register.requested_buffer_size =
3436                            ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
3437                } else {
3438                        /*
3439                         * Free the diag buffer memory which was previously
3440                         * allocated by an application.
3441                         */
3442                        if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
3443                            &&
3444                            (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3445                            MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
3446                                dma_free_coherent(&ioc->pdev->dev,
3447                                                  ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
3448                                                  ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
3449                                                  ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
3450                                ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
3451                                    NULL;
3452                        }
3453
3454                        diag_register.requested_buffer_size = (1024 * 1024);
3455                }
3456
3457                diag_register.unique_id =
3458                    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
3459                    (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
3460                ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
3461                _ctl_diag_register_2(ioc,  &diag_register);
3462                if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3463                    MPT3_DIAG_BUFFER_IS_REGISTERED) {
3464                        ioc_info(ioc,
3465                            "Trace buffer %d KB allocated through sysfs\n",
3466                            diag_register.requested_buffer_size>>10);
3467                        if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
3468                                ioc->diag_buffer_status[
3469                                    MPI2_DIAG_BUF_TYPE_TRACE] |=
3470                                    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
3471                }
3472        } else if (!strcmp(str, "release")) {
3473                /* exit out if host buffers are already released */
3474                if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
3475                        goto out;
3476                if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3477                    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
3478                        goto out;
3479                if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3480                    MPT3_DIAG_BUFFER_IS_RELEASED))
3481                        goto out;
3482                ioc_info(ioc, "releasing host trace buffer\n");
3483                ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS;
3484                mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
3485                    &issue_reset);
3486        }
3487
3488 out:
3489        return strlen(buf);
3490}
3491static DEVICE_ATTR_RW(host_trace_buffer_enable);
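/*
 * Illustrative user-space sketch (not part of the driver): posting and
 * releasing the host trace buffer through this attribute.  Reading it
 * back returns "off", "post" or "release".  The host0 name is an
 * assumption for the example.
 *
 *	#include <stdio.h>
 *
 *	static int set_state(const char *state)
 *	{
 *		FILE *f = fopen(
 *		    "/sys/class/scsi_host/host0/host_trace_buffer_enable",
 *		    "w");
 *
 *		if (!f)
 *			return -1;
 *		fputs(state, f);	// "post" or "release"
 *		fclose(f);
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		return set_state("post");
 *	}
 */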
3492
3493/*********** diagnostic trigger support **********************************/
3494
3495/**
3496 * diag_trigger_master_show - show the diag_trigger_master attribute
3497 * @cdev: pointer to embedded class device
3498 * @attr: ?
3499 * @buf: the buffer returned
3500 *
3501 * A sysfs 'read/write' shost attribute.
3502 */
3503static ssize_t
3504diag_trigger_master_show(struct device *cdev,
3505        struct device_attribute *attr, char *buf)
3506
3507{
3508        struct Scsi_Host *shost = class_to_shost(cdev);
3509        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3510        unsigned long flags;
3511        ssize_t rc;
3512
3513        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3514        rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
3515        memcpy(buf, &ioc->diag_trigger_master, rc);
3516        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3517        return rc;
3518}
3519
3520/**
3521 * diag_trigger_master_store - store the diag_trigger_master attribute
3522 * @cdev: pointer to embedded class device
3523 * @attr: ?
3524 * @buf: the buffer returned
3525 * @count: ?
3526 *
3527 * A sysfs 'read/write' shost attribute.
3528 */
3529static ssize_t
3530diag_trigger_master_store(struct device *cdev,
3531        struct device_attribute *attr, const char *buf, size_t count)
3532
3533{
3534        struct Scsi_Host *shost = class_to_shost(cdev);
3535        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3536        unsigned long flags;
3537        ssize_t rc;
3538
3539        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3540        rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
3541        memset(&ioc->diag_trigger_master, 0,
3542            sizeof(struct SL_WH_MASTER_TRIGGER_T));
3543        memcpy(&ioc->diag_trigger_master, buf, rc);
3544        ioc->diag_trigger_master.MasterData |=
3545            (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
3546        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3547        return rc;
3548}
3549static DEVICE_ATTR_RW(diag_trigger_master);
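/*
 * Illustrative user-space sketch (not part of the driver): this attribute
 * exchanges the raw binary SL_WH_MASTER_TRIGGER_T structure rather than
 * text.  It assumes the trigger structure definitions in the driver's
 * mpt3sas_trigger_diag.h header are usable from user space and that
 * host0 names the adapter of interest.
 *
 *	#include <stdio.h>
 *	#include "mpt3sas_trigger_diag.h"
 *
 *	int main(void)
 *	{
 *		struct SL_WH_MASTER_TRIGGER_T trig;
 *		FILE *f = fopen(
 *		    "/sys/class/scsi_host/host0/diag_trigger_master", "r");
 *
 *		if (!f || fread(&trig, sizeof(trig), 1, f) != 1)
 *			return 1;
 *		// MASTER_TRIGGER_FW_FAULT and MASTER_TRIGGER_ADAPTER_RESET
 *		// are always forced on by the store handler above.
 *		printf("MasterData: 0x%08x\n",
 *		       (unsigned int)trig.MasterData);
 *		fclose(f);
 *		return 0;
 *	}
 */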
3550
3551
3552/**
3553 * diag_trigger_event_show - show the diag_trigger_event attribute
3554 * @cdev: pointer to embedded class device
3555 * @attr: ?
3556 * @buf: the buffer returned
3557 *
3558 * A sysfs 'read/write' shost attribute.
3559 */
3560static ssize_t
3561diag_trigger_event_show(struct device *cdev,
3562        struct device_attribute *attr, char *buf)
3563{
3564        struct Scsi_Host *shost = class_to_shost(cdev);
3565        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3566        unsigned long flags;
3567        ssize_t rc;
3568
3569        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3570        rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
3571        memcpy(buf, &ioc->diag_trigger_event, rc);
3572        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3573        return rc;
3574}
3575
3576/**
3577 * diag_trigger_event_store - store the diag_trigger_event attribute
3578 * @cdev: pointer to embedded class device
3579 * @attr: ?
3580 * @buf: the buffer returned
3581 * @count: ?
3582 *
3583 * A sysfs 'read/write' shost attribute.
3584 */
3585static ssize_t
3586diag_trigger_event_store(struct device *cdev,
3587        struct device_attribute *attr, const char *buf, size_t count)
3588
3589{
3590        struct Scsi_Host *shost = class_to_shost(cdev);
3591        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3592        unsigned long flags;
3593        ssize_t sz;
3594
3595        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3596        sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3597        memset(&ioc->diag_trigger_event, 0,
3598            sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3599        memcpy(&ioc->diag_trigger_event, buf, sz);
3600        if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
3601                ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
3602        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3603        return sz;
3604}
3605static DEVICE_ATTR_RW(diag_trigger_event);
3606
3607
3608/**
3609 * diag_trigger_scsi_show - show the diag_trigger_scsi attribute
3610 * @cdev: pointer to embedded class device
3611 * @attr: ?
3612 * @buf: the buffer returned
3613 *
3614 * A sysfs 'read/write' shost attribute.
3615 */
3616static ssize_t
3617diag_trigger_scsi_show(struct device *cdev,
3618        struct device_attribute *attr, char *buf)
3619{
3620        struct Scsi_Host *shost = class_to_shost(cdev);
3621        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3622        unsigned long flags;
3623        ssize_t rc;
3624
3625        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3626        rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
3627        memcpy(buf, &ioc->diag_trigger_scsi, rc);
3628        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3629        return rc;
3630}
3631
3632/**
3633 * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
3634 * @cdev: pointer to embedded class device
3635 * @attr: ?
3636 * @buf: the buffer returned
3637 * @count: ?
3638 *
3639 * A sysfs 'read/write' shost attribute.
3640 */
3641static ssize_t
3642diag_trigger_scsi_store(struct device *cdev,
3643        struct device_attribute *attr, const char *buf, size_t count)
3644{
3645        struct Scsi_Host *shost = class_to_shost(cdev);
3646        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3647        unsigned long flags;
3648        ssize_t sz;
3649
3650        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3651        sz = min(sizeof(ioc->diag_trigger_scsi), count);
3652        memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
3653        memcpy(&ioc->diag_trigger_scsi, buf, sz);
3654        if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
3655                ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
3656        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3657        return sz;
3658}
3659static DEVICE_ATTR_RW(diag_trigger_scsi);
3660
3661
3662/**
3663 * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
3664 * @cdev: pointer to embedded class device
3665 * @attr: ?
3666 * @buf: the buffer returned
3667 *
3668 * A sysfs 'read/write' shost attribute.
3669 */
3670static ssize_t
3671diag_trigger_mpi_show(struct device *cdev,
3672        struct device_attribute *attr, char *buf)
3673{
3674        struct Scsi_Host *shost = class_to_shost(cdev);
3675        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3676        unsigned long flags;
3677        ssize_t rc;
3678
3679        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3680        rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
3681        memcpy(buf, &ioc->diag_trigger_mpi, rc);
3682        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3683        return rc;
3684}
3685
3686/**
3687 * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
3688 * @cdev: pointer to embedded class device
3689 * @attr: ?
3690 * @buf: the buffer returned
3691 * @count: ?
3692 *
3693 * A sysfs 'read/write' shost attribute.
3694 */
3695static ssize_t
3696diag_trigger_mpi_store(struct device *cdev,
3697        struct device_attribute *attr, const char *buf, size_t count)
3698{
3699        struct Scsi_Host *shost = class_to_shost(cdev);
3700        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3701        unsigned long flags;
3702        ssize_t sz;
3703
3704        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3705        sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3706        memset(&ioc->diag_trigger_mpi, 0,
3707            sizeof(ioc->diag_trigger_mpi));
3708        memcpy(&ioc->diag_trigger_mpi, buf, sz);
3709        if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
3710                ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
3711        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3712        return sz;
3713}
3714
3715static DEVICE_ATTR_RW(diag_trigger_mpi);
3716
3717/*********** diagnostic trigger support *** END *****************************/
3718
3719/*****************************************/
3720
3721/**
3722 * drv_support_bitmap_show - driver supported feature bitmap
3723 * @cdev: pointer to embedded class device
3724 * @attr: unused
3725 * @buf: the buffer returned
3726 *
3727 * A sysfs 'read-only' shost attribute.
3728 */
3729static ssize_t
3730drv_support_bitmap_show(struct device *cdev,
3731        struct device_attribute *attr, char *buf)
3732{
3733        struct Scsi_Host *shost = class_to_shost(cdev);
3734        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3735
3736        return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
3737}
3738static DEVICE_ATTR_RO(drv_support_bitmap);
3739
3740/**
3741 * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
3742 * @cdev: pointer to embedded class device
3743 * @attr: unused
3744 * @buf: the buffer returned
3745 *
3746 * A sysfs read/write shost attribute. This attribute is used to set the
3747 * targets queue depth to HBA IO queue depth if this attribute is enabled.
3748 */
3749static ssize_t
3750enable_sdev_max_qd_show(struct device *cdev,
3751        struct device_attribute *attr, char *buf)
3752{
3753        struct Scsi_Host *shost = class_to_shost(cdev);
3754        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3755
3756        return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
3757}
3758
3759/**
3760 * enable_sdev_max_qd_store - Enable/disable sdev max qd
3761 * @cdev: pointer to embedded class device
3762 * @attr: unused
3763 * @buf: the buffer returned
3764 * @count: unused
3765 *
3766 * A sysfs read/write shost attribute. This attribute is used to set the
3767 * targets queue depth to HBA IO queue depth if this attribute is enabled.
3768 * If this attribute is disabled then targets will have corresponding default
3769 * queue depth.
3770 */
3771static ssize_t
3772enable_sdev_max_qd_store(struct device *cdev,
3773        struct device_attribute *attr, const char *buf, size_t count)
3774{
3775        struct Scsi_Host *shost = class_to_shost(cdev);
3776        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3777        struct MPT3SAS_DEVICE *sas_device_priv_data;
3778        struct MPT3SAS_TARGET *sas_target_priv_data;
3779        int val = 0;
3780        struct scsi_device *sdev;
3781        struct _raid_device *raid_device;
3782        int qdepth;
3783
3784        if (kstrtoint(buf, 0, &val) != 0)
3785                return -EINVAL;
3786
3787        switch (val) {
3788        case 0:
3789                ioc->enable_sdev_max_qd = 0;
3790                shost_for_each_device(sdev, ioc->shost) {
3791                        sas_device_priv_data = sdev->hostdata;
3792                        if (!sas_device_priv_data)
3793                                continue;
3794                        sas_target_priv_data = sas_device_priv_data->sas_target;
3795                        if (!sas_target_priv_data)
3796                                continue;
3797
3798                        if (sas_target_priv_data->flags &
3799                            MPT_TARGET_FLAGS_VOLUME) {
3800                                raid_device =
3801                                    mpt3sas_raid_device_find_by_handle(ioc,
3802                                    sas_target_priv_data->handle);
3803
3804                                switch (raid_device->volume_type) {
3805                                case MPI2_RAID_VOL_TYPE_RAID0:
3806                                        if (raid_device->device_info &
3807                                            MPI2_SAS_DEVICE_INFO_SSP_TARGET)
3808                                                qdepth =
3809                                                    MPT3SAS_SAS_QUEUE_DEPTH;
3810                                        else
3811                                                qdepth =
3812                                                    MPT3SAS_SATA_QUEUE_DEPTH;
3813                                        break;
3814                                case MPI2_RAID_VOL_TYPE_RAID1E:
3815                                case MPI2_RAID_VOL_TYPE_RAID1:
3816                                case MPI2_RAID_VOL_TYPE_RAID10:
3817                                case MPI2_RAID_VOL_TYPE_UNKNOWN:
3818                                default:
3819                                        qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
3820                                }
3821                        } else if (sas_target_priv_data->flags &
3822                            MPT_TARGET_FLAGS_PCIE_DEVICE)
3823                                qdepth = ioc->max_nvme_qd;
3824                        else
3825                                qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
3826                                    ioc->max_wideport_qd : ioc->max_narrowport_qd;
3827
3828                        mpt3sas_scsih_change_queue_depth(sdev, qdepth);
3829                }
3830                break;
3831        case 1:
3832                ioc->enable_sdev_max_qd = 1;
3833                shost_for_each_device(sdev, ioc->shost)
3834                        mpt3sas_scsih_change_queue_depth(sdev,
3835                            shost->can_queue);
3836                break;
3837        default:
3838                return -EINVAL;
3839        }
3840
3841        return strlen(buf);
3842}
3843static DEVICE_ATTR_RW(enable_sdev_max_qd);
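/*
 * Illustrative user-space sketch (not part of the driver): toggling
 * enable_sdev_max_qd.  Writing 1 raises every sdev queue depth to the
 * HBA can_queue value; writing 0 restores the per-device defaults
 * computed above.  The host0 name is an assumption for the example.
 *
 *	#include <stdio.h>
 *
 *	static int set_max_qd(int enable)
 *	{
 *		FILE *f = fopen(
 *		    "/sys/class/scsi_host/host0/enable_sdev_max_qd", "w");
 *
 *		if (!f)
 *			return -1;
 *		fprintf(f, "%d", enable);	// parsed with kstrtoint()
 *		fclose(f);
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		return set_max_qd(1);
 *	}
 */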
3844
3845struct device_attribute *mpt3sas_host_attrs[] = {
3846        &dev_attr_version_fw,
3847        &dev_attr_version_bios,
3848        &dev_attr_version_mpi,
3849        &dev_attr_version_product,
3850        &dev_attr_version_nvdata_persistent,
3851        &dev_attr_version_nvdata_default,
3852        &dev_attr_board_name,
3853        &dev_attr_board_assembly,
3854        &dev_attr_board_tracer,
3855        &dev_attr_io_delay,
3856        &dev_attr_device_delay,
3857        &dev_attr_logging_level,
3858        &dev_attr_fwfault_debug,
3859        &dev_attr_fw_queue_depth,
3860        &dev_attr_host_sas_address,
3861        &dev_attr_ioc_reset_count,
3862        &dev_attr_host_trace_buffer_size,
3863        &dev_attr_host_trace_buffer,
3864        &dev_attr_host_trace_buffer_enable,
3865        &dev_attr_reply_queue_count,
3866        &dev_attr_diag_trigger_master,
3867        &dev_attr_diag_trigger_event,
3868        &dev_attr_diag_trigger_scsi,
3869        &dev_attr_diag_trigger_mpi,
3870        &dev_attr_drv_support_bitmap,
3871        &dev_attr_BRM_status,
3872        &dev_attr_enable_sdev_max_qd,
3873        NULL,
3874};
3875
3876/* device attributes */
3877
3878/**
3879 * sas_address_show - sas address
3880 * @dev: pointer to embedded class device
3881 * @attr: ?
3882 * @buf: the buffer returned
3883 *
3884 * This is the sas address for the target
3885 *
3886 * A sysfs 'read-only' sdev attribute.
3887 */
3888static ssize_t
3889sas_address_show(struct device *dev, struct device_attribute *attr,
3890        char *buf)
3891{
3892        struct scsi_device *sdev = to_scsi_device(dev);
3893        struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3894
3895        return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3896            (unsigned long long)sas_device_priv_data->sas_target->sas_address);
3897}
3898static DEVICE_ATTR_RO(sas_address);
3899
3900/**
3901 * sas_device_handle_show - device handle
3902 * @dev: pointer to embedded class device
3903 * @attr: ?
3904 * @buf: the buffer returned
3905 *
3906 * This is the firmware assigned device handle
3907 *
3908 * A sysfs 'read-only' sdev attribute.
3909 */
3910static ssize_t
3911sas_device_handle_show(struct device *dev, struct device_attribute *attr,
3912        char *buf)
3913{
3914        struct scsi_device *sdev = to_scsi_device(dev);
3915        struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3916
3917        return snprintf(buf, PAGE_SIZE, "0x%04x\n",
3918            sas_device_priv_data->sas_target->handle);
3919}
3920static DEVICE_ATTR_RO(sas_device_handle);
3921
3922/**
3923 * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
3924 * @dev: pointer to embedded device
3925 * @attr: sas_ncq_prio_supported attribute descriptor
3926 * @buf: the buffer returned
3927 *
3928 * A sysfs 'read-only' sdev attribute, only works with SATA
3929 */
3930static ssize_t
3931sas_ncq_prio_supported_show(struct device *dev,
3932                            struct device_attribute *attr, char *buf)
3933{
3934        struct scsi_device *sdev = to_scsi_device(dev);
3935
3936        return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
3937}
3938static DEVICE_ATTR_RO(sas_ncq_prio_supported);
3939
3940/**
3941 * sas_ncq_prio_enable_show - send prioritized io commands to device
3942 * @dev: pointer to embedded device
3943 * @attr: ?
3944 * @buf: the buffer returned
3945 *
3946 * A sysfs 'read/write' sdev attribute, only works with SATA
3947 */
3948static ssize_t
3949sas_ncq_prio_enable_show(struct device *dev,
3950                                 struct device_attribute *attr, char *buf)
3951{
3952        struct scsi_device *sdev = to_scsi_device(dev);
3953        struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3954
3955        return snprintf(buf, PAGE_SIZE, "%d\n",
3956                        sas_device_priv_data->ncq_prio_enable);
3957}
3958
3959static ssize_t
3960sas_ncq_prio_enable_store(struct device *dev,
3961                                  struct device_attribute *attr,
3962                                  const char *buf, size_t count)
3963{
3964        struct scsi_device *sdev = to_scsi_device(dev);
3965        struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3966        bool ncq_prio_enable = 0;
3967
3968        if (kstrtobool(buf, &ncq_prio_enable))
3969                return -EINVAL;
3970
3971        if (!scsih_ncq_prio_supp(sdev))
3972                return -EINVAL;
3973
3974        sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
3975        return strlen(buf);
3976}
3977static DEVICE_ATTR_RW(sas_ncq_prio_enable);
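/*
 * Illustrative user-space sketch (not part of the driver): enabling NCQ
 * priority on a SATA disk.  The store handler parses the value with
 * kstrtobool(), so "0"/"1"/"y"/"n" are accepted, and it fails with
 * -EINVAL unless scsih_ncq_prio_supp() reports support.  The 0:0:1:0
 * device name is an assumption for the example.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen(
 *		    "/sys/class/scsi_device/0:0:1:0/device/"
 *		    "sas_ncq_prio_enable", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1", f);
 *		return fclose(f) ? 1 : 0;
 *	}
 */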
3978
3979struct device_attribute *mpt3sas_dev_attrs[] = {
3980        &dev_attr_sas_address,
3981        &dev_attr_sas_device_handle,
3982        &dev_attr_sas_ncq_prio_supported,
3983        &dev_attr_sas_ncq_prio_enable,
3984        NULL,
3985};
3986
3987/* file operations table for mpt3ctl device */
3988static const struct file_operations ctl_fops = {
3989        .owner = THIS_MODULE,
3990        .unlocked_ioctl = _ctl_ioctl,
3991        .poll = _ctl_poll,
3992        .fasync = _ctl_fasync,
3993#ifdef CONFIG_COMPAT
3994        .compat_ioctl = _ctl_ioctl_compat,
3995#endif
3996};
3997
3998/* file operations table for mpt2ctl device */
3999static const struct file_operations ctl_gen2_fops = {
4000        .owner = THIS_MODULE,
4001        .unlocked_ioctl = _ctl_mpt2_ioctl,
4002        .poll = _ctl_poll,
4003        .fasync = _ctl_fasync,
4004#ifdef CONFIG_COMPAT
4005        .compat_ioctl = _ctl_mpt2_ioctl_compat,
4006#endif
4007};
4008
4009static struct miscdevice ctl_dev = {
4010        .minor  = MPT3SAS_MINOR,
4011        .name   = MPT3SAS_DEV_NAME,
4012        .fops   = &ctl_fops,
4013};
4014
4015static struct miscdevice gen2_ctl_dev = {
4016        .minor  = MPT2SAS_MINOR,
4017        .name   = MPT2SAS_DEV_NAME,
4018        .fops   = &ctl_gen2_fops,
4019};
4020
4021/**
4022 * mpt3sas_ctl_init - main entry point for ctl.
4023 * @hbas_to_enumerate: HBA generations to enumerate (1: SAS 2.0 only, 2: SAS 3.0 only, else both)
4024 */
4025void
4026mpt3sas_ctl_init(ushort hbas_to_enumerate)
4027{
4028        async_queue = NULL;
4029
4030        /* Don't register mpt3ctl ioctl device if
4031         * hbas_to_enumerate is one.
4032         */
4033        if (hbas_to_enumerate != 1)
4034                if (misc_register(&ctl_dev) < 0)
4035                        pr_err("%s can't register misc device [minor=%d]\n",
4036                            MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
4037
4038        /* Don't register mpt2ctl ioctl device if
4039         * hbas_to_enumerate is two.
4040         */
4041        if (hbas_to_enumerate != 2)
4042                if (misc_register(&gen2_ctl_dev) < 0)
4043                        pr_err("%s can't register misc device [minor=%d]\n",
4044                            MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
4045
4046        init_waitqueue_head(&ctl_poll_wait);
4047}
4048
4049/**
4050 * mpt3sas_ctl_exit - exit point for ctl
4051 * @hbas_to_enumerate: HBA generations to enumerate (1: SAS 2.0 only, 2: SAS 3.0 only, else both)
4052 */
4053void
4054mpt3sas_ctl_exit(ushort hbas_to_enumerate)
4055{
4056        struct MPT3SAS_ADAPTER *ioc;
4057        int i;
4058
4059        list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
4060
4061                /* free memory associated to diag buffers */
4062                for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
4063                        if (!ioc->diag_buffer[i])
4064                                continue;
4065                        dma_free_coherent(&ioc->pdev->dev,
4066                                          ioc->diag_buffer_sz[i],
4067                                          ioc->diag_buffer[i],
4068                                          ioc->diag_buffer_dma[i]);
4069                        ioc->diag_buffer[i] = NULL;
4070                        ioc->diag_buffer_status[i] = 0;
4071                }
4072
4073                kfree(ioc->event_log);
4074        }
4075        if (hbas_to_enumerate != 1)
4076                misc_deregister(&ctl_dev);
4077        if (hbas_to_enumerate != 2)
4078                misc_deregister(&gen2_ctl_dev);
4079}
4080