linux/drivers/scsi/smartpqi/smartpqi_init.c
   1/*
   2 *    driver for Microsemi PQI-based storage controllers
   3 *    Copyright (c) 2016 Microsemi Corporation
   4 *    Copyright (c) 2016 PMC-Sierra, Inc.
   5 *
   6 *    This program is free software; you can redistribute it and/or modify
   7 *    it under the terms of the GNU General Public License as published by
   8 *    the Free Software Foundation; version 2 of the License.
   9 *
  10 *    This program is distributed in the hope that it will be useful,
  11 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
  14 *
  15 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
  16 *
  17 */
  18
  19#include <linux/module.h>
  20#include <linux/kernel.h>
  21#include <linux/pci.h>
  22#include <linux/delay.h>
  23#include <linux/interrupt.h>
  24#include <linux/sched.h>
  25#include <linux/rtc.h>
  26#include <linux/bcd.h>
  27#include <linux/cciss_ioctl.h>
  28#include <scsi/scsi_host.h>
  29#include <scsi/scsi_cmnd.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_device.h>
  32#include <scsi/scsi_eh.h>
  33#include <scsi/scsi_transport_sas.h>
  34#include <asm/unaligned.h>
  35#include "smartpqi.h"
  36#include "smartpqi_sis.h"
  37
  38#if !defined(BUILD_TIMESTAMP)
  39#define BUILD_TIMESTAMP
  40#endif
  41
  42#define DRIVER_VERSION          "0.9.14-100"
  43#define DRIVER_MAJOR            0
  44#define DRIVER_MINOR            9
  45#define DRIVER_RELEASE          14
  46#define DRIVER_REVISION         100
  47
  48#define DRIVER_NAME             "Microsemi PQI Driver (v" DRIVER_VERSION ")"
  49#define DRIVER_NAME_SHORT       "smartpqi"
  50
  51MODULE_AUTHOR("Microsemi");
  52MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
  53        DRIVER_VERSION);
  54MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
  55MODULE_VERSION(DRIVER_VERSION);
  56MODULE_LICENSE("GPL");
  57
  58#define PQI_ENABLE_MULTI_QUEUE_SUPPORT  0
  59
  60static char *hpe_branded_controller = "HPE Smart Array Controller";
  61static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
  62
  63static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
  64static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
  65static void pqi_scan_start(struct Scsi_Host *shost);
  66static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
  67        struct pqi_queue_group *queue_group, enum pqi_io_path path,
  68        struct pqi_io_request *io_request);
  69static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
  70        struct pqi_iu_header *request, unsigned int flags,
  71        struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
  72static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
  73        struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
  74        unsigned int cdb_length, struct pqi_queue_group *queue_group,
  75        struct pqi_encryption_info *encryption_info);
  76
  77/* for flags argument to pqi_submit_raid_request_synchronous() */
  78#define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1
  79
  80static struct scsi_transport_template *pqi_sas_transport_template;
  81
  82static atomic_t pqi_controller_count = ATOMIC_INIT(0);
  83
  84static int pqi_disable_device_id_wildcards;
  85module_param_named(disable_device_id_wildcards,
  86        pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
  87MODULE_PARM_DESC(disable_device_id_wildcards,
  88        "Disable device ID wildcards.");
  89
  90static char *raid_levels[] = {
  91        "RAID-0",
  92        "RAID-4",
  93        "RAID-1(1+0)",
  94        "RAID-5",
  95        "RAID-5+1",
  96        "RAID-ADG",
  97        "RAID-1(ADM)",
  98};
  99
 100static char *pqi_raid_level_to_string(u8 raid_level)
 101{
 102        if (raid_level < ARRAY_SIZE(raid_levels))
 103                return raid_levels[raid_level];
 104
 105        return "";
 106}
 107
 108#define SA_RAID_0               0
 109#define SA_RAID_4               1
 110#define SA_RAID_1               2       /* also used for RAID 10 */
 111#define SA_RAID_5               3       /* also used for RAID 50 */
 112#define SA_RAID_51              4
 113#define SA_RAID_6               5       /* also used for RAID 60 */
 114#define SA_RAID_ADM             6       /* also used for RAID 1+0 ADM */
 115#define SA_RAID_MAX             SA_RAID_ADM
 116#define SA_RAID_UNKNOWN         0xff
 117
 118static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
 119{
 120        scmd->scsi_done(scmd);
 121}
 122
 123static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
 124{
 125        return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
 126}
 127
 128static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
 129{
 130        void *hostdata = shost_priv(shost);
 131
 132        return *((struct pqi_ctrl_info **)hostdata);
 133}
 134
 135static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
 136{
 137        return !device->is_physical_device;
 138}
 139
 140static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
 141{
 142        return !ctrl_info->controller_online;
 143}
 144
 145static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
 146{
 147        if (ctrl_info->controller_online)
 148                if (!sis_is_firmware_running(ctrl_info))
 149                        pqi_take_ctrl_offline(ctrl_info);
 150}
 151
 152static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
 153{
 154        return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
 155}
 156
 157static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
 158        struct pqi_ctrl_info *ctrl_info)
 159{
 160        return sis_read_driver_scratch(ctrl_info);
 161}
 162
 163static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
 164        enum pqi_ctrl_mode mode)
 165{
 166        sis_write_driver_scratch(ctrl_info, mode);
 167}
 168
 169#define PQI_RESCAN_WORK_INTERVAL        (10 * HZ)
 170
 171static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
 172{
 173        schedule_delayed_work(&ctrl_info->rescan_work,
 174                PQI_RESCAN_WORK_INTERVAL);
 175}
 176
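/*
 * Set up a streaming DMA mapping for a single buffer and record it in one
 * SG descriptor flagged CISS_SG_LAST.  A NULL buffer, a zero length, or
 * PCI_DMA_NONE is treated as "nothing to map" and succeeds.
 */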
 177static int pqi_map_single(struct pci_dev *pci_dev,
 178        struct pqi_sg_descriptor *sg_descriptor, void *buffer,
 179        size_t buffer_length, int data_direction)
 180{
 181        dma_addr_t bus_address;
 182
 183        if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
 184                return 0;
 185
 186        bus_address = pci_map_single(pci_dev, buffer, buffer_length,
 187                data_direction);
 188        if (pci_dma_mapping_error(pci_dev, bus_address))
 189                return -ENOMEM;
 190
 191        put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
 192        put_unaligned_le32(buffer_length, &sg_descriptor->length);
 193        put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
 194
 195        return 0;
 196}
 197
 198static void pqi_pci_unmap(struct pci_dev *pci_dev,
 199        struct pqi_sg_descriptor *descriptors, int num_descriptors,
 200        int data_direction)
 201{
 202        int i;
 203
 204        if (data_direction == PCI_DMA_NONE)
 205                return;
 206
 207        for (i = 0; i < num_descriptors; i++)
 208                pci_unmap_single(pci_dev,
 209                        (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
 210                        get_unaligned_le32(&descriptors[i].length),
 211                        data_direction);
 212}
 213
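/*
 * Build a RAID-path passthrough request: fill in the IU header and LUN,
 * construct the CISS/BMIC CDB for the given cmd, derive the PCI DMA
 * direction from the data direction, and map the caller's buffer into the
 * first SG descriptor.  On success the caller owns the mapping and must
 * release it with pqi_pci_unmap().
 */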
 214static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 215        struct pqi_raid_path_request *request, u8 cmd,
 216        u8 *scsi3addr, void *buffer, size_t buffer_length,
 217        u16 vpd_page, int *pci_direction)
 218{
 219        u8 *cdb;
 220        int pci_dir;
 221
 222        memset(request, 0, sizeof(*request));
 223
 224        request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
 225        put_unaligned_le16(offsetof(struct pqi_raid_path_request,
 226                sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
 227                &request->header.iu_length);
 228        put_unaligned_le32(buffer_length, &request->buffer_length);
 229        memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
 230        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
 231        request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
 232
 233        cdb = request->cdb;
 234
 235        switch (cmd) {
 236        case INQUIRY:
 237                request->data_direction = SOP_READ_FLAG;
 238                cdb[0] = INQUIRY;
 239                if (vpd_page & VPD_PAGE) {
 240                        cdb[1] = 0x1;
 241                        cdb[2] = (u8)vpd_page;
 242                }
 243                cdb[4] = (u8)buffer_length;
 244                break;
 245        case CISS_REPORT_LOG:
 246        case CISS_REPORT_PHYS:
 247                request->data_direction = SOP_READ_FLAG;
 248                cdb[0] = cmd;
 249                if (cmd == CISS_REPORT_PHYS)
 250                        cdb[1] = CISS_REPORT_PHYS_EXTENDED;
 251                else
 252                        cdb[1] = CISS_REPORT_LOG_EXTENDED;
 253                put_unaligned_be32(buffer_length, &cdb[6]);
 254                break;
 255        case CISS_GET_RAID_MAP:
 256                request->data_direction = SOP_READ_FLAG;
 257                cdb[0] = CISS_READ;
 258                cdb[1] = CISS_GET_RAID_MAP;
 259                put_unaligned_be32(buffer_length, &cdb[6]);
 260                break;
 261        case SA_CACHE_FLUSH:
 262                request->data_direction = SOP_WRITE_FLAG;
 263                cdb[0] = BMIC_WRITE;
 264                cdb[6] = BMIC_CACHE_FLUSH;
 265                put_unaligned_be16(buffer_length, &cdb[7]);
 266                break;
 267        case BMIC_IDENTIFY_CONTROLLER:
 268        case BMIC_IDENTIFY_PHYSICAL_DEVICE:
 269                request->data_direction = SOP_READ_FLAG;
 270                cdb[0] = BMIC_READ;
 271                cdb[6] = cmd;
 272                put_unaligned_be16(buffer_length, &cdb[7]);
 273                break;
 274        case BMIC_WRITE_HOST_WELLNESS:
 275                request->data_direction = SOP_WRITE_FLAG;
 276                cdb[0] = BMIC_WRITE;
 277                cdb[6] = cmd;
 278                put_unaligned_be16(buffer_length, &cdb[7]);
 279                break;
 280        default:
  281                dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
 282                        cmd);
 283                WARN_ON(cmd);
 284                break;
 285        }
 286
 287        switch (request->data_direction) {
 288        case SOP_READ_FLAG:
 289                pci_dir = PCI_DMA_FROMDEVICE;
 290                break;
 291        case SOP_WRITE_FLAG:
 292                pci_dir = PCI_DMA_TODEVICE;
 293                break;
 294        case SOP_NO_DIRECTION_FLAG:
 295                pci_dir = PCI_DMA_NONE;
 296                break;
 297        default:
 298                pci_dir = PCI_DMA_BIDIRECTIONAL;
 299                break;
 300        }
 301
 302        *pci_direction = pci_dir;
 303
 304        return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
 305                buffer, buffer_length, pci_dir);
 306}
 307
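/*
 * Claim a free element from the pre-allocated I/O request pool without a
 * lock: the first slot whose refcount transitions from 0 to 1 is ours.
 * next_io_request_slot is only a starting hint, so the unsynchronized
 * update of it is harmless ("benignly racy").
 */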
 308static struct pqi_io_request *pqi_alloc_io_request(
 309        struct pqi_ctrl_info *ctrl_info)
 310{
 311        struct pqi_io_request *io_request;
 312        u16 i = ctrl_info->next_io_request_slot;        /* benignly racy */
 313
 314        while (1) {
 315                io_request = &ctrl_info->io_request_pool[i];
 316                if (atomic_inc_return(&io_request->refcount) == 1)
 317                        break;
 318                atomic_dec(&io_request->refcount);
 319                i = (i + 1) % ctrl_info->max_io_slots;
 320        }
 321
 322        /* benignly racy */
 323        ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
 324
 325        io_request->scmd = NULL;
 326        io_request->status = 0;
 327        io_request->error_info = NULL;
 328
 329        return io_request;
 330}
 331
 332static void pqi_free_io_request(struct pqi_io_request *io_request)
 333{
 334        atomic_dec(&io_request->refcount);
 335}
 336
 337static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
 338        struct bmic_identify_controller *buffer)
 339{
 340        int rc;
 341        int pci_direction;
 342        struct pqi_raid_path_request request;
 343
 344        rc = pqi_build_raid_path_request(ctrl_info, &request,
 345                BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
 346                sizeof(*buffer), 0, &pci_direction);
 347        if (rc)
 348                return rc;
 349
 350        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
 351                NULL, NO_TIMEOUT);
 352
 353        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 354                pci_direction);
 355
 356        return rc;
 357}
 358
 359static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
 360        u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
 361{
 362        int rc;
 363        int pci_direction;
 364        struct pqi_raid_path_request request;
 365
 366        rc = pqi_build_raid_path_request(ctrl_info, &request,
 367                INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
 368                &pci_direction);
 369        if (rc)
 370                return rc;
 371
 372        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
 373                NULL, NO_TIMEOUT);
 374
 375        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 376                pci_direction);
 377
 378        return rc;
 379}
 380
 381static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
 382        struct pqi_scsi_dev *device,
 383        struct bmic_identify_physical_device *buffer,
 384        size_t buffer_length)
 385{
 386        int rc;
 387        int pci_direction;
 388        u16 bmic_device_index;
 389        struct pqi_raid_path_request request;
 390
 391        rc = pqi_build_raid_path_request(ctrl_info, &request,
 392                BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
 393                buffer_length, 0, &pci_direction);
 394        if (rc)
 395                return rc;
 396
 397        bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
 398        request.cdb[2] = (u8)bmic_device_index;
 399        request.cdb[9] = (u8)(bmic_device_index >> 8);
 400
 401        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 402                0, NULL, NO_TIMEOUT);
 403
 404        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 405                pci_direction);
 406
 407        return rc;
 408}
 409
 410#define SA_CACHE_FLUSH_BUFFER_LENGTH    4
 411
 412static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
 413{
 414        int rc;
 415        struct pqi_raid_path_request request;
 416        int pci_direction;
 417        u8 *buffer;
 418
 419        /*
 420         * Don't bother trying to flush the cache if the controller is
 421         * locked up.
 422         */
 423        if (pqi_ctrl_offline(ctrl_info))
 424                return -ENXIO;
 425
 426        buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
 427        if (!buffer)
 428                return -ENOMEM;
 429
 430        rc = pqi_build_raid_path_request(ctrl_info, &request,
 431                SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
 432                SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
 433        if (rc)
 434                goto out;
 435
 436        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 437                0, NULL, NO_TIMEOUT);
 438
 439        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 440                pci_direction);
 441
 442out:
 443        kfree(buffer);
 444
 445        return rc;
 446}
 447
 448static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
 449        void *buffer, size_t buffer_length)
 450{
 451        int rc;
 452        struct pqi_raid_path_request request;
 453        int pci_direction;
 454
 455        rc = pqi_build_raid_path_request(ctrl_info, &request,
 456                BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
 457                buffer_length, 0, &pci_direction);
 458        if (rc)
 459                return rc;
 460
 461        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 462                0, NULL, NO_TIMEOUT);
 463
 464        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 465                pci_direction);
 466
 467        return rc;
 468}
 469
 470#pragma pack(1)
 471
 472struct bmic_host_wellness_driver_version {
 473        u8      start_tag[4];
 474        u8      driver_version_tag[2];
 475        __le16  driver_version_length;
 476        char    driver_version[32];
 477        u8      end_tag[2];
 478};
 479
 480#pragma pack()
 481
 482static int pqi_write_driver_version_to_host_wellness(
 483        struct pqi_ctrl_info *ctrl_info)
 484{
 485        int rc;
 486        struct bmic_host_wellness_driver_version *buffer;
 487        size_t buffer_length;
 488
 489        buffer_length = sizeof(*buffer);
 490
 491        buffer = kmalloc(buffer_length, GFP_KERNEL);
 492        if (!buffer)
 493                return -ENOMEM;
 494
 495        buffer->start_tag[0] = '<';
 496        buffer->start_tag[1] = 'H';
 497        buffer->start_tag[2] = 'W';
 498        buffer->start_tag[3] = '>';
 499        buffer->driver_version_tag[0] = 'D';
 500        buffer->driver_version_tag[1] = 'V';
 501        put_unaligned_le16(sizeof(buffer->driver_version),
 502                &buffer->driver_version_length);
 503        strncpy(buffer->driver_version, DRIVER_VERSION,
 504                sizeof(buffer->driver_version) - 1);
 505        buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
 506        buffer->end_tag[0] = 'Z';
 507        buffer->end_tag[1] = 'Z';
 508
 509        rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
 510
 511        kfree(buffer);
 512
 513        return rc;
 514}
 515
 516#pragma pack(1)
 517
 518struct bmic_host_wellness_time {
 519        u8      start_tag[4];
 520        u8      time_tag[2];
 521        __le16  time_length;
 522        u8      time[8];
 523        u8      dont_write_tag[2];
 524        u8      end_tag[2];
 525};
 526
 527#pragma pack()
 528
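/*
 * Report the host's local time to the controller as a BMIC host-wellness
 * update.  The time field is BCD encoded: hour, minute, second, a reserved
 * byte, month, day, century, and year within the century.
 */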
 529static int pqi_write_current_time_to_host_wellness(
 530        struct pqi_ctrl_info *ctrl_info)
 531{
 532        int rc;
 533        struct bmic_host_wellness_time *buffer;
 534        size_t buffer_length;
 535        time64_t local_time;
 536        unsigned int year;
 537        struct timeval time;
 538        struct rtc_time tm;
 539
 540        buffer_length = sizeof(*buffer);
 541
 542        buffer = kmalloc(buffer_length, GFP_KERNEL);
 543        if (!buffer)
 544                return -ENOMEM;
 545
 546        buffer->start_tag[0] = '<';
 547        buffer->start_tag[1] = 'H';
 548        buffer->start_tag[2] = 'W';
 549        buffer->start_tag[3] = '>';
 550        buffer->time_tag[0] = 'T';
 551        buffer->time_tag[1] = 'D';
 552        put_unaligned_le16(sizeof(buffer->time),
 553                &buffer->time_length);
 554
 555        do_gettimeofday(&time);
 556        local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
 557        rtc_time_to_tm(local_time, &tm);
 558        year = tm.tm_year + 1900;
 559
 560        buffer->time[0] = bin2bcd(tm.tm_hour);
 561        buffer->time[1] = bin2bcd(tm.tm_min);
 562        buffer->time[2] = bin2bcd(tm.tm_sec);
 563        buffer->time[3] = 0;
 564        buffer->time[4] = bin2bcd(tm.tm_mon + 1);
 565        buffer->time[5] = bin2bcd(tm.tm_mday);
 566        buffer->time[6] = bin2bcd(year / 100);
 567        buffer->time[7] = bin2bcd(year % 100);
 568
 569        buffer->dont_write_tag[0] = 'D';
 570        buffer->dont_write_tag[1] = 'W';
 571        buffer->end_tag[0] = 'Z';
 572        buffer->end_tag[1] = 'Z';
 573
 574        rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
 575
 576        kfree(buffer);
 577
 578        return rc;
 579}
 580
 581#define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * HZ)
 582
 583static void pqi_update_time_worker(struct work_struct *work)
 584{
 585        int rc;
 586        struct pqi_ctrl_info *ctrl_info;
 587
 588        ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
 589                update_time_work);
 590
 591        rc = pqi_write_current_time_to_host_wellness(ctrl_info);
 592        if (rc)
 593                dev_warn(&ctrl_info->pci_dev->dev,
 594                        "error updating time on controller\n");
 595
 596        schedule_delayed_work(&ctrl_info->update_time_work,
 597                PQI_UPDATE_TIME_WORK_INTERVAL);
 598}
 599
 600static inline void pqi_schedule_update_time_worker(
 601        struct pqi_ctrl_info *ctrl_info)
 602{
 603        schedule_delayed_work(&ctrl_info->update_time_work, 0);
 604}
 605
 606static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
 607        void *buffer, size_t buffer_length)
 608{
 609        int rc;
 610        int pci_direction;
 611        struct pqi_raid_path_request request;
 612
 613        rc = pqi_build_raid_path_request(ctrl_info, &request,
 614                cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
 615        if (rc)
 616                return rc;
 617
 618        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
 619                NULL, NO_TIMEOUT);
 620
 621        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 622                pci_direction);
 623
 624        return rc;
 625}
 626
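/*
 * Issue CISS_REPORT_PHYS or CISS_REPORT_LOG twice: once with a bare header
 * to learn the list length, then again with a buffer sized to hold the
 * whole list.  If the list grew between the two commands, retry with the
 * larger size.  On success *buffer holds the kmalloc'ed list and belongs
 * to the caller.
 */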
 627static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
 628        void **buffer)
 629{
 630        int rc;
 631        size_t lun_list_length;
 632        size_t lun_data_length;
 633        size_t new_lun_list_length;
 634        void *lun_data = NULL;
 635        struct report_lun_header *report_lun_header;
 636
 637        report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
 638        if (!report_lun_header) {
 639                rc = -ENOMEM;
 640                goto out;
 641        }
 642
 643        rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
 644                sizeof(*report_lun_header));
 645        if (rc)
 646                goto out;
 647
 648        lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
 649
 650again:
 651        lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
 652
 653        lun_data = kmalloc(lun_data_length, GFP_KERNEL);
 654        if (!lun_data) {
 655                rc = -ENOMEM;
 656                goto out;
 657        }
 658
 659        if (lun_list_length == 0) {
 660                memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
 661                goto out;
 662        }
 663
 664        rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
 665        if (rc)
 666                goto out;
 667
 668        new_lun_list_length = get_unaligned_be32(
 669                &((struct report_lun_header *)lun_data)->list_length);
 670
 671        if (new_lun_list_length > lun_list_length) {
 672                lun_list_length = new_lun_list_length;
 673                kfree(lun_data);
 674                goto again;
 675        }
 676
 677out:
 678        kfree(report_lun_header);
 679
 680        if (rc) {
 681                kfree(lun_data);
 682                lun_data = NULL;
 683        }
 684
 685        *buffer = lun_data;
 686
 687        return rc;
 688}
 689
 690static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
 691        void **buffer)
 692{
 693        return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
 694                buffer);
 695}
 696
 697static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
 698        void **buffer)
 699{
 700        return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
 701}
 702
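/*
 * Fetch both the physical and logical LUN lists, then append an empty
 * entry to the logical list so that the controller LUN itself is picked up
 * during the device scan.
 */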
 703static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
 704        struct report_phys_lun_extended **physdev_list,
 705        struct report_log_lun_extended **logdev_list)
 706{
 707        int rc;
 708        size_t logdev_list_length;
 709        size_t logdev_data_length;
 710        struct report_log_lun_extended *internal_logdev_list;
 711        struct report_log_lun_extended *logdev_data;
 712        struct report_lun_header report_lun_header;
 713
 714        rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
 715        if (rc)
 716                dev_err(&ctrl_info->pci_dev->dev,
 717                        "report physical LUNs failed\n");
 718
 719        rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
 720        if (rc)
 721                dev_err(&ctrl_info->pci_dev->dev,
 722                        "report logical LUNs failed\n");
 723
 724        /*
 725         * Tack the controller itself onto the end of the logical device list.
 726         */
 727
 728        logdev_data = *logdev_list;
 729
 730        if (logdev_data) {
 731                logdev_list_length =
 732                        get_unaligned_be32(&logdev_data->header.list_length);
 733        } else {
 734                memset(&report_lun_header, 0, sizeof(report_lun_header));
 735                logdev_data =
 736                        (struct report_log_lun_extended *)&report_lun_header;
 737                logdev_list_length = 0;
 738        }
 739
 740        logdev_data_length = sizeof(struct report_lun_header) +
 741                logdev_list_length;
 742
 743        internal_logdev_list = kmalloc(logdev_data_length +
 744                sizeof(struct report_log_lun_extended), GFP_KERNEL);
 745        if (!internal_logdev_list) {
 746                kfree(*logdev_list);
 747                *logdev_list = NULL;
 748                return -ENOMEM;
 749        }
 750
 751        memcpy(internal_logdev_list, logdev_data, logdev_data_length);
 752        memset((u8 *)internal_logdev_list + logdev_data_length, 0,
 753                sizeof(struct report_log_lun_extended_entry));
 754        put_unaligned_be32(logdev_list_length +
 755                sizeof(struct report_log_lun_extended_entry),
 756                &internal_logdev_list->header.list_length);
 757
 758        kfree(*logdev_list);
 759        *logdev_list = internal_logdev_list;
 760
 761        return 0;
 762}
 763
 764static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
 765        int bus, int target, int lun)
 766{
 767        device->bus = bus;
 768        device->target = target;
 769        device->lun = lun;
 770}
 771
 772static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
 773{
 774        u8 *scsi3addr;
 775        u32 lunid;
 776
 777        scsi3addr = device->scsi3addr;
 778        lunid = get_unaligned_le32(scsi3addr);
 779
 780        if (pqi_is_hba_lunid(scsi3addr)) {
 781                /* The specified device is the controller. */
 782                pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
 783                device->target_lun_valid = true;
 784                return;
 785        }
 786
 787        if (pqi_is_logical_device(device)) {
 788                pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
 789                        lunid & 0x3fff);
 790                device->target_lun_valid = true;
 791                return;
 792        }
 793
 794        /*
 795         * Defer target and LUN assignment for non-controller physical devices
 796         * because the SAS transport layer will make these assignments later.
 797         */
 798        pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
 799}
 800
 801static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
 802        struct pqi_scsi_dev *device)
 803{
 804        int rc;
 805        u8 raid_level;
 806        u8 *buffer;
 807
 808        raid_level = SA_RAID_UNKNOWN;
 809
 810        buffer = kmalloc(64, GFP_KERNEL);
 811        if (buffer) {
 812                rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
 813                        VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
 814                if (rc == 0) {
 815                        raid_level = buffer[8];
 816                        if (raid_level > SA_RAID_MAX)
 817                                raid_level = SA_RAID_UNKNOWN;
 818                }
 819                kfree(buffer);
 820        }
 821
 822        device->raid_level = raid_level;
 823}
 824
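/*
 * Sanity-check a RAID map before it is used for accelerated I/O: verify the
 * structure size, the total number of map entries, and the layout map count
 * expected for the volume's RAID level (2 for RAID-1, 3 for RAID-1(ADM),
 * and a non-zero row size for RAID-50/60).
 */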
 825static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
 826        struct pqi_scsi_dev *device, struct raid_map *raid_map)
 827{
 828        char *err_msg;
 829        u32 raid_map_size;
 830        u32 r5or6_blocks_per_row;
 831        unsigned int num_phys_disks;
 832        unsigned int num_raid_map_entries;
 833
 834        raid_map_size = get_unaligned_le32(&raid_map->structure_size);
 835
 836        if (raid_map_size < offsetof(struct raid_map, disk_data)) {
 837                err_msg = "RAID map too small";
 838                goto bad_raid_map;
 839        }
 840
 841        if (raid_map_size > sizeof(*raid_map)) {
 842                err_msg = "RAID map too large";
 843                goto bad_raid_map;
 844        }
 845
 846        num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
 847                (get_unaligned_le16(&raid_map->data_disks_per_row) +
 848                get_unaligned_le16(&raid_map->metadata_disks_per_row));
 849        num_raid_map_entries = num_phys_disks *
 850                get_unaligned_le16(&raid_map->row_cnt);
 851
 852        if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
 853                err_msg = "invalid number of map entries in RAID map";
 854                goto bad_raid_map;
 855        }
 856
 857        if (device->raid_level == SA_RAID_1) {
 858                if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
 859                        err_msg = "invalid RAID-1 map";
 860                        goto bad_raid_map;
 861                }
 862        } else if (device->raid_level == SA_RAID_ADM) {
 863                if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
 864                        err_msg = "invalid RAID-1(ADM) map";
 865                        goto bad_raid_map;
 866                }
 867        } else if ((device->raid_level == SA_RAID_5 ||
 868                device->raid_level == SA_RAID_6) &&
 869                get_unaligned_le16(&raid_map->layout_map_count) > 1) {
 870                /* RAID 50/60 */
 871                r5or6_blocks_per_row =
 872                        get_unaligned_le16(&raid_map->strip_size) *
 873                        get_unaligned_le16(&raid_map->data_disks_per_row);
 874                if (r5or6_blocks_per_row == 0) {
 875                        err_msg = "invalid RAID-5 or RAID-6 map";
 876                        goto bad_raid_map;
 877                }
 878        }
 879
 880        return 0;
 881
 882bad_raid_map:
 883        dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
 884
 885        return -EINVAL;
 886}
 887
 888static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 889        struct pqi_scsi_dev *device)
 890{
 891        int rc;
 892        int pci_direction;
 893        struct pqi_raid_path_request request;
 894        struct raid_map *raid_map;
 895
 896        raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
 897        if (!raid_map)
 898                return -ENOMEM;
 899
 900        rc = pqi_build_raid_path_request(ctrl_info, &request,
 901                CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
 902                sizeof(*raid_map), 0, &pci_direction);
 903        if (rc)
 904                goto error;
 905
 906        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
 907                NULL, NO_TIMEOUT);
 908
 909        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
 910                pci_direction);
 911
 912        if (rc)
 913                goto error;
 914
 915        rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
 916        if (rc)
 917                goto error;
 918
 919        device->raid_map = raid_map;
 920
 921        return 0;
 922
 923error:
 924        kfree(raid_map);
 925
 926        return rc;
 927}
 928
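/*
 * Read the vendor-specific offload status VPD page for a logical volume.
 * Offload (bypassing the RAID stack for eligible I/O) is only left pending
 * if the page says it is configured and enabled and the RAID map can be
 * fetched successfully.
 */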
 929static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
 930        struct pqi_scsi_dev *device)
 931{
 932        int rc;
 933        u8 *buffer;
 934        u8 offload_status;
 935
 936        buffer = kmalloc(64, GFP_KERNEL);
 937        if (!buffer)
 938                return;
 939
 940        rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
 941                VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
 942        if (rc)
 943                goto out;
 944
 945#define OFFLOAD_STATUS_BYTE     4
 946#define OFFLOAD_CONFIGURED_BIT  0x1
 947#define OFFLOAD_ENABLED_BIT     0x2
 948
 949        offload_status = buffer[OFFLOAD_STATUS_BYTE];
 950        device->offload_configured =
 951                !!(offload_status & OFFLOAD_CONFIGURED_BIT);
 952        if (device->offload_configured) {
 953                device->offload_enabled_pending =
 954                        !!(offload_status & OFFLOAD_ENABLED_BIT);
 955                if (pqi_get_raid_map(ctrl_info, device))
 956                        device->offload_enabled_pending = false;
 957        }
 958
 959out:
 960        kfree(buffer);
 961}
 962
 963/*
 964 * Use vendor-specific VPD to determine online/offline status of a volume.
 965 */
 966
 967static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
 968        struct pqi_scsi_dev *device)
 969{
 970        int rc;
 971        size_t page_length;
 972        u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
 973        bool volume_offline = true;
 974        u32 volume_flags;
 975        struct ciss_vpd_logical_volume_status *vpd;
 976
 977        vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
 978        if (!vpd)
 979                goto no_buffer;
 980
 981        rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
 982                VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
 983        if (rc)
 984                goto out;
 985
 986        page_length = offsetof(struct ciss_vpd_logical_volume_status,
 987                volume_status) + vpd->page_length;
 988        if (page_length < sizeof(*vpd))
 989                goto out;
 990
 991        volume_status = vpd->volume_status;
 992        volume_flags = get_unaligned_be32(&vpd->flags);
 993        volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
 994
 995out:
 996        kfree(vpd);
 997no_buffer:
 998        device->volume_status = volume_status;
 999        device->volume_offline = volume_offline;
1000}
1001
1002static void sanitize_inquiry_string(unsigned char *s, int len)
1003{
1004        bool terminated = false;
1005
1006        for (; len > 0; (--len, ++s)) {
1007                if (*s == 0)
1008                        terminated = true;
1009                if (terminated || *s < 0x20 || *s > 0x7e)
1010                        *s = ' ';
1011        }
1012}
1013
1014static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1015        struct pqi_scsi_dev *device)
1016{
1017        int rc;
1018        u8 *buffer;
1019
1020        buffer = kmalloc(64, GFP_KERNEL);
1021        if (!buffer)
1022                return -ENOMEM;
1023
1024        /* Send an inquiry to the device to see what it is. */
1025        rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1026        if (rc)
1027                goto out;
1028
1029        sanitize_inquiry_string(&buffer[8], 8);
1030        sanitize_inquiry_string(&buffer[16], 16);
1031
1032        device->devtype = buffer[0] & 0x1f;
1033        memcpy(device->vendor, &buffer[8],
1034                sizeof(device->vendor));
1035        memcpy(device->model, &buffer[16],
1036                sizeof(device->model));
1037
1038        if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1039                pqi_get_raid_level(ctrl_info, device);
1040                pqi_get_offload_status(ctrl_info, device);
1041                pqi_get_volume_status(ctrl_info, device);
1042        }
1043
1044out:
1045        kfree(buffer);
1046
1047        return rc;
1048}
1049
1050static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1051        struct pqi_scsi_dev *device,
1052        struct bmic_identify_physical_device *id_phys)
1053{
1054        int rc;
1055
1056        memset(id_phys, 0, sizeof(*id_phys));
1057
1058        rc = pqi_identify_physical_device(ctrl_info, device,
1059                id_phys, sizeof(*id_phys));
1060        if (rc) {
1061                device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1062                return;
1063        }
1064
1065        device->queue_depth =
1066                get_unaligned_le16(&id_phys->current_queue_depth_limit);
1067        device->device_type = id_phys->device_type;
1068        device->active_path_index = id_phys->active_path_number;
1069        device->path_map = id_phys->redundant_path_present_map;
1070        memcpy(&device->box,
1071                &id_phys->alternate_paths_phys_box_on_port,
1072                sizeof(device->box));
1073        memcpy(&device->phys_connector,
1074                &id_phys->alternate_paths_phys_connector,
1075                sizeof(device->phys_connector));
1076        device->bay = id_phys->phys_bay_in_box;
1077}
1078
1079static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1080        struct pqi_scsi_dev *device)
1081{
1082        char *status;
1083        static const char unknown_state_str[] =
1084                "Volume is in an unknown state (%u)";
1085        char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1086
1087        switch (device->volume_status) {
1088        case CISS_LV_OK:
1089                status = "Volume online";
1090                break;
1091        case CISS_LV_FAILED:
1092                status = "Volume failed";
1093                break;
1094        case CISS_LV_NOT_CONFIGURED:
1095                status = "Volume not configured";
1096                break;
1097        case CISS_LV_DEGRADED:
1098                status = "Volume degraded";
1099                break;
1100        case CISS_LV_READY_FOR_RECOVERY:
1101                status = "Volume ready for recovery operation";
1102                break;
1103        case CISS_LV_UNDERGOING_RECOVERY:
1104                status = "Volume undergoing recovery";
1105                break;
1106        case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1107                status = "Wrong physical drive was replaced";
1108                break;
1109        case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
 1110                status = "A physical drive is not properly connected";
1111                break;
1112        case CISS_LV_HARDWARE_OVERHEATING:
1113                status = "Hardware is overheating";
1114                break;
1115        case CISS_LV_HARDWARE_HAS_OVERHEATED:
1116                status = "Hardware has overheated";
1117                break;
1118        case CISS_LV_UNDERGOING_EXPANSION:
1119                status = "Volume undergoing expansion";
1120                break;
1121        case CISS_LV_NOT_AVAILABLE:
1122                status = "Volume waiting for transforming volume";
1123                break;
1124        case CISS_LV_QUEUED_FOR_EXPANSION:
1125                status = "Volume queued for expansion";
1126                break;
1127        case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1128                status = "Volume disabled due to SCSI ID conflict";
1129                break;
1130        case CISS_LV_EJECTED:
1131                status = "Volume has been ejected";
1132                break;
1133        case CISS_LV_UNDERGOING_ERASE:
1134                status = "Volume undergoing background erase";
1135                break;
1136        case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1137                status = "Volume ready for predictive spare rebuild";
1138                break;
1139        case CISS_LV_UNDERGOING_RPI:
1140                status = "Volume undergoing rapid parity initialization";
1141                break;
1142        case CISS_LV_PENDING_RPI:
1143                status = "Volume queued for rapid parity initialization";
1144                break;
1145        case CISS_LV_ENCRYPTED_NO_KEY:
1146                status = "Encrypted volume inaccessible - key not present";
1147                break;
1148        case CISS_LV_UNDERGOING_ENCRYPTION:
1149                status = "Volume undergoing encryption process";
1150                break;
1151        case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1152                status = "Volume undergoing encryption re-keying process";
1153                break;
1154        case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1155                status =
1156                        "Encrypted volume inaccessible - disabled on ctrl";
1157                break;
1158        case CISS_LV_PENDING_ENCRYPTION:
1159                status = "Volume pending migration to encrypted state";
1160                break;
1161        case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1162                status = "Volume pending encryption rekeying";
1163                break;
1164        case CISS_LV_NOT_SUPPORTED:
1165                status = "Volume not supported on this controller";
1166                break;
1167        case CISS_LV_STATUS_UNAVAILABLE:
1168                status = "Volume status not available";
1169                break;
1170        default:
1171                snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1172                        unknown_state_str, device->volume_status);
1173                status = unknown_state_buffer;
1174                break;
1175        }
1176
1177        dev_info(&ctrl_info->pci_dev->dev,
1178                "scsi %d:%d:%d:%d %s\n",
1179                ctrl_info->scsi_host->host_no,
1180                device->bus, device->target, device->lun, status);
1181}
1182
1183static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1184        struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1185{
1186        struct pqi_scsi_dev *device;
1187
1188        list_for_each_entry(device, &ctrl_info->scsi_device_list,
1189                scsi_device_list_entry) {
1190                if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1191                        continue;
1192                if (pqi_is_logical_device(device))
1193                        continue;
1194                if (device->aio_handle == aio_handle)
1195                        return device;
1196        }
1197
1198        return NULL;
1199}
1200
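/*
 * Derive a logical drive's queue depth by summing the queue depths of every
 * member physical disk referenced by its RAID map.  If any member cannot be
 * found, fall back to the default depth, disable offload for the drive, and
 * discard the RAID map.
 */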
1201static void pqi_update_logical_drive_queue_depth(
1202        struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1203{
1204        unsigned int i;
1205        struct raid_map *raid_map;
1206        struct raid_map_disk_data *disk_data;
1207        struct pqi_scsi_dev *phys_disk;
1208        unsigned int num_phys_disks;
1209        unsigned int num_raid_map_entries;
1210        unsigned int queue_depth;
1211
1212        logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1213
1214        raid_map = logical_drive->raid_map;
1215        if (!raid_map)
1216                return;
1217
1218        disk_data = raid_map->disk_data;
1219        num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1220                (get_unaligned_le16(&raid_map->data_disks_per_row) +
1221                get_unaligned_le16(&raid_map->metadata_disks_per_row));
1222        num_raid_map_entries = num_phys_disks *
1223                get_unaligned_le16(&raid_map->row_cnt);
1224
1225        queue_depth = 0;
1226        for (i = 0; i < num_raid_map_entries; i++) {
1227                phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1228                        disk_data[i].aio_handle);
1229
1230                if (!phys_disk) {
1231                        dev_warn(&ctrl_info->pci_dev->dev,
1232                                "failed to find physical disk for logical drive %016llx\n",
1233                                get_unaligned_be64(logical_drive->scsi3addr));
1234                        logical_drive->offload_enabled = false;
1235                        logical_drive->offload_enabled_pending = false;
1236                        kfree(raid_map);
1237                        logical_drive->raid_map = NULL;
1238                        return;
1239                }
1240
1241                queue_depth += phys_disk->queue_depth;
1242        }
1243
1244        logical_drive->queue_depth = queue_depth;
1245}
1246
1247static void pqi_update_all_logical_drive_queue_depths(
1248        struct pqi_ctrl_info *ctrl_info)
1249{
1250        struct pqi_scsi_dev *device;
1251
1252        list_for_each_entry(device, &ctrl_info->scsi_device_list,
1253                scsi_device_list_entry) {
1254                if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1255                        continue;
1256                if (!pqi_is_logical_device(device))
1257                        continue;
1258                pqi_update_logical_drive_queue_depth(ctrl_info, device);
1259        }
1260}
1261
1262static void pqi_rescan_worker(struct work_struct *work)
1263{
1264        struct pqi_ctrl_info *ctrl_info;
1265
1266        ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1267                rescan_work);
1268
1269        pqi_scan_scsi_devices(ctrl_info);
1270}
1271
1272static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1273        struct pqi_scsi_dev *device)
1274{
1275        int rc;
1276
1277        if (pqi_is_logical_device(device))
1278                rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1279                        device->target, device->lun);
1280        else
1281                rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1282
1283        return rc;
1284}
1285
1286static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1287        struct pqi_scsi_dev *device)
1288{
1289        if (pqi_is_logical_device(device))
1290                scsi_remove_device(device->sdev);
1291        else
1292                pqi_remove_sas_device(device);
1293}
1294
1295/* Assumes the SCSI device list lock is held. */
1296
1297static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1298        int bus, int target, int lun)
1299{
1300        struct pqi_scsi_dev *device;
1301
1302        list_for_each_entry(device, &ctrl_info->scsi_device_list,
1303                scsi_device_list_entry)
1304                if (device->bus == bus && device->target == target &&
1305                        device->lun == lun)
1306                        return device;
1307
1308        return NULL;
1309}
1310
1311static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1312        struct pqi_scsi_dev *dev2)
1313{
1314        if (dev1->is_physical_device != dev2->is_physical_device)
1315                return false;
1316
1317        if (dev1->is_physical_device)
1318                return dev1->wwid == dev2->wwid;
1319
1320        return memcmp(dev1->volume_id, dev2->volume_id,
1321                sizeof(dev1->volume_id)) == 0;
1322}
1323
1324enum pqi_find_result {
1325        DEVICE_NOT_FOUND,
1326        DEVICE_CHANGED,
1327        DEVICE_SAME,
1328};
1329
1330static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1331        struct pqi_scsi_dev *device_to_find,
1332        struct pqi_scsi_dev **matching_device)
1333{
1334        struct pqi_scsi_dev *device;
1335
1336        list_for_each_entry(device, &ctrl_info->scsi_device_list,
1337                scsi_device_list_entry) {
1338                if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1339                        device->scsi3addr)) {
1340                        *matching_device = device;
1341                        if (pqi_device_equal(device_to_find, device)) {
1342                                if (device_to_find->volume_offline)
1343                                        return DEVICE_CHANGED;
1344                                return DEVICE_SAME;
1345                        }
1346                        return DEVICE_CHANGED;
1347                }
1348        }
1349
1350        return DEVICE_NOT_FOUND;
1351}
1352
1353static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1354        char *action, struct pqi_scsi_dev *device)
1355{
1356        dev_info(&ctrl_info->pci_dev->dev,
1357                "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1358                action,
1359                ctrl_info->scsi_host->host_no,
1360                device->bus,
1361                device->target,
1362                device->lun,
1363                scsi_device_type(device->devtype),
1364                device->vendor,
1365                device->model,
1366                pqi_raid_level_to_string(device->raid_level),
1367                device->offload_configured ? '+' : '-',
1368                device->offload_enabled_pending ? '+' : '-',
1369                device->expose_device ? '+' : '-',
1370                device->queue_depth);
1371}
1372
1373/* Assumes the SCSI device list lock is held. */
1374
1375static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1376        struct pqi_scsi_dev *new_device)
1377{
1378        existing_device->devtype = new_device->devtype;
1379        existing_device->device_type = new_device->device_type;
1380        existing_device->bus = new_device->bus;
1381        if (new_device->target_lun_valid) {
1382                existing_device->target = new_device->target;
1383                existing_device->lun = new_device->lun;
1384                existing_device->target_lun_valid = true;
1385        }
1386
1387        /* By definition, the scsi3addr and wwid fields are already the same. */
1388
1389        existing_device->is_physical_device = new_device->is_physical_device;
1390        existing_device->expose_device = new_device->expose_device;
1391        existing_device->no_uld_attach = new_device->no_uld_attach;
1392        existing_device->aio_enabled = new_device->aio_enabled;
1393        memcpy(existing_device->vendor, new_device->vendor,
1394                sizeof(existing_device->vendor));
1395        memcpy(existing_device->model, new_device->model,
1396                sizeof(existing_device->model));
1397        existing_device->sas_address = new_device->sas_address;
1398        existing_device->raid_level = new_device->raid_level;
1399        existing_device->queue_depth = new_device->queue_depth;
1400        existing_device->aio_handle = new_device->aio_handle;
1401        existing_device->volume_status = new_device->volume_status;
1402        existing_device->active_path_index = new_device->active_path_index;
1403        existing_device->path_map = new_device->path_map;
1404        existing_device->bay = new_device->bay;
1405        memcpy(existing_device->box, new_device->box,
1406                sizeof(existing_device->box));
1407        memcpy(existing_device->phys_connector, new_device->phys_connector,
1408                sizeof(existing_device->phys_connector));
1409        existing_device->offload_configured = new_device->offload_configured;
1410        existing_device->offload_enabled = false;
1411        existing_device->offload_enabled_pending =
1412                new_device->offload_enabled_pending;
1413        existing_device->offload_to_mirror = 0;
1414        kfree(existing_device->raid_map);
1415        existing_device->raid_map = new_device->raid_map;
1416
1417        /* To prevent this from being freed later. */
1418        new_device->raid_map = NULL;
1419}
1420
1421static inline void pqi_free_device(struct pqi_scsi_dev *device)
1422{
1423        if (device) {
1424                kfree(device->raid_map);
1425                kfree(device);
1426        }
1427}
1428
1429/*
1430 * Called when exposing a new device to the OS fails in order to re-adjust
1431 * our internal SCSI device list to match the SCSI ML's view.
1432 */
1433
1434static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1435        struct pqi_scsi_dev *device)
1436{
1437        unsigned long flags;
1438
1439        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1440        list_del(&device->scsi_device_list_entry);
1441        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1442
1443        /* Allow the device structure to be freed later. */
1444        device->keep_device = false;
1445}
1446
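/*
 * Reconcile the freshly scanned device list with the driver's internal list.
 * Under the device list spinlock, only bookkeeping is done: existing devices
 * are marked gone, rediscovered ones are refreshed in place, and new ones are
 * queued on an add list.  The expensive work - removing vanished devices,
 * notifying the SCSI midlayer of queue depth changes, and exposing new
 * devices - happens after the lock is dropped.
 */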
1447static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1448        struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1449{
1450        int rc;
1451        unsigned int i;
1452        unsigned long flags;
1453        enum pqi_find_result find_result;
1454        struct pqi_scsi_dev *device;
1455        struct pqi_scsi_dev *next;
1456        struct pqi_scsi_dev *matching_device;
1457        struct list_head add_list;
1458        struct list_head delete_list;
1459
1460        INIT_LIST_HEAD(&add_list);
1461        INIT_LIST_HEAD(&delete_list);
1462
1463        /*
1464         * The idea here is to do as little work as possible while holding the
1465         * spinlock.  That's why we go to great pains to defer anything other
1466         * than updating the internal device list until after we release the
1467         * spinlock.
1468         */
1469
1470        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1471
1472        /* Assume that all devices in the existing list have gone away. */
1473        list_for_each_entry(device, &ctrl_info->scsi_device_list,
1474                scsi_device_list_entry)
1475                device->device_gone = true;
1476
1477        for (i = 0; i < num_new_devices; i++) {
1478                device = new_device_list[i];
1479
1480                find_result = pqi_scsi_find_entry(ctrl_info, device,
1481                                                &matching_device);
1482
1483                switch (find_result) {
1484                case DEVICE_SAME:
1485                        /*
1486                         * The newly found device is already in the existing
1487                         * device list.
1488                         */
1489                        device->new_device = false;
1490                        matching_device->device_gone = false;
1491                        pqi_scsi_update_device(matching_device, device);
1492                        break;
1493                case DEVICE_NOT_FOUND:
1494                        /*
1495                         * The newly found device is NOT in the existing device
1496                         * list.
1497                         */
1498                        device->new_device = true;
1499                        break;
1500                case DEVICE_CHANGED:
1501                        /*
1502                         * The original device has gone away and we need to add
1503                         * the new device.
1504                         */
1505                        device->new_device = true;
1506                        break;
1507                default:
1508                        WARN_ON(find_result);
1509                        break;
1510                }
1511        }
1512
1513        /* Process all devices that have gone away. */
1514        list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1515                scsi_device_list_entry) {
1516                if (device->device_gone) {
1517                        list_del(&device->scsi_device_list_entry);
1518                        list_add_tail(&device->delete_list_entry, &delete_list);
1519                }
1520        }
1521
1522        /* Process all new devices. */
1523        for (i = 0; i < num_new_devices; i++) {
1524                device = new_device_list[i];
1525                if (!device->new_device)
1526                        continue;
1527                if (device->volume_offline)
1528                        continue;
1529                list_add_tail(&device->scsi_device_list_entry,
1530                        &ctrl_info->scsi_device_list);
1531                list_add_tail(&device->add_list_entry, &add_list);
1532                /* Prevent this device structure from being freed later. */
1533                device->keep_device = true;
1534        }
1535
1536        pqi_update_all_logical_drive_queue_depths(ctrl_info);
1537
1538        list_for_each_entry(device, &ctrl_info->scsi_device_list,
1539                scsi_device_list_entry)
1540                device->offload_enabled =
1541                        device->offload_enabled_pending;
1542
1543        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1544
1545        /* Remove all devices that have gone away. */
1546        list_for_each_entry_safe(device, next, &delete_list,
1547                delete_list_entry) {
1548                if (device->sdev)
1549                        pqi_remove_device(ctrl_info, device);
1550                if (device->volume_offline) {
1551                        pqi_dev_info(ctrl_info, "offline", device);
1552                        pqi_show_volume_status(ctrl_info, device);
1553                } else {
1554                        pqi_dev_info(ctrl_info, "removed", device);
1555                }
1556                list_del(&device->delete_list_entry);
1557                pqi_free_device(device);
1558        }
1559
1560        /*
1561         * Notify the SCSI ML if the queue depth of any existing device has
1562         * changed.
1563         */
1564        list_for_each_entry(device, &ctrl_info->scsi_device_list,
1565                scsi_device_list_entry) {
1566                if (device->sdev && device->queue_depth !=
1567                        device->advertised_queue_depth) {
1568                        device->advertised_queue_depth = device->queue_depth;
1569                        scsi_adjust_queue_depth(device->sdev,
1570                                scsi_get_tag_type(device->sdev),
1571                                device->advertised_queue_depth);
1572                }
1573        }
1574
1575        /* Expose any new devices. */
1576        list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1577                if (device->expose_device && !device->sdev) {
1578                        rc = pqi_add_device(ctrl_info, device);
1579                        if (rc) {
1580                                dev_warn(&ctrl_info->pci_dev->dev,
1581                                        "scsi %d:%d:%d:%d addition failed, device not added\n",
1582                                        ctrl_info->scsi_host->host_no,
1583                                        device->bus, device->target,
1584                                        device->lun);
1585                                pqi_fixup_botched_add(ctrl_info, device);
1586                                continue;
1587                        }
1588                }
1589                pqi_dev_info(ctrl_info, "added", device);
1590        }
1591}
1592
1593static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1594{
1595        bool is_supported = false;
1596
1597        switch (device->devtype) {
1598        case TYPE_DISK:
1599        case TYPE_ZBC:
1600        case TYPE_TAPE:
1601        case TYPE_MEDIUM_CHANGER:
1602        case TYPE_ENCLOSURE:
1603                is_supported = true;
1604                break;
1605        case TYPE_RAID:
1606                /*
1607                 * Only support the HBA controller itself as a RAID
1608                 * controller.  If it's a RAID controller other than
1609                 * the HBA itself (an external RAID controller, MSA500
1610                 * or similar), we don't support it.
1611                 */
1612                if (pqi_is_hba_lunid(device->scsi3addr))
1613                        is_supported = true;
1614                break;
1615        }
1616
1617        return is_supported;
1618}
1619
1620static inline bool pqi_skip_device(u8 *scsi3addr,
1621        struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1622{
1623        u8 device_flags;
1624
1625        if (!MASKED_DEVICE(scsi3addr))
1626                return false;
1627
1628        /* The device is masked. */
1629
1630        device_flags = phys_lun_ext_entry->device_flags;
1631
1632        if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1633                /*
1634                 * It's a non-disk device.  We ignore all devices of this type
1635                 * when they're masked.
1636                 */
1637                return true;
1638        }
1639
1640        return false;
1641}
1642
1643static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1644{
1645        /* Expose all devices except for physical devices that are masked. */
1646        if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1647                return false;
1648
1649        return true;
1650}
1651
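/*
 * Rescan: fetch the physical and logical LUN lists from the controller,
 * gather identify data for each reported device, and pass the resulting
 * array to pqi_update_device_list() to reconcile it with the driver's
 * current device list.
 */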
1652static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1653{
1654        int i;
1655        int rc;
1656        struct list_head new_device_list_head;
1657        struct report_phys_lun_extended *physdev_list = NULL;
1658        struct report_log_lun_extended *logdev_list = NULL;
1659        struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1660        struct report_log_lun_extended_entry *log_lun_ext_entry;
1661        struct bmic_identify_physical_device *id_phys = NULL;
1662        u32 num_physicals;
1663        u32 num_logicals;
1664        struct pqi_scsi_dev **new_device_list = NULL;
1665        struct pqi_scsi_dev *device;
1666        struct pqi_scsi_dev *next;
1667        unsigned int num_new_devices;
1668        unsigned int num_valid_devices;
1669        bool is_physical_device;
1670        u8 *scsi3addr;
1671        static char *out_of_memory_msg =
1672                "out of memory, device discovery stopped";
1673
1674        INIT_LIST_HEAD(&new_device_list_head);
1675
1676        rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1677        if (rc)
1678                goto out;
1679
1680        if (physdev_list)
1681                num_physicals =
1682                        get_unaligned_be32(&physdev_list->header.list_length)
1683                                / sizeof(physdev_list->lun_entries[0]);
1684        else
1685                num_physicals = 0;
1686
1687        if (logdev_list)
1688                num_logicals =
1689                        get_unaligned_be32(&logdev_list->header.list_length)
1690                                / sizeof(logdev_list->lun_entries[0]);
1691        else
1692                num_logicals = 0;
1693
1694        if (num_physicals) {
1695                /*
1696                 * We need this buffer for calls to pqi_get_physical_disk_info()
1697                 * below.  We allocate it here instead of inside
1698                 * pqi_get_physical_disk_info() because it's a fairly large
1699                 * buffer.
1700                 */
1701                id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1702                if (!id_phys) {
1703                        dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1704                                out_of_memory_msg);
1705                        rc = -ENOMEM;
1706                        goto out;
1707                }
1708        }
1709
1710        num_new_devices = num_physicals + num_logicals;
1711
1712        new_device_list = kmalloc(sizeof(*new_device_list) *
1713                num_new_devices, GFP_KERNEL);
1714        if (!new_device_list) {
1715                dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1716                rc = -ENOMEM;
1717                goto out;
1718        }
1719
1720        for (i = 0; i < num_new_devices; i++) {
1721                device = kzalloc(sizeof(*device), GFP_KERNEL);
1722                if (!device) {
1723                        dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1724                                out_of_memory_msg);
1725                        rc = -ENOMEM;
1726                        goto out;
1727                }
1728                list_add_tail(&device->new_device_list_entry,
1729                        &new_device_list_head);
1730        }
1731
1732        device = NULL;
1733        num_valid_devices = 0;
1734
1735        for (i = 0; i < num_new_devices; i++) {
1736
1737                if (i < num_physicals) {
1738                        is_physical_device = true;
1739                        phys_lun_ext_entry = &physdev_list->lun_entries[i];
1740                        log_lun_ext_entry = NULL;
1741                        scsi3addr = phys_lun_ext_entry->lunid;
1742                } else {
1743                        is_physical_device = false;
1744                        phys_lun_ext_entry = NULL;
1745                        log_lun_ext_entry =
1746                                &logdev_list->lun_entries[i - num_physicals];
1747                        scsi3addr = log_lun_ext_entry->lunid;
1748                }
1749
1750                if (is_physical_device &&
1751                        pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1752                        continue;
1753
1754                if (device)
1755                        device = list_next_entry(device, new_device_list_entry);
1756                else
1757                        device = list_first_entry(&new_device_list_head,
1758                                struct pqi_scsi_dev, new_device_list_entry);
1759
1760                memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1761                device->is_physical_device = is_physical_device;
1762                device->raid_level = SA_RAID_UNKNOWN;
1763
1764                /* Gather information about the device. */
1765                rc = pqi_get_device_info(ctrl_info, device);
1766                if (rc == -ENOMEM) {
1767                        dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1768                                out_of_memory_msg);
1769                        goto out;
1770                }
1771                if (rc) {
1772                        dev_warn(&ctrl_info->pci_dev->dev,
1773                                "obtaining device info failed, skipping device %016llx\n",
1774                                get_unaligned_be64(device->scsi3addr));
1775                        rc = 0;
1776                        continue;
1777                }
1778
1779                if (!pqi_is_supported_device(device))
1780                        continue;
1781
1782                pqi_assign_bus_target_lun(device);
1783
1784                device->expose_device = pqi_expose_device(device);
1785
1786                if (device->is_physical_device) {
1787                        device->wwid = phys_lun_ext_entry->wwid;
1788                        if ((phys_lun_ext_entry->device_flags &
1789                                REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1790                                phys_lun_ext_entry->aio_handle)
1791                                device->aio_enabled = true;
1792                } else {
1793                        memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1794                                sizeof(device->volume_id));
1795                }
1796
1797                switch (device->devtype) {
1798                case TYPE_DISK:
1799                case TYPE_ZBC:
1800                case TYPE_ENCLOSURE:
1801                        if (device->is_physical_device) {
1802                                device->sas_address =
1803                                        get_unaligned_be64(&device->wwid);
1804                                if (device->devtype == TYPE_DISK ||
1805                                        device->devtype == TYPE_ZBC) {
1806                                        device->aio_handle =
1807                                                phys_lun_ext_entry->aio_handle;
1808                                        pqi_get_physical_disk_info(ctrl_info,
1809                                                device, id_phys);
1810                                }
1811                        }
1812                        break;
1813                }
1814
1815                new_device_list[num_valid_devices++] = device;
1816        }
1817
1818        pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1819
1820out:
1821        list_for_each_entry_safe(device, next, &new_device_list_head,
1822                new_device_list_entry) {
1823                if (device->keep_device)
1824                        continue;
1825                list_del(&device->new_device_list_entry);
1826                pqi_free_device(device);
1827        }
1828
1829        kfree(new_device_list);
1830        kfree(physdev_list);
1831        kfree(logdev_list);
1832        kfree(id_phys);
1833
1834        return rc;
1835}
1836
1837static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1838{
1839        unsigned long flags;
1840        struct pqi_scsi_dev *device;
1841        struct pqi_scsi_dev *next;
1842
1843        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1844
1845        list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1846                scsi_device_list_entry) {
1847                if (device->sdev)
1848                        pqi_remove_device(ctrl_info, device);
1849                list_del(&device->scsi_device_list_entry);
1850                pqi_free_device(device);
1851        }
1852
1853        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1854}
1855
1856static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1857{
1858        int rc;
1859
1860        if (pqi_ctrl_offline(ctrl_info))
1861                return -ENXIO;
1862
1863        mutex_lock(&ctrl_info->scan_mutex);
1864
1865        rc = pqi_update_scsi_devices(ctrl_info);
1866        if (rc)
1867                pqi_schedule_rescan_worker(ctrl_info);
1868
1869        mutex_unlock(&ctrl_info->scan_mutex);
1870
1871        return rc;
1872}
1873
1874static void pqi_scan_start(struct Scsi_Host *shost)
1875{
1876        pqi_scan_scsi_devices(shost_to_hba(shost));
1877}
1878
1879/* Returns TRUE if scan is finished. */
1880
1881static int pqi_scan_finished(struct Scsi_Host *shost,
1882        unsigned long elapsed_time)
1883{
1884        struct pqi_ctrl_info *ctrl_info;
1885
1886        ctrl_info = shost_priv(shost);
1887
1888        return !mutex_is_locked(&ctrl_info->scan_mutex);
1889}
1890
1891static inline void pqi_set_encryption_info(
1892        struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1893        u64 first_block)
1894{
1895        u32 volume_blk_size;
1896
1897        /*
1898         * Set the encryption tweak values based on logical block address.
1899         * If the block size is 512, the tweak value is equal to the LBA.
1900         * For other block sizes, tweak value is (LBA * block size) / 512.
1901         */
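        /*
         * Illustrative example (values chosen arbitrarily): with a 4096-byte
         * volume block size, LBA 100 yields a tweak of
         * (100 * 4096) / 512 = 800.
         */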
1902        volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1903        if (volume_blk_size != 512)
1904                first_block = (first_block * volume_blk_size) / 512;
1905
1906        encryption_info->data_encryption_key_index =
1907                get_unaligned_le16(&raid_map->data_encryption_key_index);
1908        encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1909        encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1910}
1911
1912/*
1913 * Attempt to perform offload RAID mapping for a logical volume I/O.
1914 */
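/*
 * In outline: decode the LBA and transfer length from the CDB, map them onto
 * the volume's RAID layout (row, column, and, for RAID 1/ADM/50/60, the
 * mirror or parity group), reject anything that spans more than one row or
 * column, then build a new READ/WRITE CDB addressed to the chosen physical
 * drive and submit it on the AIO path, adding encryption parameters when the
 * volume is encrypted.  Returns PQI_RAID_BYPASS_INELIGIBLE when the request
 * must be sent down the normal RAID path instead.
 */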
1915
1916#define PQI_RAID_BYPASS_INELIGIBLE      1
1917
1918static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1919        struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1920        struct pqi_queue_group *queue_group)
1921{
1922        struct raid_map *raid_map;
1923        bool is_write = false;
1924        u32 map_index;
1925        u64 first_block;
1926        u64 last_block;
1927        u32 block_cnt;
1928        u32 blocks_per_row;
1929        u64 first_row;
1930        u64 last_row;
1931        u32 first_row_offset;
1932        u32 last_row_offset;
1933        u32 first_column;
1934        u32 last_column;
1935        u64 r0_first_row;
1936        u64 r0_last_row;
1937        u32 r5or6_blocks_per_row;
1938        u64 r5or6_first_row;
1939        u64 r5or6_last_row;
1940        u32 r5or6_first_row_offset;
1941        u32 r5or6_last_row_offset;
1942        u32 r5or6_first_column;
1943        u32 r5or6_last_column;
1944        u16 data_disks_per_row;
1945        u32 total_disks_per_row;
1946        u16 layout_map_count;
1947        u32 stripesize;
1948        u16 strip_size;
1949        u32 first_group;
1950        u32 last_group;
1951        u32 current_group;
1952        u32 map_row;
1953        u32 aio_handle;
1954        u64 disk_block;
1955        u32 disk_block_cnt;
1956        u8 cdb[16];
1957        u8 cdb_length;
1958        int offload_to_mirror;
1959        struct pqi_encryption_info *encryption_info_ptr;
1960        struct pqi_encryption_info encryption_info;
1961#if BITS_PER_LONG == 32
1962        u64 tmpdiv;
1963#endif
1964
1965        /* Check for valid opcode, get LBA and block count. */
1966        switch (scmd->cmnd[0]) {
1967        case WRITE_6:
1968                is_write = true;
1969                /* fall through */
1970        case READ_6:
1971                first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
                        get_unaligned_be16(&scmd->cmnd[2]));
1972                block_cnt = (u32)scmd->cmnd[4];
1973                if (block_cnt == 0)
1974                        block_cnt = 256;
1975                break;
1976        case WRITE_10:
1977                is_write = true;
1978                /* fall through */
1979        case READ_10:
1980                first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1981                block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1982                break;
1983        case WRITE_12:
1984                is_write = true;
1985                /* fall through */
1986        case READ_12:
1987                first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1988                block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1989                break;
1990        case WRITE_16:
1991                is_write = true;
1992                /* fall through */
1993        case READ_16:
1994                first_block = get_unaligned_be64(&scmd->cmnd[2]);
1995                block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1996                break;
1997        default:
1998                /* Process via normal I/O path. */
1999                return PQI_RAID_BYPASS_INELIGIBLE;
2000        }
2001
2002        /* Check for write to non-RAID-0. */
2003        if (is_write && device->raid_level != SA_RAID_0)
2004                return PQI_RAID_BYPASS_INELIGIBLE;
2005
2006        if (unlikely(block_cnt == 0))
2007                return PQI_RAID_BYPASS_INELIGIBLE;
2008
2009        last_block = first_block + block_cnt - 1;
2010        raid_map = device->raid_map;
2011
2012        /* Check for invalid block or wraparound. */
2013        if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2014                last_block < first_block)
2015                return PQI_RAID_BYPASS_INELIGIBLE;
2016
2017        data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2018        strip_size = get_unaligned_le16(&raid_map->strip_size);
2019        layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2020
2021        /* Calculate stripe information for the request. */
2022        blocks_per_row = data_disks_per_row * strip_size;
2023#if BITS_PER_LONG == 32
2024        tmpdiv = first_block;
2025        do_div(tmpdiv, blocks_per_row);
2026        first_row = tmpdiv;
2027        tmpdiv = last_block;
2028        do_div(tmpdiv, blocks_per_row);
2029        last_row = tmpdiv;
2030        first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2031        last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2032        tmpdiv = first_row_offset;
2033        do_div(tmpdiv, strip_size);
2034        first_column = tmpdiv;
2035        tmpdiv = last_row_offset;
2036        do_div(tmpdiv, strip_size);
2037        last_column = tmpdiv;
2038#else
2039        first_row = first_block / blocks_per_row;
2040        last_row = last_block / blocks_per_row;
2041        first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2042        last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2043        first_column = first_row_offset / strip_size;
2044        last_column = last_row_offset / strip_size;
2045#endif
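        /*
         * Illustrative example (arbitrary values): with 4 data disks and a
         * strip size of 256 blocks, blocks_per_row = 1024; first_block 2500
         * then gives first_row = 2, first_row_offset = 452 and
         * first_column = 1.
         */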
2046
2047        /* If this isn't a single row/column, give it to the controller. */
2048        if (first_row != last_row || first_column != last_column)
2049                return PQI_RAID_BYPASS_INELIGIBLE;
2050
2051        /* Proceeding with driver mapping. */
2052        total_disks_per_row = data_disks_per_row +
2053                get_unaligned_le16(&raid_map->metadata_disks_per_row);
2054        map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2055                get_unaligned_le16(&raid_map->row_cnt);
2056        map_index = (map_row * total_disks_per_row) + first_column;
2057
2058        /* RAID 1 */
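        /*
         * Only reads reach this point for RAID 1 (writes to non-RAID-0
         * volumes were rejected above); alternate between the two copies to
         * spread the load.
         */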
2059        if (device->raid_level == SA_RAID_1) {
2060                if (device->offload_to_mirror)
2061                        map_index += data_disks_per_row;
2062                device->offload_to_mirror = !device->offload_to_mirror;
2063        } else if (device->raid_level == SA_RAID_ADM) {
2064                /* RAID ADM */
2065                /*
2066                 * Handles N-way mirrors (R1-ADM) and R10 with the number
2067                 * of drives divisible by 3.
2068                 */
2069                offload_to_mirror = device->offload_to_mirror;
2070                if (offload_to_mirror == 0)  {
2071                        /* use physical disk in the first mirrored group. */
2072                        map_index %= data_disks_per_row;
2073                } else {
2074                        do {
2075                                /*
2076                                 * Determine mirror group that map_index
2077                                 * indicates.
2078                                 */
2079                                current_group = map_index / data_disks_per_row;
2080
2081                                if (offload_to_mirror != current_group) {
2082                                        if (current_group <
2083                                                layout_map_count - 1) {
2084                                                /*
2085                                                 * Select raid index from
2086                                                 * next group.
2087                                                 */
2088                                                map_index += data_disks_per_row;
2089                                                current_group++;
2090                                        } else {
2091                                                /*
2092                                                 * Select raid index from first
2093                                                 * group.
2094                                                 */
2095                                                map_index %= data_disks_per_row;
2096                                                current_group = 0;
2097                                        }
2098                                }
2099                        } while (offload_to_mirror != current_group);
2100                }
2101
2102                /* Set mirror group to use next time. */
2103                offload_to_mirror =
2104                        (offload_to_mirror >= layout_map_count - 1) ?
2105                                0 : offload_to_mirror + 1;
2106                WARN_ON(offload_to_mirror >= layout_map_count);
2107                device->offload_to_mirror = offload_to_mirror;
2108                /*
2109                 * Avoid direct use of device->offload_to_mirror within this
2110                 * function since multiple threads might simultaneously
2111                 * increment it beyond the range of layout_map_count - 1.
2112                 */
2113        } else if ((device->raid_level == SA_RAID_5 ||
2114                device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2115                /* RAID 50/60 */
2116                /* Verify first and last block are in same RAID group */
2117                r5or6_blocks_per_row = strip_size * data_disks_per_row;
2118                stripesize = r5or6_blocks_per_row * layout_map_count;
2119#if BITS_PER_LONG == 32
2120                tmpdiv = first_block;
2121                first_group = do_div(tmpdiv, stripesize);
2122                tmpdiv = first_group;
2123                do_div(tmpdiv, r5or6_blocks_per_row);
2124                first_group = tmpdiv;
2125                tmpdiv = last_block;
2126                last_group = do_div(tmpdiv, stripesize);
2127                tmpdiv = last_group;
2128                do_div(tmpdiv, r5or6_blocks_per_row);
2129                last_group = tmpdiv;
2130#else
2131                first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2132                last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2133#endif
2134                if (first_group != last_group)
2135                        return PQI_RAID_BYPASS_INELIGIBLE;
2136
2137                /* Verify request is in a single row of RAID 5/6 */
2138#if BITS_PER_LONG == 32
2139                tmpdiv = first_block;
2140                do_div(tmpdiv, stripesize);
2141                first_row = r5or6_first_row = r0_first_row = tmpdiv;
2142                tmpdiv = last_block;
2143                do_div(tmpdiv, stripesize);
2144                r5or6_last_row = r0_last_row = tmpdiv;
2145#else
2146                first_row = r5or6_first_row = r0_first_row =
2147                        first_block / stripesize;
2148                r5or6_last_row = r0_last_row = last_block / stripesize;
2149#endif
2150                if (r5or6_first_row != r5or6_last_row)
2151                        return PQI_RAID_BYPASS_INELIGIBLE;
2152
2153                /* Verify request is in a single column */
2154#if BITS_PER_LONG == 32
2155                tmpdiv = first_block;
2156                first_row_offset = do_div(tmpdiv, stripesize);
2157                tmpdiv = first_row_offset;
2158                first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2159                r5or6_first_row_offset = first_row_offset;
2160                tmpdiv = last_block;
2161                r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2162                tmpdiv = r5or6_last_row_offset;
2163                r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2164                tmpdiv = r5or6_first_row_offset;
2165                do_div(tmpdiv, strip_size);
2166                first_column = r5or6_first_column = tmpdiv;
2167                tmpdiv = r5or6_last_row_offset;
2168                do_div(tmpdiv, strip_size);
2169                r5or6_last_column = tmpdiv;
2170#else
2171                first_row_offset = r5or6_first_row_offset =
2172                        (u32)((first_block % stripesize) %
2173                        r5or6_blocks_per_row);
2174
2175                r5or6_last_row_offset =
2176                        (u32)((last_block % stripesize) %
2177                        r5or6_blocks_per_row);
2178
2179                first_column = r5or6_first_row_offset / strip_size;
2180                r5or6_first_column = first_column;
2181                r5or6_last_column = r5or6_last_row_offset / strip_size;
2182#endif
2183                if (r5or6_first_column != r5or6_last_column)
2184                        return PQI_RAID_BYPASS_INELIGIBLE;
2185
2186                /* Request is eligible */
2187                map_row =
2188                        ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2189                        get_unaligned_le16(&raid_map->row_cnt);
2190
2191                map_index = (first_group *
2192                        (get_unaligned_le16(&raid_map->row_cnt) *
2193                        total_disks_per_row)) +
2194                        (map_row * total_disks_per_row) + first_column;
2195        }
2196
2197        if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2198                return PQI_RAID_BYPASS_INELIGIBLE;
2199
2200        aio_handle = raid_map->disk_data[map_index].aio_handle;
2201        disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2202                first_row * strip_size +
2203                (first_row_offset - first_column * strip_size);
2204        disk_block_cnt = block_cnt;
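        /*
         * disk_block is the block number on the selected physical drive: the
         * volume's starting block on that drive, plus strip_size blocks for
         * each full row above this one, plus the offset of first_block
         * within its strip (first_row_offset - first_column * strip_size).
         */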
2205
2206        /* Handle differing logical/physical block sizes. */
2207        if (raid_map->phys_blk_shift) {
2208                disk_block <<= raid_map->phys_blk_shift;
2209                disk_block_cnt <<= raid_map->phys_blk_shift;
2210        }
2211
2212        if (unlikely(disk_block_cnt > 0xffff))
2213                return PQI_RAID_BYPASS_INELIGIBLE;
2214
2215        /* Build the new CDB for the physical disk I/O. */
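        /*
         * A 16-byte CDB is needed when the physical block number no longer
         * fits in the 32-bit LBA field of READ(10)/WRITE(10); otherwise the
         * shorter 10-byte CDB suffices.
         */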
2216        if (disk_block > 0xffffffff) {
2217                cdb[0] = is_write ? WRITE_16 : READ_16;
2218                cdb[1] = 0;
2219                put_unaligned_be64(disk_block, &cdb[2]);
2220                put_unaligned_be32(disk_block_cnt, &cdb[10]);
2221                cdb[14] = 0;
2222                cdb[15] = 0;
2223                cdb_length = 16;
2224        } else {
2225                cdb[0] = is_write ? WRITE_10 : READ_10;
2226                cdb[1] = 0;
2227                put_unaligned_be32((u32)disk_block, &cdb[2]);
2228                cdb[6] = 0;
2229                put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2230                cdb[9] = 0;
2231                cdb_length = 10;
2232        }
2233
2234        if (get_unaligned_le16(&raid_map->flags) &
2235                RAID_MAP_ENCRYPTION_ENABLED) {
2236                pqi_set_encryption_info(&encryption_info, raid_map,
2237                        first_block);
2238                encryption_info_ptr = &encryption_info;
2239        } else {
2240                encryption_info_ptr = NULL;
2241        }
2242
2243        return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2244                cdb, cdb_length, queue_group, encryption_info_ptr);
2245}
2246
2247#define PQI_STATUS_IDLE         0x0
2248
2249#define PQI_CREATE_ADMIN_QUEUE_PAIR     1
2250#define PQI_DELETE_ADMIN_QUEUE_PAIR     2
2251
2252#define PQI_DEVICE_STATE_POWER_ON_AND_RESET             0x0
2253#define PQI_DEVICE_STATE_STATUS_AVAILABLE               0x1
2254#define PQI_DEVICE_STATE_ALL_REGISTERS_READY            0x2
2255#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY         0x3
2256#define PQI_DEVICE_STATE_ERROR                          0x4
2257
2258#define PQI_MODE_READY_TIMEOUT_SECS             30
2259#define PQI_MODE_READY_POLL_INTERVAL_MSECS      1
2260
2261static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2262{
2263        struct pqi_device_registers __iomem *pqi_registers;
2264        unsigned long timeout;
2265        u64 signature;
2266        u8 status;
2267
2268        pqi_registers = ctrl_info->pqi_registers;
2269        timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
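        /* The three polling loops below share this single deadline. */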
2270
2271        while (1) {
2272                signature = readq(&pqi_registers->signature);
2273                if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2274                        sizeof(signature)) == 0)
2275                        break;
2276                if (time_after(jiffies, timeout)) {
2277                        dev_err(&ctrl_info->pci_dev->dev,
2278                                "timed out waiting for PQI signature\n");
2279                        return -ETIMEDOUT;
2280                }
2281                msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2282        }
2283
2284        while (1) {
2285                status = readb(&pqi_registers->function_and_status_code);
2286                if (status == PQI_STATUS_IDLE)
2287                        break;
2288                if (time_after(jiffies, timeout)) {
2289                        dev_err(&ctrl_info->pci_dev->dev,
2290                                "timed out waiting for PQI IDLE\n");
2291                        return -ETIMEDOUT;
2292                }
2293                msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2294        }
2295
2296        while (1) {
2297                if (readl(&pqi_registers->device_status) ==
2298                        PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2299                        break;
2300                if (time_after(jiffies, timeout)) {
2301                        dev_err(&ctrl_info->pci_dev->dev,
2302                                "timed out waiting for PQI all registers ready\n");
2303                        return -ETIMEDOUT;
2304                }
2305                msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2306        }
2307
2308        return 0;
2309}
2310
2311static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2312{
2313        struct pqi_scsi_dev *device;
2314
2315        device = io_request->scmd->device->hostdata;
2316        device->offload_enabled = false;
2317}
2318
2319static inline void pqi_take_device_offline(struct scsi_device *sdev)
2320{
2321        struct pqi_ctrl_info *ctrl_info;
2322        struct pqi_scsi_dev *device;
2323
2324        if (scsi_device_online(sdev)) {
2325                scsi_device_set_state(sdev, SDEV_OFFLINE);
2326                ctrl_info = shost_to_hba(sdev->host);
2327                schedule_delayed_work(&ctrl_info->rescan_work, 0);
2328                device = sdev->hostdata;
2329                dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
2330                        ctrl_info->scsi_host->host_no, device->bus,
2331                        device->target, device->lun);
2332        }
2333}
2334
2335static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2336{
2337        u8 scsi_status;
2338        u8 host_byte;
2339        struct scsi_cmnd *scmd;
2340        struct pqi_raid_error_info *error_info;
2341        size_t sense_data_length;
2342        int residual_count;
2343        int xfer_count;
2344        struct scsi_sense_hdr sshdr;
2345
2346        scmd = io_request->scmd;
2347        if (!scmd)
2348                return;
2349
2350        error_info = io_request->error_info;
2351        scsi_status = error_info->status;
2352        host_byte = DID_OK;
2353
2354        if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2355                xfer_count =
2356                        get_unaligned_le32(&error_info->data_out_transferred);
2357                residual_count = scsi_bufflen(scmd) - xfer_count;
2358                scsi_set_resid(scmd, residual_count);
2359                if (xfer_count < scmd->underflow)
2360                        host_byte = DID_SOFT_ERROR;
2361        }
2362
2363        sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2364        if (sense_data_length == 0)
2365                sense_data_length =
2366                        get_unaligned_le16(&error_info->response_data_length);
2367        if (sense_data_length) {
2368                if (sense_data_length > sizeof(error_info->data))
2369                        sense_data_length = sizeof(error_info->data);
2370
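                /*
                 * Sense key HARDWARE ERROR with ASC/ASCQ 0x3e/0x1 (logical
                 * unit failure) indicates the device itself has failed, so
                 * take it offline.
                 */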
2371                if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2372                        scsi_normalize_sense(error_info->data,
2373                                sense_data_length, &sshdr) &&
2374                                sshdr.sense_key == HARDWARE_ERROR &&
2375                                sshdr.asc == 0x3e &&
2376                                sshdr.ascq == 0x1) {
2377                        pqi_take_device_offline(scmd->device);
2378                        host_byte = DID_NO_CONNECT;
2379                }
2380
2381                if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2382                        sense_data_length = SCSI_SENSE_BUFFERSIZE;
2383                memcpy(scmd->sense_buffer, error_info->data,
2384                        sense_data_length);
2385        }
2386
2387        scmd->result = scsi_status;
2388        set_host_byte(scmd, host_byte);
2389}
2390
2391static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2392{
2393        u8 scsi_status;
2394        u8 host_byte;
2395        struct scsi_cmnd *scmd;
2396        struct pqi_aio_error_info *error_info;
2397        size_t sense_data_length;
2398        int residual_count;
2399        int xfer_count;
2400        bool device_offline;
2401
2402        scmd = io_request->scmd;
2403        error_info = io_request->error_info;
2404        host_byte = DID_OK;
2405        sense_data_length = 0;
2406        device_offline = false;
2407
2408        switch (error_info->service_response) {
2409        case PQI_AIO_SERV_RESPONSE_COMPLETE:
2410                scsi_status = error_info->status;
2411                break;
2412        case PQI_AIO_SERV_RESPONSE_FAILURE:
2413                switch (error_info->status) {
2414                case PQI_AIO_STATUS_IO_ABORTED:
2415                        scsi_status = SAM_STAT_TASK_ABORTED;
2416                        break;
2417                case PQI_AIO_STATUS_UNDERRUN:
2418                        scsi_status = SAM_STAT_GOOD;
2419                        residual_count = get_unaligned_le32(
2420                                                &error_info->residual_count);
2421                        scsi_set_resid(scmd, residual_count);
2422                        xfer_count = scsi_bufflen(scmd) - residual_count;
2423                        if (xfer_count < scmd->underflow)
2424                                host_byte = DID_SOFT_ERROR;
2425                        break;
2426                case PQI_AIO_STATUS_OVERRUN:
2427                        scsi_status = SAM_STAT_GOOD;
2428                        break;
2429                case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2430                        pqi_aio_path_disabled(io_request);
2431                        scsi_status = SAM_STAT_GOOD;
2432                        io_request->status = -EAGAIN;
2433                        break;
2434                case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2435                case PQI_AIO_STATUS_INVALID_DEVICE:
2436                        device_offline = true;
2437                        pqi_take_device_offline(scmd->device);
2438                        host_byte = DID_NO_CONNECT;
2439                        scsi_status = SAM_STAT_CHECK_CONDITION;
2440                        break;
2441                case PQI_AIO_STATUS_IO_ERROR:
2442                default:
2443                        scsi_status = SAM_STAT_CHECK_CONDITION;
2444                        break;
2445                }
2446                break;
2447        case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2448        case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2449                scsi_status = SAM_STAT_GOOD;
2450                break;
2451        case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2452        case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2453        default:
2454                scsi_status = SAM_STAT_CHECK_CONDITION;
2455                break;
2456        }
2457
2458        if (error_info->data_present) {
2459                sense_data_length =
2460                        get_unaligned_le16(&error_info->data_length);
2461                if (sense_data_length) {
2462                        if (sense_data_length > sizeof(error_info->data))
2463                                sense_data_length = sizeof(error_info->data);
2464                        if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2465                                sense_data_length = SCSI_SENSE_BUFFERSIZE;
2466                        memcpy(scmd->sense_buffer, error_info->data,
2467                                sense_data_length);
2468                }
2469        }
2470
2471        if (device_offline && sense_data_length == 0)
2472                scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2473                        0x3e, 0x1);
2474
2475        scmd->result = scsi_status;
2476        set_host_byte(scmd, host_byte);
2477}
2478
2479static void pqi_process_io_error(unsigned int iu_type,
2480        struct pqi_io_request *io_request)
2481{
2482        switch (iu_type) {
2483        case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2484                pqi_process_raid_io_error(io_request);
2485                break;
2486        case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2487                pqi_process_aio_io_error(io_request);
2488                break;
2489        }
2490}
2491
2492static int pqi_interpret_task_management_response(
2493        struct pqi_task_management_response *response)
2494{
2495        int rc;
2496
2497        switch (response->response_code) {
2498        case SOP_TMF_COMPLETE:
2499        case SOP_TMF_FUNCTION_SUCCEEDED:
2500                rc = 0;
2501                break;
2502        default:
2503                rc = -EIO;
2504                break;
2505        }
2506
2507        return rc;
2508}
2509
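/*
 * Drain completed responses from this queue group's outbound queue: look up
 * the originating I/O request by request_id, record any error information,
 * invoke the completion callback, and finally write the updated consumer
 * index back to the controller.  Returns the number of responses handled.
 */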
2510static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2511        struct pqi_queue_group *queue_group)
2512{
2513        unsigned int num_responses;
2514        pqi_index_t oq_pi;
2515        pqi_index_t oq_ci;
2516        struct pqi_io_request *io_request;
2517        struct pqi_io_response *response;
2518        u16 request_id;
2519
2520        num_responses = 0;
2521        oq_ci = queue_group->oq_ci_copy;
2522
2523        while (1) {
2524                oq_pi = *queue_group->oq_pi;
2525                if (oq_pi == oq_ci)
2526                        break;
2527
2528                num_responses++;
2529                response = queue_group->oq_element_array +
2530                        (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2531
2532                request_id = get_unaligned_le16(&response->request_id);
2533                WARN_ON(request_id >= ctrl_info->max_io_slots);
2534
2535                io_request = &ctrl_info->io_request_pool[request_id];
2536                WARN_ON(atomic_read(&io_request->refcount) == 0);
2537
2538                switch (response->header.iu_type) {
2539                case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2540                case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2541                case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2542                        break;
2543                case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2544                        io_request->status =
2545                                pqi_interpret_task_management_response(
2546                                        (void *)response);
2547                        break;
2548                case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2549                        pqi_aio_path_disabled(io_request);
2550                        io_request->status = -EAGAIN;
2551                        break;
2552                case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2553                case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2554                        io_request->error_info = ctrl_info->error_buffer +
2555                                (get_unaligned_le16(&response->error_index) *
2556                                PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2557                        pqi_process_io_error(response->header.iu_type,
2558                                io_request);
2559                        break;
2560                default:
2561                        dev_err(&ctrl_info->pci_dev->dev,
2562                                "unexpected IU type: 0x%x\n",
2563                                response->header.iu_type);
2564                        WARN_ON(response->header.iu_type);
2565                        break;
2566                }
2567
2568                io_request->io_complete_callback(io_request,
2569                        io_request->context);
2570
2571                /*
2572                 * Note that the I/O request structure CANNOT BE TOUCHED after
2573                 * returning from the I/O completion callback!
2574                 */
2575
2576                oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2577        }
2578
2579        if (num_responses) {
2580                queue_group->oq_ci_copy = oq_ci;
2581                writel(oq_ci, queue_group->oq_ci);
2582        }
2583
2584        return num_responses;
2585}
2586
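/*
 * Number of free elements in a circular queue with producer index pi and
 * consumer index ci.  One slot is always left unused so that a full queue
 * can be distinguished from an empty one (pi == ci means empty).
 */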
2587static inline unsigned int pqi_num_elements_free(unsigned int pi,
2588        unsigned int ci, unsigned int elements_in_queue)
2589{
2590        unsigned int num_elements_used;
2591
2592        if (pi >= ci)
2593                num_elements_used = pi - ci;
2594        else
2595                num_elements_used = elements_in_queue - ci + pi;
2596
2597        return elements_in_queue - num_elements_used - 1;
2598}
2599
2600#define PQI_EVENT_ACK_TIMEOUT   30      /* seconds */
2601
2602static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2603        struct pqi_event_acknowledge_request *iu, size_t iu_length)
2604{
2605        pqi_index_t iq_pi;
2606        pqi_index_t iq_ci;
2607        unsigned long flags;
2608        void *next_element;
2609        unsigned long timeout;
2610        struct pqi_queue_group *queue_group;
2611
2612        queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2613        put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2614
2615        timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2616
2617        while (1) {
2618                spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2619
2620                iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2621                iq_ci = *queue_group->iq_ci[RAID_PATH];
2622
2623                if (pqi_num_elements_free(iq_pi, iq_ci,
2624                        ctrl_info->num_elements_per_iq))
2625                        break;
2626
2627                spin_unlock_irqrestore(
2628                        &queue_group->submit_lock[RAID_PATH], flags);
2629
2630                if (time_after(jiffies, timeout)) {
2631                        dev_err(&ctrl_info->pci_dev->dev,
2632                                "sending event acknowledge timed out\n");
2633                        return;
2634                }
2635        }
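        /*
         * The loop above breaks out with the submit lock still held and at
         * least one element free in the RAID-path inbound queue.
         */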
2636
2637        next_element = queue_group->iq_element_array[RAID_PATH] +
2638                (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2639
2640        memcpy(next_element, iu, iu_length);
2641
2642        iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2643
2644        queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2645
2646        /*
2647         * This write notifies the controller that an IU is available to be
2648         * processed.
2649         */
2650        writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2651
2652        spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2653}
2654
2655static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2656        struct pqi_event *event)
2657{
2658        struct pqi_event_acknowledge_request request;
2659
2660        memset(&request, 0, sizeof(request));
2661
2662        request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2663        put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2664                &request.header.iu_length);
2665        request.event_type = event->event_type;
2666        request.event_id = event->event_id;
2667        request.additional_event_id = event->additional_event_id;
2668
2669        pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2670}
2671
2672static void pqi_event_worker(struct work_struct *work)
2673{
2674        unsigned int i;
2675        struct pqi_ctrl_info *ctrl_info;
2676        struct pqi_event *pending_event;
2677        bool got_non_heartbeat_event = false;
2678
2679        ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2680
2681        pending_event = ctrl_info->pending_events;
2682        for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2683                if (pending_event->pending) {
2684                        pending_event->pending = false;
2685                        pqi_acknowledge_event(ctrl_info, pending_event);
2686                        if (i != PQI_EVENT_HEARTBEAT)
2687                                got_non_heartbeat_event = true;
2688                }
2689                pending_event++;
2690        }
2691
2692        if (got_non_heartbeat_event)
2693                pqi_schedule_rescan_worker(ctrl_info);
2694}
2695
2696static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2697{
2698        unsigned int i;
2699        unsigned int path;
2700        struct pqi_queue_group *queue_group;
2701        unsigned long flags;
2702        struct pqi_io_request *io_request;
2703        struct pqi_io_request *next;
2704        struct scsi_cmnd *scmd;
2705
2706        ctrl_info->controller_online = false;
2707        dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2708
2709        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2710                queue_group = &ctrl_info->queue_groups[i];
2711
2712                for (path = 0; path < 2; path++) {
2713                        spin_lock_irqsave(
2714                                &queue_group->submit_lock[path], flags);
2715
2716                        list_for_each_entry_safe(io_request, next,
2717                                &queue_group->request_list[path],
2718                                request_list_entry) {
2719
2720                                scmd = io_request->scmd;
2721                                if (scmd) {
2722                                        set_host_byte(scmd, DID_NO_CONNECT);
2723                                        pqi_scsi_done(scmd);
2724                                }
2725
2726                                list_del(&io_request->request_list_entry);
2727                        }
2728
2729                        spin_unlock_irqrestore(
2730                                &queue_group->submit_lock[path], flags);
2731                }
2732        }
2733}
2734
2735#define PQI_HEARTBEAT_TIMER_INTERVAL    (5 * HZ)
2736#define PQI_MAX_HEARTBEAT_REQUESTS      5
2737
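/*
 * Heartbeat monitoring: the timer fires every PQI_HEARTBEAT_TIMER_INTERVAL.
 * If no interrupts have arrived since the previous tick, a heartbeat event
 * acknowledgment is queued via the event worker to prod the controller;
 * after more than PQI_MAX_HEARTBEAT_REQUESTS consecutive silent intervals
 * the controller is taken offline.
 */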
2738static void pqi_heartbeat_timer_handler(unsigned long data)
2739{
2740        int num_interrupts;
2741        struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2742
2743        num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2744
2745        if (num_interrupts == ctrl_info->previous_num_interrupts) {
2746                ctrl_info->num_heartbeats_requested++;
2747                if (ctrl_info->num_heartbeats_requested >
2748                        PQI_MAX_HEARTBEAT_REQUESTS) {
2749                        pqi_take_ctrl_offline(ctrl_info);
2750                        return;
2751                }
2752                ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2753                schedule_work(&ctrl_info->event_work);
2754        } else {
2755                ctrl_info->num_heartbeats_requested = 0;
2756        }
2757
2758        ctrl_info->previous_num_interrupts = num_interrupts;
2759        mod_timer(&ctrl_info->heartbeat_timer,
2760                jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2761}
2762
2763static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2764{
2765        ctrl_info->previous_num_interrupts =
2766                atomic_read(&ctrl_info->num_interrupts);
2767
2768        init_timer(&ctrl_info->heartbeat_timer);
2769        ctrl_info->heartbeat_timer.expires =
2770                jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2771        ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2772        ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2773        add_timer(&ctrl_info->heartbeat_timer);
2774        ctrl_info->heartbeat_timer_started = true;
2775}
2776
2777static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2778{
2779        if (ctrl_info->heartbeat_timer_started)
2780                del_timer_sync(&ctrl_info->heartbeat_timer);
2781}
2782
2783static int pqi_event_type_to_event_index(unsigned int event_type)
2784{
2785        int index;
2786
2787        switch (event_type) {
2788        case PQI_EVENT_TYPE_HEARTBEAT:
2789                index = PQI_EVENT_HEARTBEAT;
2790                break;
2791        case PQI_EVENT_TYPE_HOTPLUG:
2792                index = PQI_EVENT_HOTPLUG;
2793                break;
2794        case PQI_EVENT_TYPE_HARDWARE:
2795                index = PQI_EVENT_HARDWARE;
2796                break;
2797        case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2798                index = PQI_EVENT_PHYSICAL_DEVICE;
2799                break;
2800        case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2801                index = PQI_EVENT_LOGICAL_DEVICE;
2802                break;
2803        case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2804                index = PQI_EVENT_AIO_STATE_CHANGE;
2805                break;
2806        case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2807                index = PQI_EVENT_AIO_CONFIG_CHANGE;
2808                break;
2809        default:
2810                index = -1;
2811                break;
2812        }
2813
2814        return index;
2815}
2816
2817static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2818{
2819        unsigned int num_events;
2820        pqi_index_t oq_pi;
2821        pqi_index_t oq_ci;
2822        struct pqi_event_queue *event_queue;
2823        struct pqi_event_response *response;
2824        struct pqi_event *pending_event;
2825        bool need_delayed_work;
2826        int event_index;
2827
2828        event_queue = &ctrl_info->event_queue;
2829        num_events = 0;
2830        need_delayed_work = false;
2831        oq_ci = event_queue->oq_ci_copy;
2832
2833        while (1) {
2834                oq_pi = *event_queue->oq_pi;
2835                if (oq_pi == oq_ci)
2836                        break;
2837
2838                num_events++;
2839                response = event_queue->oq_element_array +
2840                        (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2841
2842                event_index =
2843                        pqi_event_type_to_event_index(response->event_type);
2844
2845                if (event_index >= 0) {
2846                        if (response->request_acknowlege) {
2847                                pending_event =
2848                                        &ctrl_info->pending_events[event_index];
2849                                pending_event->event_type =
2850                                        response->event_type;
2851                                pending_event->event_id = response->event_id;
2852                                pending_event->additional_event_id =
2853                                        response->additional_event_id;
2854                                if (event_index != PQI_EVENT_HEARTBEAT) {
2855                                        pending_event->pending = true;
2856                                        need_delayed_work = true;
2857                                }
2858                        }
2859                }
2860
2861                oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2862        }
2863
2864        if (num_events) {
2865                event_queue->oq_ci_copy = oq_ci;
2866                writel(oq_ci, event_queue->oq_ci);
2867
2868                if (need_delayed_work)
2869                        schedule_work(&ctrl_info->event_work);
2870        }
2871
2872        return num_events;
2873}
2874
2875static irqreturn_t pqi_irq_handler(int irq, void *data)
2876{
2877        struct pqi_ctrl_info *ctrl_info;
2878        struct pqi_queue_group *queue_group;
2879        unsigned int num_responses_handled;
2880
2881        queue_group = data;
2882        ctrl_info = queue_group->ctrl_info;
2883
2884        if (!ctrl_info || !queue_group->oq_ci)
2885                return IRQ_NONE;
2886
2887        num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2888
2889        if (irq == ctrl_info->event_irq)
2890                num_responses_handled += pqi_process_event_intr(ctrl_info);
2891
2892        if (num_responses_handled)
2893                atomic_inc(&ctrl_info->num_interrupts);
2894
2895        pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2896        pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2897
2898        return IRQ_HANDLED;
2899}
2900
2901static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2902{
2903        int i;
2904        int rc;
2905
2906        ctrl_info->event_irq = ctrl_info->msix_vectors[0];
2907
2908        for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2909                rc = request_irq(ctrl_info->msix_vectors[i],
2910                        pqi_irq_handler, 0,
2911                        DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2912                if (rc) {
2913                        dev_err(&ctrl_info->pci_dev->dev,
2914                                "irq %u init failed with error %d\n",
2915                                ctrl_info->msix_vectors[i], rc);
2916                        return rc;
2917                }
2918                ctrl_info->num_msix_vectors_initialized++;
2919        }
2920
2921        return 0;
2922}
2923
2924static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2925{
2926        int i;
2927
2928        for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2929                free_irq(ctrl_info->msix_vectors[i],
2930                        ctrl_info->intr_data[i]);
2931}
2932
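/*
 * Enable MSI-X: request one vector per queue group (bounded below by
 * PQI_MIN_MSIX_VECTORS) and record each vector number along with its
 * per-vector interrupt data (the owning queue group) for the later
 * request_irq() calls.
 */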
2933static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2934{
2935        unsigned int i;
2936        int max_vectors;
2937        int num_vectors_enabled;
2938        struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2939
2940        max_vectors = ctrl_info->num_queue_groups;
2941
2942        for (i = 0; i < max_vectors; i++)
2943                msix_entries[i].entry = i;
2944
2945        num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2946                msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2947
2948        if (num_vectors_enabled < 0) {
2949                dev_err(&ctrl_info->pci_dev->dev,
2950                        "MSI-X init failed with error %d\n",
2951                        num_vectors_enabled);
2952                return num_vectors_enabled;
2953        }
2954
2955        ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2956        for (i = 0; i < num_vectors_enabled; i++) {
2957                ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2958                ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2959        }
2960
2961        return 0;
2962}
2963
2964static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2965{
2966        int i;
2967        int rc;
2968        int cpu;
2969
2970        cpu = cpumask_first(cpu_online_mask);
2971        for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2972                rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2973                        get_cpu_mask(cpu));
2974                if (rc)
2975                        dev_err(&ctrl_info->pci_dev->dev,
2976                                "error %d setting affinity hint for irq vector %u\n",
2977                                rc, ctrl_info->msix_vectors[i]);
2978                cpu = cpumask_next(cpu, cpu_online_mask);
2979        }
2980}
2981
2982static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2983{
2984        int i;
2985
2986        for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2987                irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
2988}
2989
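/*
 * Allocate one coherent DMA buffer large enough to hold every
 * operational IQ/OQ element array, the event queue elements, and all of
 * the queue indexes, then carve it up and record the virtual and bus
 * addresses of each piece.  The required size is computed by walking a
 * NULL-based pointer through the same alignment steps used for the
 * real carve-up below.
 */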
2990static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2991{
2992        unsigned int i;
2993        size_t alloc_length;
2994        size_t element_array_length_per_iq;
2995        size_t element_array_length_per_oq;
2996        void *element_array;
2997        void *next_queue_index;
2998        void *aligned_pointer;
2999        unsigned int num_inbound_queues;
3000        unsigned int num_outbound_queues;
3001        unsigned int num_queue_indexes;
3002        struct pqi_queue_group *queue_group;
3003
3004        element_array_length_per_iq =
3005                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3006                ctrl_info->num_elements_per_iq;
3007        element_array_length_per_oq =
3008                PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3009                ctrl_info->num_elements_per_oq;
3010        num_inbound_queues = ctrl_info->num_queue_groups * 2;
3011        num_outbound_queues = ctrl_info->num_queue_groups;
3012        num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3013
3014        aligned_pointer = NULL;
3015
3016        for (i = 0; i < num_inbound_queues; i++) {
3017                aligned_pointer = PTR_ALIGN(aligned_pointer,
3018                        PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3019                aligned_pointer += element_array_length_per_iq;
3020        }
3021
3022        for (i = 0; i < num_outbound_queues; i++) {
3023                aligned_pointer = PTR_ALIGN(aligned_pointer,
3024                        PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3025                aligned_pointer += element_array_length_per_oq;
3026        }
3027
3028        aligned_pointer = PTR_ALIGN(aligned_pointer,
3029                PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3030        aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3031                PQI_EVENT_OQ_ELEMENT_LENGTH;
3032
3033        for (i = 0; i < num_queue_indexes; i++) {
3034                aligned_pointer = PTR_ALIGN(aligned_pointer,
3035                        PQI_OPERATIONAL_INDEX_ALIGNMENT);
3036                aligned_pointer += sizeof(pqi_index_t);
3037        }
3038
3039        alloc_length = (size_t)aligned_pointer +
3040                PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3041
3042        ctrl_info->queue_memory_base =
3043                dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3044                        alloc_length,
3045                        &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3046
3047        if (!ctrl_info->queue_memory_base) {
3048                dev_err(&ctrl_info->pci_dev->dev,
3049                        "failed to allocate memory for PQI operational queues\n");
3050                return -ENOMEM;
3051        }
3052
3053        ctrl_info->queue_memory_length = alloc_length;
3054
3055        element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3056                PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3057
3058        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3059                queue_group = &ctrl_info->queue_groups[i];
3060                queue_group->iq_element_array[RAID_PATH] = element_array;
3061                queue_group->iq_element_array_bus_addr[RAID_PATH] =
3062                        ctrl_info->queue_memory_base_dma_handle +
3063                                (element_array - ctrl_info->queue_memory_base);
3064                element_array += element_array_length_per_iq;
3065                element_array = PTR_ALIGN(element_array,
3066                        PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3067                queue_group->iq_element_array[AIO_PATH] = element_array;
3068                queue_group->iq_element_array_bus_addr[AIO_PATH] =
3069                        ctrl_info->queue_memory_base_dma_handle +
3070                        (element_array - ctrl_info->queue_memory_base);
3071                element_array += element_array_length_per_iq;
3072                element_array = PTR_ALIGN(element_array,
3073                        PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3074        }
3075
3076        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3077                queue_group = &ctrl_info->queue_groups[i];
3078                queue_group->oq_element_array = element_array;
3079                queue_group->oq_element_array_bus_addr =
3080                        ctrl_info->queue_memory_base_dma_handle +
3081                        (element_array - ctrl_info->queue_memory_base);
3082                element_array += element_array_length_per_oq;
3083                element_array = PTR_ALIGN(element_array,
3084                        PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3085        }
3086
3087        ctrl_info->event_queue.oq_element_array = element_array;
3088        ctrl_info->event_queue.oq_element_array_bus_addr =
3089                ctrl_info->queue_memory_base_dma_handle +
3090                (element_array - ctrl_info->queue_memory_base);
3091        element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3092                PQI_EVENT_OQ_ELEMENT_LENGTH;
3093
3094        next_queue_index = PTR_ALIGN(element_array,
3095                PQI_OPERATIONAL_INDEX_ALIGNMENT);
3096
3097        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3098                queue_group = &ctrl_info->queue_groups[i];
3099                queue_group->iq_ci[RAID_PATH] = next_queue_index;
3100                queue_group->iq_ci_bus_addr[RAID_PATH] =
3101                        ctrl_info->queue_memory_base_dma_handle +
3102                        (next_queue_index - ctrl_info->queue_memory_base);
3103                next_queue_index += sizeof(pqi_index_t);
3104                next_queue_index = PTR_ALIGN(next_queue_index,
3105                        PQI_OPERATIONAL_INDEX_ALIGNMENT);
3106                queue_group->iq_ci[AIO_PATH] = next_queue_index;
3107                queue_group->iq_ci_bus_addr[AIO_PATH] =
3108                        ctrl_info->queue_memory_base_dma_handle +
3109                        (next_queue_index - ctrl_info->queue_memory_base);
3110                next_queue_index += sizeof(pqi_index_t);
3111                next_queue_index = PTR_ALIGN(next_queue_index,
3112                        PQI_OPERATIONAL_INDEX_ALIGNMENT);
3113                queue_group->oq_pi = next_queue_index;
3114                queue_group->oq_pi_bus_addr =
3115                        ctrl_info->queue_memory_base_dma_handle +
3116                        (next_queue_index - ctrl_info->queue_memory_base);
3117                next_queue_index += sizeof(pqi_index_t);
3118                next_queue_index = PTR_ALIGN(next_queue_index,
3119                        PQI_OPERATIONAL_INDEX_ALIGNMENT);
3120        }
3121
3122        ctrl_info->event_queue.oq_pi = next_queue_index;
3123        ctrl_info->event_queue.oq_pi_bus_addr =
3124                ctrl_info->queue_memory_base_dma_handle +
3125                (next_queue_index - ctrl_info->queue_memory_base);
3126
3127        return 0;
3128}
3129
3130static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3131{
3132        unsigned int i;
3133        u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3134        u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3135
3136        /*
3137         * Initialize the backpointers to the controller structure in
3138         * each operational queue group structure.
3139         */
3140        for (i = 0; i < ctrl_info->num_queue_groups; i++)
3141                ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3142
3143        /*
3144         * Assign IDs to all operational queues.  Note that the IDs
3145         * assigned to operational IQs are independent of the IDs
3146         * assigned to operational OQs.
3147         */
3148        ctrl_info->event_queue.oq_id = next_oq_id++;
3149        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3150                ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3151                ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3152                ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3153        }
3154
3155        /*
3156         * Assign MSI-X table entry indexes to all queues.  Note that the
3157         * interrupt for the event queue is shared with the first queue group.
3158         */
3159        ctrl_info->event_queue.int_msg_num = 0;
3160        for (i = 0; i < ctrl_info->num_queue_groups; i++)
3161                ctrl_info->queue_groups[i].int_msg_num = i;
3162
3163        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3164                spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3165                spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3166                INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3167                INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3168        }
3169}
3170
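/*
 * Allocate the coherent DMA buffer that backs the admin IQ/OQ element
 * arrays and indexes, align it to PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT,
 * and record both the kernel virtual and bus addresses of each piece.
 */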
3171static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3172{
3173        size_t alloc_length;
3174        struct pqi_admin_queues_aligned *admin_queues_aligned;
3175        struct pqi_admin_queues *admin_queues;
3176
3177        alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3178                PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3179
3180        ctrl_info->admin_queue_memory_base =
3181                dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3182                        alloc_length,
3183                        &ctrl_info->admin_queue_memory_base_dma_handle,
3184                        GFP_KERNEL);
3185
3186        if (!ctrl_info->admin_queue_memory_base)
3187                return -ENOMEM;
3188
3189        ctrl_info->admin_queue_memory_length = alloc_length;
3190
3191        admin_queues = &ctrl_info->admin_queues;
3192        admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3193                PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3194        admin_queues->iq_element_array =
3195                &admin_queues_aligned->iq_element_array;
3196        admin_queues->oq_element_array =
3197                &admin_queues_aligned->oq_element_array;
3198        admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3199        admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3200
3201        admin_queues->iq_element_array_bus_addr =
3202                ctrl_info->admin_queue_memory_base_dma_handle +
3203                (admin_queues->iq_element_array -
3204                ctrl_info->admin_queue_memory_base);
3205        admin_queues->oq_element_array_bus_addr =
3206                ctrl_info->admin_queue_memory_base_dma_handle +
3207                (admin_queues->oq_element_array -
3208                ctrl_info->admin_queue_memory_base);
3209        admin_queues->iq_ci_bus_addr =
3210                ctrl_info->admin_queue_memory_base_dma_handle +
3211                ((void *)admin_queues->iq_ci -
3212                ctrl_info->admin_queue_memory_base);
3213        admin_queues->oq_pi_bus_addr =
3214                ctrl_info->admin_queue_memory_base_dma_handle +
3215                ((void *)admin_queues->oq_pi -
3216                ctrl_info->admin_queue_memory_base);
3217
3218        return 0;
3219}
3220
3221#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES          HZ
3222#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS      1
3223
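/*
 * Program the admin queue element array and index addresses into the
 * PQI device registers, issue the "create admin queue pair" function,
 * and poll (up to PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES) for the
 * controller to return to the idle state before reading back the admin
 * IQ PI and OQ CI register offsets.
 */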
3224static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3225{
3226        struct pqi_device_registers __iomem *pqi_registers;
3227        struct pqi_admin_queues *admin_queues;
3228        unsigned long timeout;
3229        u8 status;
3230        u32 reg;
3231
3232        pqi_registers = ctrl_info->pqi_registers;
3233        admin_queues = &ctrl_info->admin_queues;
3234
3235        writeq((u64)admin_queues->iq_element_array_bus_addr,
3236                &pqi_registers->admin_iq_element_array_addr);
3237        writeq((u64)admin_queues->oq_element_array_bus_addr,
3238                &pqi_registers->admin_oq_element_array_addr);
3239        writeq((u64)admin_queues->iq_ci_bus_addr,
3240                &pqi_registers->admin_iq_ci_addr);
3241        writeq((u64)admin_queues->oq_pi_bus_addr,
3242                &pqi_registers->admin_oq_pi_addr);
3243
3244        reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3245                (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3246                (admin_queues->int_msg_num << 16);
3247        writel(reg, &pqi_registers->admin_iq_num_elements);
3248        writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3249                &pqi_registers->function_and_status_code);
3250
3251        timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3252        while (1) {
3253                status = readb(&pqi_registers->function_and_status_code);
3254                if (status == PQI_STATUS_IDLE)
3255                        break;
3256                if (time_after(jiffies, timeout))
3257                        return -ETIMEDOUT;
3258                msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3259        }
3260
3261        /*
3262         * The offset registers are not initialized to the correct
3263         * offsets until *after* the create admin queue pair command
3264         * completes successfully.
3265         */
3266        admin_queues->iq_pi = ctrl_info->iomem_base +
3267                PQI_DEVICE_REGISTERS_OFFSET +
3268                readq(&pqi_registers->admin_iq_pi_offset);
3269        admin_queues->oq_ci = ctrl_info->iomem_base +
3270                PQI_DEVICE_REGISTERS_OFFSET +
3271                readq(&pqi_registers->admin_oq_ci_offset);
3272
3273        return 0;
3274}
3275
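/*
 * Copy a general admin request into the next free admin IQ element and
 * ring the admin IQ PI doorbell to notify the controller.
 */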
3276static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3277        struct pqi_general_admin_request *request)
3278{
3279        struct pqi_admin_queues *admin_queues;
3280        void *next_element;
3281        pqi_index_t iq_pi;
3282
3283        admin_queues = &ctrl_info->admin_queues;
3284        iq_pi = admin_queues->iq_pi_copy;
3285
3286        next_element = admin_queues->iq_element_array +
3287                (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3288
3289        memcpy(next_element, request, sizeof(*request));
3290
3291        iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3292        admin_queues->iq_pi_copy = iq_pi;
3293
3294        /*
3295         * This write notifies the controller that an IU is available to be
3296         * processed.
3297         */
3298        writel(iq_pi, admin_queues->iq_pi);
3299}
3300
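/*
 * Poll (with short sleeps) for the controller to post the next admin
 * response, copy it out, and advance the admin OQ consumer index.
 * Gives up after roughly 3 seconds.
 */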
3301static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3302        struct pqi_general_admin_response *response)
3303{
3304        struct pqi_admin_queues *admin_queues;
3305        pqi_index_t oq_pi;
3306        pqi_index_t oq_ci;
3307        unsigned long timeout;
3308
3309        admin_queues = &ctrl_info->admin_queues;
3310        oq_ci = admin_queues->oq_ci_copy;
3311
3312        timeout = (3 * HZ) + jiffies;
3313
3314        while (1) {
3315                oq_pi = *admin_queues->oq_pi;
3316                if (oq_pi != oq_ci)
3317                        break;
3318                if (time_after(jiffies, timeout)) {
3319                        dev_err(&ctrl_info->pci_dev->dev,
3320                                "timed out waiting for admin response\n");
3321                        return -ETIMEDOUT;
3322                }
3323                usleep_range(1000, 2000);
3324        }
3325
3326        memcpy(response, admin_queues->oq_element_array +
3327                (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3328
3329        oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3330        admin_queues->oq_ci_copy = oq_ci;
3331        writel(oq_ci, admin_queues->oq_ci);
3332
3333        return 0;
3334}
3335
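/*
 * Queue an I/O request (if one is supplied) on the given path and then
 * copy as many queued IUs as will fit into free inbound queue elements,
 * wrapping to the start of the element array when an IU spans the queue
 * boundary.  A single PI register write at the end tells the controller
 * how many new elements were posted.
 */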
3336static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3337        struct pqi_queue_group *queue_group, enum pqi_io_path path,
3338        struct pqi_io_request *io_request)
3339{
3340        struct pqi_io_request *next;
3341        void *next_element;
3342        pqi_index_t iq_pi;
3343        pqi_index_t iq_ci;
3344        size_t iu_length;
3345        unsigned long flags;
3346        unsigned int num_elements_needed;
3347        unsigned int num_elements_to_end_of_queue;
3348        size_t copy_count;
3349        struct pqi_iu_header *request;
3350
3351        spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3352
3353        if (io_request)
3354                list_add_tail(&io_request->request_list_entry,
3355                        &queue_group->request_list[path]);
3356
3357        iq_pi = queue_group->iq_pi_copy[path];
3358
3359        list_for_each_entry_safe(io_request, next,
3360                &queue_group->request_list[path], request_list_entry) {
3361
3362                request = io_request->iu;
3363
3364                iu_length = get_unaligned_le16(&request->iu_length) +
3365                        PQI_REQUEST_HEADER_LENGTH;
3366                num_elements_needed =
3367                        DIV_ROUND_UP(iu_length,
3368                                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3369
3370                iq_ci = *queue_group->iq_ci[path];
3371
3372                if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3373                        ctrl_info->num_elements_per_iq))
3374                        break;
3375
3376                put_unaligned_le16(queue_group->oq_id,
3377                        &request->response_queue_id);
3378
3379                next_element = queue_group->iq_element_array[path] +
3380                        (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3381
3382                num_elements_to_end_of_queue =
3383                        ctrl_info->num_elements_per_iq - iq_pi;
3384
3385                if (num_elements_needed <= num_elements_to_end_of_queue) {
3386                        memcpy(next_element, request, iu_length);
3387                } else {
3388                        copy_count = num_elements_to_end_of_queue *
3389                                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3390                        memcpy(next_element, request, copy_count);
3391                        memcpy(queue_group->iq_element_array[path],
3392                                (u8 *)request + copy_count,
3393                                iu_length - copy_count);
3394                }
3395
3396                iq_pi = (iq_pi + num_elements_needed) %
3397                        ctrl_info->num_elements_per_iq;
3398
3399                list_del(&io_request->request_list_entry);
3400        }
3401
3402        if (iq_pi != queue_group->iq_pi_copy[path]) {
3403                queue_group->iq_pi_copy[path] = iq_pi;
3404                /*
3405                 * This write notifies the controller that one or more IUs are
3406                 * available to be processed.
3407                 */
3408                writel(iq_pi, queue_group->iq_pi[path]);
3409        }
3410
3411        spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3412}
3413
3414static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3415        void *context)
3416{
3417        struct completion *waiting = context;
3418
3419        complete(waiting);
3420}
3421
3422static int pqi_submit_raid_request_synchronous_with_io_request(
3423        struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3424        unsigned long timeout_msecs)
3425{
3426        int rc = 0;
3427        DECLARE_COMPLETION_ONSTACK(wait);
3428
3429        io_request->io_complete_callback = pqi_raid_synchronous_complete;
3430        io_request->context = &wait;
3431
3432        pqi_start_io(ctrl_info,
3433                &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3434                io_request);
3435
3436        if (timeout_msecs == NO_TIMEOUT) {
3437                wait_for_completion_io(&wait);
3438        } else {
3439                if (!wait_for_completion_io_timeout(&wait,
3440                        msecs_to_jiffies(timeout_msecs))) {
3441                        dev_warn(&ctrl_info->pci_dev->dev,
3442                                "command timed out\n");
3443                        rc = -ETIMEDOUT;
3444                }
3445        }
3446
3447        return rc;
3448}
3449
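/*
 * Send a RAID path request and wait for it to complete.  The
 * sync_request_sem semaphore limits the number of outstanding
 * synchronous requests; when a timeout is supplied, time spent blocked
 * on the semaphore is subtracted from the remaining timeout.
 */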
3450static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3451        struct pqi_iu_header *request, unsigned int flags,
3452        struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3453{
3454        int rc;
3455        struct pqi_io_request *io_request;
3456        unsigned long start_jiffies;
3457        unsigned long msecs_blocked;
3458        size_t iu_length;
3459
3460        /*
3461         * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3462         * are mutually exclusive.
3463         */
3464
3465        if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3466                if (down_interruptible(&ctrl_info->sync_request_sem))
3467                        return -ERESTARTSYS;
3468        } else {
3469                if (timeout_msecs == NO_TIMEOUT) {
3470                        down(&ctrl_info->sync_request_sem);
3471                } else {
3472                        start_jiffies = jiffies;
3473                        if (down_timeout(&ctrl_info->sync_request_sem,
3474                                msecs_to_jiffies(timeout_msecs)))
3475                                return -ETIMEDOUT;
3476                        msecs_blocked =
3477                                jiffies_to_msecs(jiffies - start_jiffies);
3478                        if (msecs_blocked >= timeout_msecs)
3479                                return -ETIMEDOUT;
3480                        timeout_msecs -= msecs_blocked;
3481                }
3482        }
3483
3484        io_request = pqi_alloc_io_request(ctrl_info);
3485
3486        put_unaligned_le16(io_request->index,
3487                &(((struct pqi_raid_path_request *)request)->request_id));
3488
3489        if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3490                ((struct pqi_raid_path_request *)request)->error_index =
3491                        ((struct pqi_raid_path_request *)request)->request_id;
3492
3493        iu_length = get_unaligned_le16(&request->iu_length) +
3494                PQI_REQUEST_HEADER_LENGTH;
3495        memcpy(io_request->iu, request, iu_length);
3496
3497        rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3498                io_request, timeout_msecs);
3499
3500        if (error_info) {
3501                if (io_request->error_info)
3502                        memcpy(error_info, io_request->error_info,
3503                                sizeof(*error_info));
3504                else
3505                        memset(error_info, 0, sizeof(*error_info));
3506        } else if (rc == 0 && io_request->error_info) {
3507                u8 scsi_status;
3508                struct pqi_raid_error_info *raid_error_info;
3509
3510                raid_error_info = io_request->error_info;
3511                scsi_status = raid_error_info->status;
3512
3513                if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3514                        raid_error_info->data_out_result ==
3515                        PQI_DATA_IN_OUT_UNDERFLOW)
3516                        scsi_status = SAM_STAT_GOOD;
3517
3518                if (scsi_status != SAM_STAT_GOOD)
3519                        rc = -EIO;
3520        }
3521
3522        pqi_free_io_request(io_request);
3523
3524        up(&ctrl_info->sync_request_sem);
3525
3526        return rc;
3527}
3528
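/*
 * Sanity-check a general admin response: the IU type, IU length, and
 * echoed function code must match the request, and the status must
 * indicate success.
 */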
3529static int pqi_validate_admin_response(
3530        struct pqi_general_admin_response *response, u8 expected_function_code)
3531{
3532        if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3533                return -EINVAL;
3534
3535        if (get_unaligned_le16(&response->header.iu_length) !=
3536                PQI_GENERAL_ADMIN_IU_LENGTH)
3537                return -EINVAL;
3538
3539        if (response->function_code != expected_function_code)
3540                return -EINVAL;
3541
3542        if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3543                return -EINVAL;
3544
3545        return 0;
3546}
3547
3548static int pqi_submit_admin_request_synchronous(
3549        struct pqi_ctrl_info *ctrl_info,
3550        struct pqi_general_admin_request *request,
3551        struct pqi_general_admin_response *response)
3552{
3553        int rc;
3554
3555        pqi_submit_admin_request(ctrl_info, request);
3556
3557        rc = pqi_poll_for_admin_response(ctrl_info, response);
3558
3559        if (rc == 0)
3560                rc = pqi_validate_admin_response(response,
3561                        request->function_code);
3562
3563        return rc;
3564}
3565
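/*
 * Issue the REPORT DEVICE CAPABILITY admin function and cache the
 * controller's queue limits, element sizes, and SOP IU layer
 * capabilities in the controller structure.
 */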
3566static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3567{
3568        int rc;
3569        struct pqi_general_admin_request request;
3570        struct pqi_general_admin_response response;
3571        struct pqi_device_capability *capability;
3572        struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3573
3574        capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3575        if (!capability)
3576                return -ENOMEM;
3577
3578        memset(&request, 0, sizeof(request));
3579
3580        request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3581        put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3582                &request.header.iu_length);
3583        request.function_code =
3584                PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3585        put_unaligned_le32(sizeof(*capability),
3586                &request.data.report_device_capability.buffer_length);
3587
3588        rc = pqi_map_single(ctrl_info->pci_dev,
3589                &request.data.report_device_capability.sg_descriptor,
3590                capability, sizeof(*capability),
3591                PCI_DMA_FROMDEVICE);
3592        if (rc)
3593                goto out;
3594
3595        rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3596                &response);
3597
3598        pqi_pci_unmap(ctrl_info->pci_dev,
3599                &request.data.report_device_capability.sg_descriptor, 1,
3600                PCI_DMA_FROMDEVICE);
3601
3602        if (rc)
3603                goto out;
3604
3605        if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3606                rc = -EIO;
3607                goto out;
3608        }
3609
3610        ctrl_info->max_inbound_queues =
3611                get_unaligned_le16(&capability->max_inbound_queues);
3612        ctrl_info->max_elements_per_iq =
3613                get_unaligned_le16(&capability->max_elements_per_iq);
3614        ctrl_info->max_iq_element_length =
3615                get_unaligned_le16(&capability->max_iq_element_length)
3616                * 16;
3617        ctrl_info->max_outbound_queues =
3618                get_unaligned_le16(&capability->max_outbound_queues);
3619        ctrl_info->max_elements_per_oq =
3620                get_unaligned_le16(&capability->max_elements_per_oq);
3621        ctrl_info->max_oq_element_length =
3622                get_unaligned_le16(&capability->max_oq_element_length)
3623                * 16;
3624
3625        sop_iu_layer_descriptor =
3626                &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3627
3628        ctrl_info->max_inbound_iu_length_per_firmware =
3629                get_unaligned_le16(
3630                        &sop_iu_layer_descriptor->max_inbound_iu_length);
3631        ctrl_info->inbound_spanning_supported =
3632                sop_iu_layer_descriptor->inbound_spanning_supported;
3633        ctrl_info->outbound_spanning_supported =
3634                sop_iu_layer_descriptor->outbound_spanning_supported;
3635
3636out:
3637        kfree(capability);
3638
3639        return rc;
3640}
3641
3642static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3643{
3644        if (ctrl_info->max_iq_element_length <
3645                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3646                dev_err(&ctrl_info->pci_dev->dev,
3647                        "max. inbound queue element length of %d is less than the required length of %d\n",
3648                        ctrl_info->max_iq_element_length,
3649                        PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3650                return -EINVAL;
3651        }
3652
3653        if (ctrl_info->max_oq_element_length <
3654                PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3655                dev_err(&ctrl_info->pci_dev->dev,
3656                        "max. outbound queue element length of %d is less than the required length of %d\n",
3657                        ctrl_info->max_oq_element_length,
3658                        PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3659                return -EINVAL;
3660        }
3661
3662        if (ctrl_info->max_inbound_iu_length_per_firmware <
3663                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3664                dev_err(&ctrl_info->pci_dev->dev,
3665                        "max. inbound IU length of %u is less than the min. required length of %d\n",
3666                        ctrl_info->max_inbound_iu_length_per_firmware,
3667                        PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3668                return -EINVAL;
3669        }
3670
3671        if (!ctrl_info->inbound_spanning_supported) {
3672                dev_err(&ctrl_info->pci_dev->dev,
3673                        "the controller does not support inbound spanning\n");
3674                return -EINVAL;
3675        }
3676
3677        if (ctrl_info->outbound_spanning_supported) {
3678                dev_err(&ctrl_info->pci_dev->dev,
3679                        "the controller supports outbound spanning but this driver does not\n");
3680                return -EINVAL;
3681        }
3682
3683        return 0;
3684}
3685
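/*
 * Delete a single operational IQ or OQ via the corresponding general
 * admin function.  Used to unwind partially created queue groups.
 */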
3686static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3687        bool inbound_queue, u16 queue_id)
3688{
3689        struct pqi_general_admin_request request;
3690        struct pqi_general_admin_response response;
3691
3692        memset(&request, 0, sizeof(request));
3693        request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3694        put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3695                &request.header.iu_length);
3696        if (inbound_queue)
3697                request.function_code =
3698                        PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3699        else
3700                request.function_code =
3701                        PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3702        put_unaligned_le16(queue_id,
3703                &request.data.delete_operational_queue.queue_id);
3704
3705        return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3706                &response);
3707}
3708
3709static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3710{
3711        int rc;
3712        struct pqi_event_queue *event_queue;
3713        struct pqi_general_admin_request request;
3714        struct pqi_general_admin_response response;
3715
3716        event_queue = &ctrl_info->event_queue;
3717
3718        /*
3719         * Create the OQ (Outbound Queue - device to host queue) dedicated
3720         * to events.
3721         */
3722        memset(&request, 0, sizeof(request));
3723        request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3724        put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3725                &request.header.iu_length);
3726        request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3727        put_unaligned_le16(event_queue->oq_id,
3728                &request.data.create_operational_oq.queue_id);
3729        put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3730                &request.data.create_operational_oq.element_array_addr);
3731        put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3732                &request.data.create_operational_oq.pi_addr);
3733        put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3734                &request.data.create_operational_oq.num_elements);
3735        put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3736                &request.data.create_operational_oq.element_length);
3737        request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3738        put_unaligned_le16(event_queue->int_msg_num,
3739                &request.data.create_operational_oq.int_msg_num);
3740
3741        rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3742                &response);
3743        if (rc)
3744                return rc;
3745
3746        event_queue->oq_ci = ctrl_info->iomem_base +
3747                PQI_DEVICE_REGISTERS_OFFSET +
3748                get_unaligned_le64(
3749                        &response.data.create_operational_oq.oq_ci_offset);
3750
3751        return 0;
3752}
3753
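/*
 * Create the next operational queue group: a RAID path IQ, an AIO path
 * IQ (flagged as such via the change-IQ-property admin function), and
 * the group's OQ.  On failure, any inbound queues already created for
 * this group are deleted.
 */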
3754static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3755{
3756        unsigned int i;
3757        int rc;
3758        struct pqi_queue_group *queue_group;
3759        struct pqi_general_admin_request request;
3760        struct pqi_general_admin_response response;
3761
3762        i = ctrl_info->num_active_queue_groups;
3763        queue_group = &ctrl_info->queue_groups[i];
3764
3765        /*
3766         * Create IQ (Inbound Queue - host to device queue) for
3767         * RAID path.
3768         */
3769        memset(&request, 0, sizeof(request));
3770        request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3771        put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3772                &request.header.iu_length);
3773        request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3774        put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3775                &request.data.create_operational_iq.queue_id);
3776        put_unaligned_le64(
3777                (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3778                &request.data.create_operational_iq.element_array_addr);
3779        put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3780                &request.data.create_operational_iq.ci_addr);
3781        put_unaligned_le16(ctrl_info->num_elements_per_iq,
3782                &request.data.create_operational_iq.num_elements);
3783        put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3784                &request.data.create_operational_iq.element_length);
3785        request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3786
3787        rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3788                &response);
3789        if (rc) {
3790                dev_err(&ctrl_info->pci_dev->dev,
3791                        "error creating inbound RAID queue\n");
3792                return rc;
3793        }
3794
3795        queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3796                PQI_DEVICE_REGISTERS_OFFSET +
3797                get_unaligned_le64(
3798                        &response.data.create_operational_iq.iq_pi_offset);
3799
3800        /*
3801         * Create IQ (Inbound Queue - host to device queue) for
3802         * Advanced I/O (AIO) path.
3803         */
3804        memset(&request, 0, sizeof(request));
3805        request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3806        put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3807                &request.header.iu_length);
3808        request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3809        put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3810                &request.data.create_operational_iq.queue_id);
3811        put_unaligned_le64(
3812                (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
3813                &request.data.create_operational_iq.element_array_addr);
3814        put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3815                &request.data.create_operational_iq.ci_addr);
3816        put_unaligned_le16(ctrl_info->num_elements_per_iq,
3817                &request.data.create_operational_iq.num_elements);
3818        put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3819                &request.data.create_operational_iq.element_length);
3820        request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3821
3822        rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3823                &response);
3824        if (rc) {
3825                dev_err(&ctrl_info->pci_dev->dev,
3826                        "error creating inbound AIO queue\n");
3827                goto delete_inbound_queue_raid;
3828        }
3829
3830        queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3831                PQI_DEVICE_REGISTERS_OFFSET +
3832                get_unaligned_le64(
3833                        &response.data.create_operational_iq.iq_pi_offset);
3834
3835        /*
3836         * Designate the 2nd IQ as the AIO path.  By default, all IQs are
3837         * assumed to be for RAID path I/O unless we change the queue's
3838         * property.
3839         */
3840        memset(&request, 0, sizeof(request));
3841        request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3842        put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3843                &request.header.iu_length);
3844        request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3845        put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3846                &request.data.change_operational_iq_properties.queue_id);
3847        put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3848                &request.data.change_operational_iq_properties.vendor_specific);
3849
3850        rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3851                &response);
3852        if (rc) {
3853                dev_err(&ctrl_info->pci_dev->dev,
3854                        "error changing queue property\n");
3855                goto delete_inbound_queue_aio;
3856        }
3857
3858        /*
3859         * Create OQ (Outbound Queue - device to host queue).
3860         */
3861        memset(&request, 0, sizeof(request));
3862        request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3863        put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3864                &request.header.iu_length);
3865        request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3866        put_unaligned_le16(queue_group->oq_id,
3867                &request.data.create_operational_oq.queue_id);
3868        put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3869                &request.data.create_operational_oq.element_array_addr);
3870        put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3871                &request.data.create_operational_oq.pi_addr);
3872        put_unaligned_le16(ctrl_info->num_elements_per_oq,
3873                &request.data.create_operational_oq.num_elements);
3874        put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3875                &request.data.create_operational_oq.element_length);
3876        request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3877        put_unaligned_le16(queue_group->int_msg_num,
3878                &request.data.create_operational_oq.int_msg_num);
3879
3880        rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3881                &response);
3882        if (rc) {
3883                dev_err(&ctrl_info->pci_dev->dev,
3884                        "error creating outbound queue\n");
3885                goto delete_inbound_queue_aio;
3886        }
3887
3888        queue_group->oq_ci = ctrl_info->iomem_base +
3889                PQI_DEVICE_REGISTERS_OFFSET +
3890                get_unaligned_le64(
3891                        &response.data.create_operational_oq.oq_ci_offset);
3892
3893        ctrl_info->num_active_queue_groups++;
3894
3895        return 0;
3896
3897delete_inbound_queue_aio:
3898        pqi_delete_operational_queue(ctrl_info, true,
3899                queue_group->iq_id[AIO_PATH]);
3900
3901delete_inbound_queue_raid:
3902        pqi_delete_operational_queue(ctrl_info, true,
3903                queue_group->iq_id[RAID_PATH]);
3904
3905        return rc;
3906}
3907
3908static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3909{
3910        int rc;
3911        unsigned int i;
3912
3913        rc = pqi_create_event_queue(ctrl_info);
3914        if (rc) {
3915                dev_err(&ctrl_info->pci_dev->dev,
3916                        "error creating event queue\n");
3917                return rc;
3918        }
3919
3920        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3921                rc = pqi_create_queue_group(ctrl_info);
3922                if (rc) {
3923                        dev_err(&ctrl_info->pci_dev->dev,
3924                                "error creating queue group number %u/%u\n",
3925                                i, ctrl_info->num_queue_groups);
3926                        return rc;
3927                }
3928        }
3929
3930        return 0;
3931}
3932
3933#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
3934        (offsetof(struct pqi_event_config, descriptors) + \
3935        (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3936
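/*
 * Read the controller's current event configuration, point every event
 * descriptor at our event OQ, and write the configuration back so that
 * event notifications are delivered to the dedicated event queue.
 */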
3937static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3938{
3939        int rc;
3940        unsigned int i;
3941        struct pqi_event_config *event_config;
3942        struct pqi_general_management_request request;
3943
3944        event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3945                GFP_KERNEL);
3946        if (!event_config)
3947                return -ENOMEM;
3948
3949        memset(&request, 0, sizeof(request));
3950
3951        request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3952        put_unaligned_le16(offsetof(struct pqi_general_management_request,
3953                data.report_event_configuration.sg_descriptors[1]) -
3954                PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3955        put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3956                &request.data.report_event_configuration.buffer_length);
3957
3958        rc = pqi_map_single(ctrl_info->pci_dev,
3959                request.data.report_event_configuration.sg_descriptors,
3960                event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3961                PCI_DMA_FROMDEVICE);
3962        if (rc)
3963                goto out;
3964
3965        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3966                0, NULL, NO_TIMEOUT);
3967
3968        pqi_pci_unmap(ctrl_info->pci_dev,
3969                request.data.report_event_configuration.sg_descriptors, 1,
3970                PCI_DMA_FROMDEVICE);
3971
3972        if (rc)
3973                goto out;
3974
3975        for (i = 0; i < event_config->num_event_descriptors; i++)
3976                put_unaligned_le16(ctrl_info->event_queue.oq_id,
3977                        &event_config->descriptors[i].oq_id);
3978
3979        memset(&request, 0, sizeof(request));
3980
3981        request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3982        put_unaligned_le16(offsetof(struct pqi_general_management_request,
3983                data.report_event_configuration.sg_descriptors[1]) -
3984                PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3985        put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3986                &request.data.report_event_configuration.buffer_length);
3987
3988        rc = pqi_map_single(ctrl_info->pci_dev,
3989                request.data.report_event_configuration.sg_descriptors,
3990                event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3991                PCI_DMA_TODEVICE);
3992        if (rc)
3993                goto out;
3994
3995        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
3996                NULL, NO_TIMEOUT);
3997
3998        pqi_pci_unmap(ctrl_info->pci_dev,
3999                request.data.report_event_configuration.sg_descriptors, 1,
4000                PCI_DMA_TODEVICE);
4001
4002out:
4003        kfree(event_config);
4004
4005        return rc;
4006}
4007
4008static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4009{
4010        unsigned int i;
4011        struct device *dev;
4012        size_t sg_chain_buffer_length;
4013        struct pqi_io_request *io_request;
4014
4015        if (!ctrl_info->io_request_pool)
4016                return;
4017
4018        dev = &ctrl_info->pci_dev->dev;
4019        sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4020        io_request = ctrl_info->io_request_pool;
4021
4022        for (i = 0; i < ctrl_info->max_io_slots; i++) {
4023                kfree(io_request->iu);
4024                if (!io_request->sg_chain_buffer)
4025                        break;
4026                dma_free_coherent(dev, sg_chain_buffer_length,
4027                        io_request->sg_chain_buffer,
4028                        io_request->sg_chain_buffer_dma_handle);
4029                io_request++;
4030        }
4031
4032        kfree(ctrl_info->io_request_pool);
4033        ctrl_info->io_request_pool = NULL;
4034}
4035
4036static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4037{
4038        ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4039                ctrl_info->error_buffer_length,
4040                &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4041
4042        if (!ctrl_info->error_buffer)
4043                return -ENOMEM;
4044
4045        return 0;
4046}
4047
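/*
 * Allocate the I/O request pool: one pqi_io_request per I/O slot, each
 * with an IU buffer sized for the largest inbound IU and a coherent
 * scatter-gather chain buffer for requests whose SG lists do not fit
 * in the IU itself.
 */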
4048static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4049{
4050        unsigned int i;
4051        void *sg_chain_buffer;
4052        size_t sg_chain_buffer_length;
4053        dma_addr_t sg_chain_buffer_dma_handle;
4054        struct device *dev;
4055        struct pqi_io_request *io_request;
4056
4057        ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
4058                sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4059
4060        if (!ctrl_info->io_request_pool) {
4061                dev_err(&ctrl_info->pci_dev->dev,
4062                        "failed to allocate I/O request pool\n");
4063                goto error;
4064        }
4065
4066        dev = &ctrl_info->pci_dev->dev;
4067        sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4068        io_request = ctrl_info->io_request_pool;
4069
4070        for (i = 0; i < ctrl_info->max_io_slots; i++) {
4071                io_request->iu =
4072                        kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4073
4074                if (!io_request->iu) {
4075                        dev_err(&ctrl_info->pci_dev->dev,
4076                                "failed to allocate IU buffers\n");
4077                        goto error;
4078                }
4079
4080                sg_chain_buffer = dma_alloc_coherent(dev,
4081                        sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4082                        GFP_KERNEL);
4083
4084                if (!sg_chain_buffer) {
4085                        dev_err(&ctrl_info->pci_dev->dev,
4086                                "failed to allocate PQI scatter-gather chain buffers\n");
4087                        goto error;
4088                }
4089
4090                io_request->index = i;
4091                io_request->sg_chain_buffer = sg_chain_buffer;
4092                io_request->sg_chain_buffer_dma_handle =
4093                        sg_chain_buffer_dma_handle;
4094                io_request++;
4095        }
4096
4097        return 0;
4098
4099error:
4100        pqi_free_all_io_requests(ctrl_info);
4101
4102        return -ENOMEM;
4103}
4104
4105/*
4106 * Calculate required resources that are sized based on max. outstanding
4107 * requests and max. transfer size.
4108 */
4109
4110static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4111{
4112        u32 max_transfer_size;
4113        u32 max_sg_entries;
4114
4115        ctrl_info->scsi_ml_can_queue =
4116                ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4117        ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4118
4119        ctrl_info->error_buffer_length =
4120                ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4121
4122        if (reset_devices)
4123                max_transfer_size = min(ctrl_info->max_transfer_size,
4124                        PQI_MAX_TRANSFER_SIZE_KDUMP);
4125        else
4126                max_transfer_size = min(ctrl_info->max_transfer_size,
4127                        PQI_MAX_TRANSFER_SIZE);
4128
4129        max_sg_entries = max_transfer_size / PAGE_SIZE;
4130
4131        /* +1 to cover when the buffer is not page-aligned. */
4132        max_sg_entries++;
4133
4134        max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4135
4136        max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4137
4138        ctrl_info->sg_chain_buffer_length =
4139                max_sg_entries * sizeof(struct pqi_sg_descriptor);
4140        ctrl_info->sg_tablesize = max_sg_entries;
4141        ctrl_info->max_sectors = max_transfer_size / 512;
4142}
4143
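/*
 * Size the operational queues: one queue group per online CPU (capped
 * by the controller's queue and MSI-X limits, and forced to a single
 * group in kdump mode), with inbound/outbound element counts and the
 * per-IU SG descriptor limit derived from the maximum inbound IU
 * length.
 */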
4144static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4145{
4146        int num_queue_groups;
4147        u16 num_elements_per_iq;
4148        u16 num_elements_per_oq;
4149
4150        if (reset_devices) {
4151                num_queue_groups = 1;
4152        } else {
4153                int num_cpus;
4154                int max_queue_groups;
4155
4156                max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4157                        ctrl_info->max_outbound_queues - 1);
4158                max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4159
4160                num_cpus = num_online_cpus();
4161                num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4162                num_queue_groups = min(num_queue_groups, max_queue_groups);
4163        }
4164
4165        ctrl_info->num_queue_groups = num_queue_groups;
4166
4167        /*
4168         * Round the max. inbound IU length down to an exact multiple
4169         * of our inbound element length.
4170         */
4171        ctrl_info->max_inbound_iu_length =
4172                (ctrl_info->max_inbound_iu_length_per_firmware /
4173                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4174                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4175
4176        num_elements_per_iq =
4177                (ctrl_info->max_inbound_iu_length /
4178                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4179
4180        /* Add one because one element in each queue is unusable. */
4181        num_elements_per_iq++;
4182
4183        num_elements_per_iq = min(num_elements_per_iq,
4184                ctrl_info->max_elements_per_iq);
4185
4186        num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4187        num_elements_per_oq = min(num_elements_per_oq,
4188                ctrl_info->max_elements_per_oq);
4189
4190        ctrl_info->num_elements_per_iq = num_elements_per_iq;
4191        ctrl_info->num_elements_per_oq = num_elements_per_oq;
4192
4193        ctrl_info->max_sg_per_iu =
4194                ((ctrl_info->max_inbound_iu_length -
4195                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4196                sizeof(struct pqi_sg_descriptor)) +
4197                PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4198}
4199
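/* Fill in one PQI SG descriptor from a DMA-mapped scatterlist entry. */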
4200static inline void pqi_set_sg_descriptor(
4201        struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4202{
4203        u64 address = (u64)sg_dma_address(sg);
4204        unsigned int length = sg_dma_len(sg);
4205
4206        put_unaligned_le64(address, &sg_descriptor->address);
4207        put_unaligned_le32(length, &sg_descriptor->length);
4208        put_unaligned_le32(0, &sg_descriptor->flags);
4209}
4210
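/*
 * DMA-map the SCSI command and build the RAID path SG list.  As many
 * descriptors as possible are embedded directly in the IU; if the list
 * is longer, the next embedded descriptor becomes a CISS_SG_CHAIN
 * pointer to the request's pre-allocated chain buffer and the remaining
 * descriptors are written there.  The final descriptor is marked with
 * CISS_SG_LAST.
 */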
4211static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4212        struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4213        struct pqi_io_request *io_request)
4214{
4215        int i;
4216        u16 iu_length;
4217        int sg_count;
4218        bool chained;
4219        unsigned int num_sg_in_iu;
4220        unsigned int max_sg_per_iu;
4221        struct scatterlist *sg;
4222        struct pqi_sg_descriptor *sg_descriptor;
4223
4224        sg_count = scsi_dma_map(scmd);
4225        if (sg_count < 0)
4226                return sg_count;
4227
4228        iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4229                PQI_REQUEST_HEADER_LENGTH;
4230
4231        if (sg_count == 0)
4232                goto out;
4233
4234        sg = scsi_sglist(scmd);
4235        sg_descriptor = request->sg_descriptors;
4236        max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4237        chained = false;
4238        num_sg_in_iu = 0;
4239        i = 0;
4240
4241        while (1) {
4242                pqi_set_sg_descriptor(sg_descriptor, sg);
4243                if (!chained)
4244                        num_sg_in_iu++;
4245                i++;
4246                if (i == sg_count)
4247                        break;
4248                sg_descriptor++;
4249                if (i == max_sg_per_iu) {
4250                        put_unaligned_le64(
4251                                (u64)io_request->sg_chain_buffer_dma_handle,
4252                                &sg_descriptor->address);
4253                        put_unaligned_le32((sg_count - num_sg_in_iu)
4254                                * sizeof(*sg_descriptor),
4255                                &sg_descriptor->length);
4256                        put_unaligned_le32(CISS_SG_CHAIN,
4257                                &sg_descriptor->flags);
4258                        chained = true;
4259                        num_sg_in_iu++;
4260                        sg_descriptor = io_request->sg_chain_buffer;
4261                }
4262                sg = sg_next(sg);
4263        }
4264
4265        put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4266        request->partial = chained;
4267        iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4268
4269out:
4270        put_unaligned_le16(iu_length, &request->header.iu_length);
4271
4272        return 0;
4273}
4274
4275static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4276        struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4277        struct pqi_io_request *io_request)
4278{
4279        int i;
4280        u16 iu_length;
4281        int sg_count;
4282        bool chained;
4283        unsigned int num_sg_in_iu;
4284        unsigned int max_sg_per_iu;
4285        struct scatterlist *sg;
4286        struct pqi_sg_descriptor *sg_descriptor;
4287
4288        sg_count = scsi_dma_map(scmd);
4289        if (sg_count < 0)
4290                return sg_count;
4291
4292        iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4293                PQI_REQUEST_HEADER_LENGTH;
4294        num_sg_in_iu = 0;
4295
4296        if (sg_count == 0)
4297                goto out;
4298
4299        sg = scsi_sglist(scmd);
4300        sg_descriptor = request->sg_descriptors;
4301        max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4302        chained = false;
4303        i = 0;
4304
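        /* Same embedded + chained SG descriptor scheme as the RAID path above. */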
4305        while (1) {
4306                pqi_set_sg_descriptor(sg_descriptor, sg);
4307                if (!chained)
4308                        num_sg_in_iu++;
4309                i++;
4310                if (i == sg_count)
4311                        break;
4312                sg_descriptor++;
4313                if (i == max_sg_per_iu) {
4314                        put_unaligned_le64(
4315                                (u64)io_request->sg_chain_buffer_dma_handle,
4316                                &sg_descriptor->address);
4317                        put_unaligned_le32((sg_count - num_sg_in_iu)
4318                                * sizeof(*sg_descriptor),
4319                                &sg_descriptor->length);
4320                        put_unaligned_le32(CISS_SG_CHAIN,
4321                                &sg_descriptor->flags);
4322                        chained = true;
4323                        num_sg_in_iu++;
4324                        sg_descriptor = io_request->sg_chain_buffer;
4325                }
4326                sg = sg_next(sg);
4327        }
4328
4329        put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4330        request->partial = chained;
4331        iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4332
4333out:
4334        put_unaligned_le16(iu_length, &request->header.iu_length);
4335        request->num_sg_descriptors = num_sg_in_iu;
4336
4337        return 0;
4338}
4339
4340static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4341        void *context)
4342{
4343        struct scsi_cmnd *scmd;
4344
4345        scmd = io_request->scmd;
4346        pqi_free_io_request(io_request);
4347        scsi_dma_unmap(scmd);
4348        pqi_scsi_done(scmd);
4349}
4350
4351static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4352        struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4353        struct pqi_queue_group *queue_group)
4354{
4355        int rc;
4356        size_t cdb_length;
4357        struct pqi_io_request *io_request;
4358        struct pqi_raid_path_request *request;
4359
4360        io_request = pqi_alloc_io_request(ctrl_info);
4361        io_request->io_complete_callback = pqi_raid_io_complete;
4362        io_request->scmd = scmd;
4363
4364        scmd->host_scribble = (unsigned char *)io_request;
4365
4366        request = io_request->iu;
4367        memset(request, 0,
4368                offsetof(struct pqi_raid_path_request, sg_descriptors));
4369
4370        request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4371        put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4372        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4373        put_unaligned_le16(io_request->index, &request->request_id);
4374        request->error_index = request->request_id;
4375        memcpy(request->lun_number, device->scsi3addr,
4376                sizeof(request->lun_number));
4377
4378        cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4379        memcpy(request->cdb, scmd->cmnd, cdb_length);
4380
4381        switch (cdb_length) {
4382        case 6:
4383        case 10:
4384        case 12:
4385        case 16:
4386                /* No bytes in the Additional CDB bytes field */
4387                request->additional_cdb_bytes_usage =
4388                        SOP_ADDITIONAL_CDB_BYTES_0;
4389                break;
4390        case 20:
4391                /* 4 bytes in the Additional CDB bytes field */
4392                request->additional_cdb_bytes_usage =
4393                        SOP_ADDITIONAL_CDB_BYTES_4;
4394                break;
4395        case 24:
4397                /* 8 bytes in the Additional CDB bytes field */
4397                request->additional_cdb_bytes_usage =
4398                        SOP_ADDITIONAL_CDB_BYTES_8;
4399                break;
4400        case 28:
4402                /* 12 bytes in the Additional CDB bytes field */
4402                request->additional_cdb_bytes_usage =
4403                        SOP_ADDITIONAL_CDB_BYTES_12;
4404                break;
4405        case 32:
4406        default:
4408                /* 16 bytes in the Additional CDB bytes field */
4408                request->additional_cdb_bytes_usage =
4409                        SOP_ADDITIONAL_CDB_BYTES_16;
4410                break;
4411        }
4412
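        /*
         * The SOP data-direction flags are expressed from the controller's
         * point of view: on a host write (DMA_TO_DEVICE) the controller
         * reads from host memory, hence SOP_READ_FLAG, and vice versa.
         */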
4413        switch (scmd->sc_data_direction) {
4414        case DMA_TO_DEVICE:
4415                request->data_direction = SOP_READ_FLAG;
4416                break;
4417        case DMA_FROM_DEVICE:
4418                request->data_direction = SOP_WRITE_FLAG;
4419                break;
4420        case DMA_NONE:
4421                request->data_direction = SOP_NO_DIRECTION_FLAG;
4422                break;
4423        case DMA_BIDIRECTIONAL:
4424                request->data_direction = SOP_BIDIRECTIONAL;
4425                break;
4426        default:
4427                dev_err(&ctrl_info->pci_dev->dev,
4428                        "unknown data direction: %d\n",
4429                        scmd->sc_data_direction);
4430                WARN_ON(scmd->sc_data_direction);
4431                break;
4432        }
4433
4434        rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4435        if (rc) {
4436                pqi_free_io_request(io_request);
4437                return SCSI_MLQUEUE_HOST_BUSY;
4438        }
4439
4440        pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4441
4442        return 0;
4443}
4444
4445static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4446        void *context)
4447{
4448        struct scsi_cmnd *scmd;
4449
4450        scmd = io_request->scmd;
4451        scsi_dma_unmap(scmd);
4452        if (io_request->status == -EAGAIN)
4453                set_host_byte(scmd, DID_IMM_RETRY);
4454        pqi_free_io_request(io_request);
4455        pqi_scsi_done(scmd);
4456}
4457
4458static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4459        struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4460        struct pqi_queue_group *queue_group)
4461{
4462        return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4463                scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4464}
4465
4466static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4467        struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4468        unsigned int cdb_length, struct pqi_queue_group *queue_group,
4469        struct pqi_encryption_info *encryption_info)
4470{
4471        int rc;
4472        struct pqi_io_request *io_request;
4473        struct pqi_aio_path_request *request;
4474
4475        io_request = pqi_alloc_io_request(ctrl_info);
4476        io_request->io_complete_callback = pqi_aio_io_complete;
4477        io_request->scmd = scmd;
4478
4479        scmd->host_scribble = (unsigned char *)io_request;
4480
4481        request = io_request->iu;
4482        memset(request, 0,
4483                offsetof(struct pqi_aio_path_request, sg_descriptors));
4484
4485        request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4486        put_unaligned_le32(aio_handle, &request->nexus_id);
4487        put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4488        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4489        put_unaligned_le16(io_request->index, &request->request_id);
4490        request->error_index = request->request_id;
4491        if (cdb_length > sizeof(request->cdb))
4492                cdb_length = sizeof(request->cdb);
4493        request->cdb_length = cdb_length;
4494        memcpy(request->cdb, cdb, cdb_length);
4495
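        /* As in the RAID path, direction flags are from the controller's perspective. */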
4496        switch (scmd->sc_data_direction) {
4497        case DMA_TO_DEVICE:
4498                request->data_direction = SOP_READ_FLAG;
4499                break;
4500        case DMA_FROM_DEVICE:
4501                request->data_direction = SOP_WRITE_FLAG;
4502                break;
4503        case DMA_NONE:
4504                request->data_direction = SOP_NO_DIRECTION_FLAG;
4505                break;
4506        case DMA_BIDIRECTIONAL:
4507                request->data_direction = SOP_BIDIRECTIONAL;
4508                break;
4509        default:
4510                dev_err(&ctrl_info->pci_dev->dev,
4511                        "unknown data direction: %d\n",
4512                        scmd->sc_data_direction);
4513                WARN_ON(scmd->sc_data_direction);
4514                break;
4515        }
4516
4517        if (encryption_info) {
4518                request->encryption_enable = true;
4519                put_unaligned_le16(encryption_info->data_encryption_key_index,
4520                        &request->data_encryption_key_index);
4521                put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4522                        &request->encrypt_tweak_lower);
4523                put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4524                        &request->encrypt_tweak_upper);
4525        }
4526
4527        rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4528        if (rc) {
4529                pqi_free_io_request(io_request);
4530                return SCSI_MLQUEUE_HOST_BUSY;
4531        }
4532
4533        pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4534
4535        return 0;
4536}
4537
4538static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4539        struct scsi_cmnd *scmd)
4540{
4541        int rc;
4542        struct pqi_ctrl_info *ctrl_info;
4543        struct pqi_scsi_dev *device;
4544        u16 hwq;
4545        struct pqi_queue_group *queue_group;
4546        bool raid_bypassed;
4547
4548        device = scmd->device->hostdata;
4549        ctrl_info = shost_to_hba(shost);
4550
4551        if (pqi_ctrl_offline(ctrl_info)) {
4552                set_host_byte(scmd, DID_NO_CONNECT);
4553                pqi_scsi_done(scmd);
4554                return 0;
4555        }
4556
4557        /*
4558         * This is necessary because the SCSI midlayer (SML) doesn't zero
4559         * out this field during error recovery.
4560         */
4561        scmd->result = 0;
4562
4563        hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4564        if (hwq >= ctrl_info->num_queue_groups)
4565                hwq = 0;
4566
4567        queue_group = &ctrl_info->queue_groups[hwq];
4568
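        /*
         * Commands to logical volumes try the accelerated RAID bypass
         * path first when offload is enabled for filesystem requests;
         * if that path returns one of the codes listed below, the
         * command is not resubmitted on the normal RAID path.
         */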
4569        if (pqi_is_logical_device(device)) {
4570                raid_bypassed = false;
4571                if (device->offload_enabled &&
4572                        scmd->request->cmd_type == REQ_TYPE_FS) {
4573                        rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4574                                scmd, queue_group);
4575                        if (rc == 0 ||
4576                                rc == SCSI_MLQUEUE_HOST_BUSY ||
4577                                rc == SAM_STAT_CHECK_CONDITION ||
4578                                rc == SAM_STAT_RESERVATION_CONFLICT)
4579                                raid_bypassed = true;
4580                }
4581                if (!raid_bypassed)
4582                        rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4583                                queue_group);
4584        } else {
4585                if (device->aio_enabled)
4586                        rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4587                                queue_group);
4588                else
4589                        rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4590                                queue_group);
4591        }
4592
4593        return rc;
4594}
4595
4596static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4597        void *context)
4598{
4599        struct completion *waiting = context;
4600
4601        complete(waiting);
4602}
4603
4604#define PQI_LUN_RESET_TIMEOUT_SECS      10
4605
4606static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4607        struct pqi_scsi_dev *device, struct completion *wait)
4608{
4609        int rc;
4610        unsigned int wait_secs = 0;
4611
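        /*
         * Poll for the LUN reset to complete, re-checking controller
         * health each interval and logging progress so a long-running
         * reset remains visible in the kernel log.
         */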
4612        while (1) {
4613                if (wait_for_completion_io_timeout(wait,
4614                        PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4615                        rc = 0;
4616                        break;
4617                }
4618
4619                pqi_check_ctrl_health(ctrl_info);
4620                if (pqi_ctrl_offline(ctrl_info)) {
4621                        rc = -ETIMEDOUT;
4622                        break;
4623                }
4624
4625                wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
4626
4627                dev_err(&ctrl_info->pci_dev->dev,
4628                        "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
4629                        ctrl_info->scsi_host->host_no, device->bus,
4630                        device->target, device->lun, wait_secs);
4631        }
4632
4633        return rc;
4634}
4635
4636static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4637        struct pqi_scsi_dev *device)
4638{
4639        int rc;
4640        struct pqi_io_request *io_request;
4641        DECLARE_COMPLETION_ONSTACK(wait);
4642        struct pqi_task_management_request *request;
4643
4644        down(&ctrl_info->lun_reset_sem);
4645
4646        io_request = pqi_alloc_io_request(ctrl_info);
4647        io_request->io_complete_callback = pqi_lun_reset_complete;
4648        io_request->context = &wait;
4649
4650        request = io_request->iu;
4651        memset(request, 0, sizeof(*request));
4652
4653        request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4654        put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4655                &request->header.iu_length);
4656        put_unaligned_le16(io_request->index, &request->request_id);
4657        memcpy(request->lun_number, device->scsi3addr,
4658                sizeof(request->lun_number));
4659        request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4660
4661        pqi_start_io(ctrl_info,
4662                &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4663                io_request);
4664
4665        rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4666        if (rc == 0)
4667                rc = io_request->status;
4668
4669        pqi_free_io_request(io_request);
4670        up(&ctrl_info->lun_reset_sem);
4671
4672        return rc;
4673}
4674
4675/* Performs a reset at the LUN level. */
4676
4677static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4678        struct pqi_scsi_dev *device)
4679{
4680        int rc;
4681
4682        pqi_check_ctrl_health(ctrl_info);
4683        if (pqi_ctrl_offline(ctrl_info))
4684                return FAILED;
4685
4686        rc = pqi_lun_reset(ctrl_info, device);
4687
4688        return rc == 0 ? SUCCESS : FAILED;
4689}
4690
4691static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4692{
4693        int rc;
4694        struct pqi_ctrl_info *ctrl_info;
4695        struct pqi_scsi_dev *device;
4696
4697        ctrl_info = shost_to_hba(scmd->device->host);
4698        device = scmd->device->hostdata;
4699
4700        dev_err(&ctrl_info->pci_dev->dev,
4701                "resetting scsi %d:%d:%d:%d\n",
4702                ctrl_info->scsi_host->host_no,
4703                device->bus, device->target, device->lun);
4704
4705        rc = pqi_device_reset(ctrl_info, device);
4706
4707        dev_err(&ctrl_info->pci_dev->dev,
4708                "reset of scsi %d:%d:%d:%d: %s\n",
4709                ctrl_info->scsi_host->host_no,
4710                device->bus, device->target, device->lun,
4711                rc == SUCCESS ? "SUCCESS" : "FAILED");
4712
4713        return rc;
4714}
4715
4716static int pqi_slave_alloc(struct scsi_device *sdev)
4717{
4718        struct pqi_scsi_dev *device;
4719        unsigned long flags;
4720        struct pqi_ctrl_info *ctrl_info;
4721        struct scsi_target *starget;
4722        struct sas_rphy *rphy;
4723
4724        ctrl_info = shost_to_hba(sdev->host);
4725
4726        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4727
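        /*
         * Physical devices (on PQI_PHYSICAL_DEVICE_BUS) are looked up by
         * their SAS rphy; logical devices are looked up by
         * channel/target/lun.
         */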
4728        if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4729                starget = scsi_target(sdev);
4730                rphy = target_to_rphy(starget);
4731                device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4732                if (device) {
4733                        device->target = sdev_id(sdev);
4734                        device->lun = sdev->lun;
4735                        device->target_lun_valid = true;
4736                }
4737        } else {
4738                device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4739                        sdev_id(sdev), sdev->lun);
4740        }
4741
4742        if (device && device->expose_device) {
4743                sdev->hostdata = device;
4744                device->sdev = sdev;
4745                if (device->queue_depth) {
4746                        device->advertised_queue_depth = device->queue_depth;
4747                        scsi_adjust_queue_depth(sdev,
4748                                scsi_get_tag_type(sdev),
4749                                device->advertised_queue_depth);
4750                }
4751        }
4752
4753        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4754
4755        return 0;
4756}
4757
4758static int pqi_slave_configure(struct scsi_device *sdev)
4759{
4760        struct pqi_scsi_dev *device;
4761
4762        device = sdev->hostdata;
4763        if (!device->expose_device)
4764                sdev->no_uld_attach = true;
4765
4766        return 0;
4767}
4768
4769static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4770        void __user *arg)
4771{
4772        struct pci_dev *pci_dev;
4773        u32 subsystem_vendor;
4774        u32 subsystem_device;
4775        cciss_pci_info_struct pciinfo;
4776
4777        if (!arg)
4778                return -EINVAL;
4779
4780        pci_dev = ctrl_info->pci_dev;
4781
4782        pciinfo.domain = pci_domain_nr(pci_dev->bus);
4783        pciinfo.bus = pci_dev->bus->number;
4784        pciinfo.dev_fn = pci_dev->devfn;
4785        subsystem_vendor = pci_dev->subsystem_vendor;
4786        subsystem_device = pci_dev->subsystem_device;
4787        pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4788                subsystem_vendor;
4789
4790        if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4791                return -EFAULT;
4792
4793        return 0;
4794}
4795
4796static int pqi_getdrivver_ioctl(void __user *arg)
4797{
4798        u32 version;
4799
4800        if (!arg)
4801                return -EINVAL;
4802
4803        version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4804                (DRIVER_RELEASE << 16) | DRIVER_REVISION;
4805
4806        if (copy_to_user(arg, &version, sizeof(version)))
4807                return -EFAULT;
4808
4809        return 0;
4810}
4811
4812struct ciss_error_info {
4813        u8      scsi_status;
4814        int     command_status;
4815        size_t  sense_data_length;
4816};
4817
4818static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4819        struct ciss_error_info *ciss_error_info)
4820{
4821        int ciss_cmd_status;
4822        size_t sense_data_length;
4823
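        /*
         * Map the PQI data-in/data-out result onto the legacy CISS
         * command status values expected by cciss-style ioctl callers.
         */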
4824        switch (pqi_error_info->data_out_result) {
4825        case PQI_DATA_IN_OUT_GOOD:
4826                ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4827                break;
4828        case PQI_DATA_IN_OUT_UNDERFLOW:
4829                ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4830                break;
4831        case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4832                ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4833                break;
4834        case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4835        case PQI_DATA_IN_OUT_BUFFER_ERROR:
4836        case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4837        case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4838        case PQI_DATA_IN_OUT_ERROR:
4839                ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4840                break;
4841        case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4842        case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4843        case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4844        case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4845        case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4846        case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4847        case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4848        case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4849        case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4850        case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4851                ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4852                break;
4853        case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4854                ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4855                break;
4856        case PQI_DATA_IN_OUT_ABORTED:
4857                ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4858                break;
4859        case PQI_DATA_IN_OUT_TIMEOUT:
4860                ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4861                break;
4862        default:
4863                ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4864                break;
4865        }
4866
4867        sense_data_length =
4868                get_unaligned_le16(&pqi_error_info->sense_data_length);
4869        if (sense_data_length == 0)
4870                sense_data_length =
4871                get_unaligned_le16(&pqi_error_info->response_data_length);
4872        if (sense_data_length)
4873                if (sense_data_length > sizeof(pqi_error_info->data))
4874                        sense_data_length = sizeof(pqi_error_info->data);
4875
4876        ciss_error_info->scsi_status = pqi_error_info->status;
4877        ciss_error_info->command_status = ciss_cmd_status;
4878        ciss_error_info->sense_data_length = sense_data_length;
4879}
4880
4881static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4882{
4883        int rc;
4884        char *kernel_buffer = NULL;
4885        u16 iu_length;
4886        size_t sense_data_length;
4887        IOCTL_Command_struct iocommand;
4888        struct pqi_raid_path_request request;
4889        struct pqi_raid_error_info pqi_error_info;
4890        struct ciss_error_info ciss_error_info;
4891
4892        if (pqi_ctrl_offline(ctrl_info))
4893                return -ENXIO;
4894        if (!arg)
4895                return -EINVAL;
4896        if (!capable(CAP_SYS_RAWIO))
4897                return -EPERM;
4898        if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4899                return -EFAULT;
4900        if (iocommand.buf_size < 1 &&
4901                iocommand.Request.Type.Direction != XFER_NONE)
4902                return -EINVAL;
4903        if (iocommand.Request.CDBLen > sizeof(request.cdb))
4904                return -EINVAL;
4905        if (iocommand.Request.Type.Type != TYPE_CMD)
4906                return -EINVAL;
4907
4908        switch (iocommand.Request.Type.Direction) {
4909        case XFER_NONE:
4910        case XFER_WRITE:
4911        case XFER_READ:
4912                break;
4913        default:
4914                return -EINVAL;
4915        }
4916
4917        if (iocommand.buf_size > 0) {
4918                kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4919                if (!kernel_buffer)
4920                        return -ENOMEM;
4921                if (iocommand.Request.Type.Direction & XFER_WRITE) {
4922                        if (copy_from_user(kernel_buffer, iocommand.buf,
4923                                iocommand.buf_size)) {
4924                                rc = -EFAULT;
4925                                goto out;
4926                        }
4927                } else {
4928                        memset(kernel_buffer, 0, iocommand.buf_size);
4929                }
4930        }
4931
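        /*
         * Translate the CCISS passthrough command into a RAID path
         * request: copy the LUN and CDB, map the user buffer (if any)
         * as a single SG descriptor, and issue it synchronously.
         */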
4932        memset(&request, 0, sizeof(request));
4933
4934        request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4935        iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4936                PQI_REQUEST_HEADER_LENGTH;
4937        memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4938                sizeof(request.lun_number));
4939        memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4940        request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4941
4942        switch (iocommand.Request.Type.Direction) {
4943        case XFER_NONE:
4944                request.data_direction = SOP_NO_DIRECTION_FLAG;
4945                break;
4946        case XFER_WRITE:
4947                request.data_direction = SOP_WRITE_FLAG;
4948                break;
4949        case XFER_READ:
4950                request.data_direction = SOP_READ_FLAG;
4951                break;
4952        }
4953
4954        request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4955
4956        if (iocommand.buf_size > 0) {
4957                put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4958
4959                rc = pqi_map_single(ctrl_info->pci_dev,
4960                        &request.sg_descriptors[0], kernel_buffer,
4961                        iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4962                if (rc)
4963                        goto out;
4964
4965                iu_length += sizeof(request.sg_descriptors[0]);
4966        }
4967
4968        put_unaligned_le16(iu_length, &request.header.iu_length);
4969
4970        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4971                PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4972
4973        if (iocommand.buf_size > 0)
4974                pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4975                        PCI_DMA_BIDIRECTIONAL);
4976
4977        memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4978
4979        if (rc == 0) {
4980                pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4981                iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4982                iocommand.error_info.CommandStatus =
4983                        ciss_error_info.command_status;
4984                sense_data_length = ciss_error_info.sense_data_length;
4985                if (sense_data_length) {
4986                        if (sense_data_length >
4987                                sizeof(iocommand.error_info.SenseInfo))
4988                                sense_data_length =
4989                                        sizeof(iocommand.error_info.SenseInfo);
4990                        memcpy(iocommand.error_info.SenseInfo,
4991                                pqi_error_info.data, sense_data_length);
4992                        iocommand.error_info.SenseLen = sense_data_length;
4993                }
4994        }
4995
4996        if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
4997                rc = -EFAULT;
4998                goto out;
4999        }
5000
5001        if (rc == 0 && iocommand.buf_size > 0 &&
5002                (iocommand.Request.Type.Direction & XFER_READ)) {
5003                if (copy_to_user(iocommand.buf, kernel_buffer,
5004                        iocommand.buf_size)) {
5005                        rc = -EFAULT;
5006                }
5007        }
5008
5009out:
5010        kfree(kernel_buffer);
5011
5012        return rc;
5013}
5014
5015static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5016{
5017        int rc;
5018        struct pqi_ctrl_info *ctrl_info;
5019
5020        ctrl_info = shost_to_hba(sdev->host);
5021
5022        switch (cmd) {
5023        case CCISS_DEREGDISK:
5024        case CCISS_REGNEWDISK:
5025        case CCISS_REGNEWD:
5026                rc = pqi_scan_scsi_devices(ctrl_info);
5027                break;
5028        case CCISS_GETPCIINFO:
5029                rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5030                break;
5031        case CCISS_GETDRIVVER:
5032                rc = pqi_getdrivver_ioctl(arg);
5033                break;
5034        case CCISS_PASSTHRU:
5035                rc = pqi_passthru_ioctl(ctrl_info, arg);
5036                break;
5037        default:
5038                rc = -EINVAL;
5039                break;
5040        }
5041
5042        return rc;
5043}
5044
5045static ssize_t pqi_version_show(struct device *dev,
5046        struct device_attribute *attr, char *buffer)
5047{
5048        ssize_t count = 0;
5049        struct Scsi_Host *shost;
5050        struct pqi_ctrl_info *ctrl_info;
5051
5052        shost = class_to_shost(dev);
5053        ctrl_info = shost_to_hba(shost);
5054
5055        count += snprintf(buffer + count, PAGE_SIZE - count,
5056                "  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5057
5058        count += snprintf(buffer + count, PAGE_SIZE - count,
5059                "firmware: %s\n", ctrl_info->firmware_version);
5060
5061        return count;
5062}
5063
5064static ssize_t pqi_host_rescan_store(struct device *dev,
5065        struct device_attribute *attr, const char *buffer, size_t count)
5066{
5067        struct Scsi_Host *shost = class_to_shost(dev);
5068
5069        pqi_scan_start(shost);
5070
5071        return count;
5072}
5073
5074static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5075static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5076
5077static struct device_attribute *pqi_shost_attrs[] = {
5078        &dev_attr_version,
5079        &dev_attr_rescan,
5080        NULL
5081};
5082
5083static ssize_t pqi_sas_address_show(struct device *dev,
5084        struct device_attribute *attr, char *buffer)
5085{
5086        struct pqi_ctrl_info *ctrl_info;
5087        struct scsi_device *sdev;
5088        struct pqi_scsi_dev *device;
5089        unsigned long flags;
5090        u64 sas_address;
5091
5092        sdev = to_scsi_device(dev);
5093        ctrl_info = shost_to_hba(sdev->host);
5094
5095        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5096
5097        device = sdev->hostdata;
5098        if (pqi_is_logical_device(device)) {
5099                spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5100                        flags);
5101                return -ENODEV;
5102        }
5103        sas_address = device->sas_address;
5104
5105        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5106
5107        return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5108}
5109
5110static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5111        struct device_attribute *attr, char *buffer)
5112{
5113        struct pqi_ctrl_info *ctrl_info;
5114        struct scsi_device *sdev;
5115        struct pqi_scsi_dev *device;
5116        unsigned long flags;
5117
5118        sdev = to_scsi_device(dev);
5119        ctrl_info = shost_to_hba(sdev->host);
5120
5121        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5122
5123        device = sdev->hostdata;
5124        buffer[0] = device->offload_enabled ? '1' : '0';
5125        buffer[1] = '\n';
5126        buffer[2] = '\0';
5127
5128        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5129
5130        return 2;
5131}
5132
5133static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5134static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5135        pqi_ssd_smart_path_enabled_show, NULL);
5136
5137static struct device_attribute *pqi_sdev_attrs[] = {
5138        &dev_attr_sas_address,
5139        &dev_attr_ssd_smart_path_enabled,
5140        NULL
5141};
5142
5143static struct scsi_host_template pqi_driver_template = {
5144        .module = THIS_MODULE,
5145        .name = DRIVER_NAME_SHORT,
5146        .proc_name = DRIVER_NAME_SHORT,
5147        .queuecommand = pqi_scsi_queue_command,
5148        .scan_start = pqi_scan_start,
5149        .scan_finished = pqi_scan_finished,
5150        .this_id = -1,
5151        .use_clustering = ENABLE_CLUSTERING,
5152        .eh_device_reset_handler = pqi_eh_device_reset_handler,
5153        .ioctl = pqi_ioctl,
5154        .slave_alloc = pqi_slave_alloc,
5155        .slave_configure = pqi_slave_configure,
5156        .sdev_attrs = pqi_sdev_attrs,
5157        .shost_attrs = pqi_shost_attrs,
5158};
5159
5160static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5161{
5162        int rc;
5163        struct Scsi_Host *shost;
5164
5165        shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5166        if (!shost) {
5167                dev_err(&ctrl_info->pci_dev->dev,
5168                        "scsi_host_alloc failed for controller %u\n",
5169                        ctrl_info->ctrl_id);
5170                return -ENOMEM;
5171        }
5172
5173        shost->io_port = 0;
5174        shost->n_io_port = 0;
5175        shost->this_id = -1;
5176        shost->max_channel = PQI_MAX_BUS;
5177        shost->max_cmd_len = MAX_COMMAND_SIZE;
5178        shost->max_lun = ~0;
5179        shost->max_id = ~0;
5180        shost->max_sectors = ctrl_info->max_sectors;
5181        shost->can_queue = ctrl_info->scsi_ml_can_queue;
5182        shost->cmd_per_lun = shost->can_queue;
5183        shost->sg_tablesize = ctrl_info->sg_tablesize;
5184        shost->transportt = pqi_sas_transport_template;
5185        shost->irq = ctrl_info->msix_vectors[0];
5186        shost->unique_id = shost->irq;
5187        shost->hostdata[0] = (unsigned long)ctrl_info;
5188
5189        rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5190        if (rc) {
5191                dev_err(&ctrl_info->pci_dev->dev,
5192                        "scsi_add_host failed for controller %u\n",
5193                        ctrl_info->ctrl_id);
5194                goto free_host;
5195        }
5196
5197        rc = pqi_add_sas_host(shost, ctrl_info);
5198        if (rc) {
5199                dev_err(&ctrl_info->pci_dev->dev,
5200                        "add SAS host failed for controller %u\n",
5201                        ctrl_info->ctrl_id);
5202                goto remove_host;
5203        }
5204
5205        ctrl_info->scsi_host = shost;
5206
5207        return 0;
5208
5209remove_host:
5210        scsi_remove_host(shost);
5211free_host:
5212        scsi_host_put(shost);
5213
5214        return rc;
5215}
5216
5217static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5218{
5219        struct Scsi_Host *shost;
5220
5221        pqi_delete_sas_host(ctrl_info);
5222
5223        shost = ctrl_info->scsi_host;
5224        if (!shost)
5225                return;
5226
5227        scsi_remove_host(shost);
5228        scsi_host_put(shost);
5229}
5230
5231#define PQI_RESET_ACTION_RESET          0x1
5232
5233#define PQI_RESET_TYPE_NO_RESET         0x0
5234#define PQI_RESET_TYPE_SOFT_RESET       0x1
5235#define PQI_RESET_TYPE_FIRM_RESET       0x2
5236#define PQI_RESET_TYPE_HARD_RESET       0x3
5237
5238static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5239{
5240        int rc;
5241        u32 reset_params;
5242
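        /*
         * The device reset register encodes the reset action in the
         * upper bits (shifted by 5) and the reset type in the low bits;
         * request a hard PQI reset, then wait for the controller to
         * report PQI-mode ready again.
         */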
5243        reset_params = (PQI_RESET_ACTION_RESET << 5) |
5244                PQI_RESET_TYPE_HARD_RESET;
5245
5246        writel(reset_params,
5247                &ctrl_info->pqi_registers->device_reset);
5248
5249        rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5250        if (rc)
5251                dev_err(&ctrl_info->pci_dev->dev,
5252                        "PQI reset failed\n");
5253
5254        return rc;
5255}
5256
5257static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5258{
5259        int rc;
5260        struct bmic_identify_controller *identify;
5261
5262        identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5263        if (!identify)
5264                return -ENOMEM;
5265
5266        rc = pqi_identify_controller(ctrl_info, identify);
5267        if (rc)
5268                goto out;
5269
5270        memcpy(ctrl_info->firmware_version, identify->firmware_version,
5271                sizeof(identify->firmware_version));
5272        ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5273        snprintf(ctrl_info->firmware_version +
5274                strlen(ctrl_info->firmware_version),
5275                sizeof(ctrl_info->firmware_version),
5276                "-%u", get_unaligned_le16(&identify->firmware_build_number));
5277
5278out:
5279        kfree(identify);
5280
5281        return rc;
5282}
5283
5284/* Switches the controller from PQI mode back into SIS mode. */
5285
5286static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
5287{
5288        int rc;
5289
5290        sis_disable_msix(ctrl_info);
5291        rc = pqi_reset(ctrl_info);
5292        if (rc)
5293                return rc;
5294        sis_reenable_sis_mode(ctrl_info);
5295        pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5296
5297        return 0;
5298}
5299
5300/*
5301 * If the controller isn't already in SIS mode, this function forces it into
5302 * SIS mode.
5303 */
5304
5305static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
5306{
5307        if (!sis_is_firmware_running(ctrl_info))
5308                return -ENXIO;
5309
5310        if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
5311                return 0;
5312
5313        if (sis_is_kernel_up(ctrl_info)) {
5314                pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5315                return 0;
5316        }
5317
5318        return pqi_revert_to_sis_mode(ctrl_info);
5319}
5320
5321static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5322{
5323        int rc;
5324
5325        rc = pqi_force_sis_mode(ctrl_info);
5326        if (rc)
5327                return rc;
5328
5329        /*
5330         * Wait until the controller is ready to start accepting SIS
5331         * commands.
5332         */
5333        rc = sis_wait_for_ctrl_ready(ctrl_info);
5334        if (rc) {
5335                dev_err(&ctrl_info->pci_dev->dev,
5336                        "error initializing SIS interface\n");
5337                return rc;
5338        }
5339
5340        /*
5341         * Get the controller properties.  This allows us to determine
5342         * whether or not it supports PQI mode.
5343         */
5344        rc = sis_get_ctrl_properties(ctrl_info);
5345        if (rc) {
5346                dev_err(&ctrl_info->pci_dev->dev,
5347                        "error obtaining controller properties\n");
5348                return rc;
5349        }
5350
5351        rc = sis_get_pqi_capabilities(ctrl_info);
5352        if (rc) {
5353                dev_err(&ctrl_info->pci_dev->dev,
5354                        "error obtaining controller capabilities\n");
5355                return rc;
5356        }
5357
5358        if (reset_devices) {
5359                if (ctrl_info->max_outstanding_requests >
5360                        PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
5361                        ctrl_info->max_outstanding_requests =
5362                                        PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
5363        } else {
5364                if (ctrl_info->max_outstanding_requests >
5365                        PQI_MAX_OUTSTANDING_REQUESTS)
5366                        ctrl_info->max_outstanding_requests =
5367                                        PQI_MAX_OUTSTANDING_REQUESTS;
5368        }
5369
5370        pqi_calculate_io_resources(ctrl_info);
5371
5372        rc = pqi_alloc_error_buffer(ctrl_info);
5373        if (rc) {
5374                dev_err(&ctrl_info->pci_dev->dev,
5375                        "failed to allocate PQI error buffer\n");
5376                return rc;
5377        }
5378
5379        /*
5380         * If the function we are about to call succeeds, the
5381         * controller will transition from legacy SIS mode
5382         * into PQI mode.
5383         */
5384        rc = sis_init_base_struct_addr(ctrl_info);
5385        if (rc) {
5386                dev_err(&ctrl_info->pci_dev->dev,
5387                        "error initializing PQI mode\n");
5388                return rc;
5389        }
5390
5391        /* Wait for the controller to complete the SIS -> PQI transition. */
5392        rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5393        if (rc) {
5394                dev_err(&ctrl_info->pci_dev->dev,
5395                        "transition to PQI mode failed\n");
5396                return rc;
5397        }
5398
5399        /* From here on, we are running in PQI mode. */
5400        ctrl_info->pqi_mode_enabled = true;
5401        pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5402
5403        rc = pqi_alloc_admin_queues(ctrl_info);
5404        if (rc) {
5405                dev_err(&ctrl_info->pci_dev->dev,
5406                        "error allocating admin queues\n");
5407                return rc;
5408        }
5409
5410        rc = pqi_create_admin_queues(ctrl_info);
5411        if (rc) {
5412                dev_err(&ctrl_info->pci_dev->dev,
5413                        "error creating admin queues\n");
5414                return rc;
5415        }
5416
5417        rc = pqi_report_device_capability(ctrl_info);
5418        if (rc) {
5419                dev_err(&ctrl_info->pci_dev->dev,
5420                        "obtaining device capability failed\n");
5421                return rc;
5422        }
5423
5424        rc = pqi_validate_device_capability(ctrl_info);
5425        if (rc)
5426                return rc;
5427
5428        pqi_calculate_queue_resources(ctrl_info);
5429
5430        rc = pqi_enable_msix_interrupts(ctrl_info);
5431        if (rc)
5432                return rc;
5433
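        /*
         * If fewer MSI-X vectors were granted than the queue groups were
         * sized for, shrink the vector budget and recompute the queue
         * resources before allocating them.
         */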
5434        if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5435                ctrl_info->max_msix_vectors =
5436                        ctrl_info->num_msix_vectors_enabled;
5437                pqi_calculate_queue_resources(ctrl_info);
5438        }
5439
5440        rc = pqi_alloc_io_resources(ctrl_info);
5441        if (rc)
5442                return rc;
5443
5444        rc = pqi_alloc_operational_queues(ctrl_info);
5445        if (rc)
5446                return rc;
5447
5448        pqi_init_operational_queues(ctrl_info);
5449
5450        rc = pqi_request_irqs(ctrl_info);
5451        if (rc)
5452                return rc;
5453
5454        pqi_irq_set_affinity_hint(ctrl_info);
5455
5456        rc = pqi_create_queues(ctrl_info);
5457        if (rc)
5458                return rc;
5459
5460        sis_enable_msix(ctrl_info);
5461
5462        rc = pqi_configure_events(ctrl_info);
5463        if (rc) {
5464                dev_err(&ctrl_info->pci_dev->dev,
5465                        "error configuring events\n");
5466                return rc;
5467        }
5468
5469        pqi_start_heartbeat_timer(ctrl_info);
5470
5471        ctrl_info->controller_online = true;
5472
5473        /* Register with the SCSI subsystem. */
5474        rc = pqi_register_scsi(ctrl_info);
5475        if (rc)
5476                return rc;
5477
5478        rc = pqi_get_ctrl_firmware_version(ctrl_info);
5479        if (rc) {
5480                dev_err(&ctrl_info->pci_dev->dev,
5481                        "error obtaining firmware version\n");
5482                return rc;
5483        }
5484
5485        rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5486        if (rc) {
5487                dev_err(&ctrl_info->pci_dev->dev,
5488                        "error updating host wellness\n");
5489                return rc;
5490        }
5491
5492        pqi_schedule_update_time_worker(ctrl_info);
5493
5494        pqi_scan_scsi_devices(ctrl_info);
5495
5496        return 0;
5497}
5498
5499static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5500{
5501        int rc;
5502        u64 mask;
5503
5504        rc = pci_enable_device(ctrl_info->pci_dev);
5505        if (rc) {
5506                dev_err(&ctrl_info->pci_dev->dev,
5507                        "failed to enable PCI device\n");
5508                return rc;
5509        }
5510
5511        if (sizeof(dma_addr_t) > 4)
5512                mask = DMA_BIT_MASK(64);
5513        else
5514                mask = DMA_BIT_MASK(32);
5515
5516        rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5517        if (rc) {
5518                dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5519                goto disable_device;
5520        }
5521
5522        rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5523        if (rc) {
5524                dev_err(&ctrl_info->pci_dev->dev,
5525                        "failed to obtain PCI resources\n");
5526                goto disable_device;
5527        }
5528
5529        ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5530                ctrl_info->pci_dev, 0),
5531                sizeof(struct pqi_ctrl_registers));
5532        if (!ctrl_info->iomem_base) {
5533                dev_err(&ctrl_info->pci_dev->dev,
5534                        "failed to map memory for controller registers\n");
5535                rc = -ENOMEM;
5536                goto release_regions;
5537        }
5538
5539        ctrl_info->registers = ctrl_info->iomem_base;
5540        ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5541
5542        /* Enable bus mastering. */
5543        pci_set_master(ctrl_info->pci_dev);
5544
5545        pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5546
5547        return 0;
5548
5549release_regions:
5550        pci_release_regions(ctrl_info->pci_dev);
5551disable_device:
5552        pci_disable_device(ctrl_info->pci_dev);
5553
5554        return rc;
5555}
5556
5557static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5558{
5559        iounmap(ctrl_info->iomem_base);
5560        pci_release_regions(ctrl_info->pci_dev);
5561        pci_disable_device(ctrl_info->pci_dev);
5562        pci_set_drvdata(ctrl_info->pci_dev, NULL);
5563}
5564
5565static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5566{
5567        struct pqi_ctrl_info *ctrl_info;
5568
5569        ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5570                        GFP_KERNEL, numa_node);
5571        if (!ctrl_info)
5572                return NULL;
5573
5574        mutex_init(&ctrl_info->scan_mutex);
5575
5576        INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5577        spin_lock_init(&ctrl_info->scsi_device_list_lock);
5578
5579        INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5580        atomic_set(&ctrl_info->num_interrupts, 0);
5581
5582        INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5583        INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5584
5585        sema_init(&ctrl_info->sync_request_sem,
5586                PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5587        sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5588
5589        ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5590        ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5591
5592        return ctrl_info;
5593}
5594
5595static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5596{
5597        kfree(ctrl_info);
5598}
5599
5600static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5601{
5602        pqi_irq_unset_affinity_hint(ctrl_info);
5603        pqi_free_irqs(ctrl_info);
5604        if (ctrl_info->num_msix_vectors_enabled)
5605                pci_disable_msix(ctrl_info->pci_dev);
5606}
5607
5608static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5609{
5610        pqi_stop_heartbeat_timer(ctrl_info);
5611        pqi_free_interrupts(ctrl_info);
5612        if (ctrl_info->queue_memory_base)
5613                dma_free_coherent(&ctrl_info->pci_dev->dev,
5614                        ctrl_info->queue_memory_length,
5615                        ctrl_info->queue_memory_base,
5616                        ctrl_info->queue_memory_base_dma_handle);
5617        if (ctrl_info->admin_queue_memory_base)
5618                dma_free_coherent(&ctrl_info->pci_dev->dev,
5619                        ctrl_info->admin_queue_memory_length,
5620                        ctrl_info->admin_queue_memory_base,
5621                        ctrl_info->admin_queue_memory_base_dma_handle);
5622        pqi_free_all_io_requests(ctrl_info);
5623        if (ctrl_info->error_buffer)
5624                dma_free_coherent(&ctrl_info->pci_dev->dev,
5625                        ctrl_info->error_buffer_length,
5626                        ctrl_info->error_buffer,
5627                        ctrl_info->error_buffer_dma_handle);
5628        if (ctrl_info->iomem_base)
5629                pqi_cleanup_pci_init(ctrl_info);
5630        pqi_free_ctrl_info(ctrl_info);
5631}
5632
5633static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5634{
5635        cancel_delayed_work_sync(&ctrl_info->rescan_work);
5636        cancel_delayed_work_sync(&ctrl_info->update_time_work);
5637        pqi_remove_all_scsi_devices(ctrl_info);
5638        pqi_unregister_scsi(ctrl_info);
5639        if (ctrl_info->pqi_mode_enabled)
5640                pqi_revert_to_sis_mode(ctrl_info);
5641        pqi_free_ctrl_resources(ctrl_info);
5642}
5643
5644static void pqi_print_ctrl_info(struct pci_dev *pdev,
5645        const struct pci_device_id *id)
5646{
5647        char *ctrl_description;
5648
5649        if (id->driver_data) {
5650                ctrl_description = (char *)id->driver_data;
5651        } else {
5652                switch (id->subvendor) {
5653                case PCI_VENDOR_ID_HP:
5654                        ctrl_description = hpe_branded_controller;
5655                        break;
5656                case PCI_VENDOR_ID_ADAPTEC2:
5657                default:
5658                        ctrl_description = microsemi_branded_controller;
5659                        break;
5660                }
5661        }
5662
5663        dev_info(&pdev->dev, "%s found\n", ctrl_description);
5664}
5665
5666static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5667{
5668        int rc;
5669        int node;
5670        struct pqi_ctrl_info *ctrl_info;
5671
5672        pqi_print_ctrl_info(pdev, id);
5673
5674        if (pqi_disable_device_id_wildcards &&
5675                id->subvendor == PCI_ANY_ID &&
5676                id->subdevice == PCI_ANY_ID) {
5677                dev_warn(&pdev->dev,
5678                        "controller not probed because device ID wildcards are disabled\n");
5679                return -ENODEV;
5680        }
5681
5682        if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5683                dev_warn(&pdev->dev,
5684                        "controller device ID matched using wildcards\n");
5685
5686        node = dev_to_node(&pdev->dev);
5687        if (node == NUMA_NO_NODE)
5688                set_dev_node(&pdev->dev, 0);
5689
5690        ctrl_info = pqi_alloc_ctrl_info(node);
5691        if (!ctrl_info) {
5692                dev_err(&pdev->dev,
5693                        "failed to allocate controller info block\n");
5694                return -ENOMEM;
5695        }
5696
5697        ctrl_info->pci_dev = pdev;
5698
5699        rc = pqi_pci_init(ctrl_info);
5700        if (rc)
5701                goto error;
5702
5703        rc = pqi_ctrl_init(ctrl_info);
5704        if (rc)
5705                goto error;
5706
5707        return 0;
5708
5709error:
5710        pqi_remove_ctrl(ctrl_info);
5711
5712        return rc;
5713}
5714
5715static void pqi_pci_remove(struct pci_dev *pdev)
5716{
5717        struct pqi_ctrl_info *ctrl_info;
5718
5719        ctrl_info = pci_get_drvdata(pdev);
5720        if (!ctrl_info)
5721                return;
5722
5723        pqi_remove_ctrl(ctrl_info);
5724}
5725
5726static void pqi_shutdown(struct pci_dev *pdev)
5727{
5728        int rc;
5729        struct pqi_ctrl_info *ctrl_info;
5730
5731        ctrl_info = pci_get_drvdata(pdev);
5732        if (!ctrl_info)
5733                goto error;
5734
5735        /*
5736         * Write all data in the controller's battery-backed cache to
5737         * storage.
5738         */
5739        rc = pqi_flush_cache(ctrl_info);
5740        if (rc == 0)
5741                return;
5742
5743error:
5744        dev_warn(&pdev->dev,
5745                "unable to flush controller cache\n");
5746}
5747
5748/* Define the PCI IDs for the controllers that we support. */
5749static const struct pci_device_id pqi_pci_id_table[] = {
5750        {
5751                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5752                               PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5753        },
5754        {
5755                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5756                               PCI_VENDOR_ID_HP, 0x0600)
5757        },
5758        {
5759                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5760                               PCI_VENDOR_ID_HP, 0x0601)
5761        },
5762        {
5763                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5764                               PCI_VENDOR_ID_HP, 0x0602)
5765        },
5766        {
5767                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5768                               PCI_VENDOR_ID_HP, 0x0603)
5769        },
5770        {
5771                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5772                               PCI_VENDOR_ID_HP, 0x0650)
5773        },
5774        {
5775                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5776                               PCI_VENDOR_ID_HP, 0x0651)
5777        },
5778        {
5779                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5780                               PCI_VENDOR_ID_HP, 0x0652)
5781        },
5782        {
5783                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5784                               PCI_VENDOR_ID_HP, 0x0653)
5785        },
5786        {
5787                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5788                               PCI_VENDOR_ID_HP, 0x0654)
5789        },
5790        {
5791                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5792                               PCI_VENDOR_ID_HP, 0x0655)
5793        },
5794        {
5795                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5796                               PCI_VENDOR_ID_HP, 0x0700)
5797        },
5798        {
5799                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5800                               PCI_VENDOR_ID_HP, 0x0701)
5801        },
5802        {
5803                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5804                               PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5805        },
5806        {
5807                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5808                               PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5809        },
5810        {
5811                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5812                               PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5813        },
5814        {
5815                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5816                               PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5817        },
5818        {
5819                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5820                               PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5821        },
5822        {
5823                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5824                               PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5825        },
5826        {
5827                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5828                               PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5829        },
5830        {
5831                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5832                               PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5833        },
5834        {
5835                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5836                               PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5837        },
5838        {
5839                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5840                               PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5841        },
5842        {
5843                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5844                               PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5845        },
5846        {
5847                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5848                               PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5849        },
5850        {
5851                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5852                               PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5853        },
5854        {
5855                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5856                               PCI_VENDOR_ID_HP, 0x1001)
5857        },
5858        {
5859                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5860                               PCI_VENDOR_ID_HP, 0x1100)
5861        },
5862        {
5863                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5864                               PCI_VENDOR_ID_HP, 0x1101)
5865        },
5866        {
5867                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5868                               PCI_VENDOR_ID_HP, 0x1102)
5869        },
5870        {
5871                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5872                               PCI_VENDOR_ID_HP, 0x1150)
5873        },
5874        {
5875                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5876                               PCI_ANY_ID, PCI_ANY_ID)
5877        },
5878        { 0 }
5879};
5880
5881MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
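/*
 * Each entry above matches on vendor/device plus subsystem vendor/device.
 * PCI_DEVICE_SUB() is the standard <linux/pci.h> helper that fills in the
 * .vendor, .device, .subvendor and .subdevice fields, so the final
 * wildcard entry is roughly equivalent to writing:
 *
 *	{
 *		.vendor    = PCI_VENDOR_ID_ADAPTEC2,
 *		.device    = 0x028f,
 *		.subvendor = PCI_ANY_ID,
 *		.subdevice = PCI_ANY_ID,
 *	},
 *
 * MODULE_DEVICE_TABLE(pci, ...) exports the table as module alias data so
 * that udev/modprobe can autoload this driver when a matching adapter is
 * enumerated.
 */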
5882
5883static struct pci_driver pqi_pci_driver = {
5884        .name = DRIVER_NAME_SHORT,
5885        .id_table = pqi_pci_id_table,
5886        .probe = pqi_pci_probe,
5887        .remove = pqi_pci_remove,
5888        .shutdown = pqi_shutdown,
5889};
5890
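/*
 * Module init/exit: the SAS transport template is attached before
 * pci_register_driver() is called, since probe callbacks may run (and rely
 * on the template) before pci_register_driver() returns; if registration
 * fails, the template is released again, and pqi_cleanup() tears things
 * down in the reverse order.
 */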
5891static int __init pqi_init(void)
5892{
5893        int rc;
5894
5895        pr_info(DRIVER_NAME "\n");
5896
5897        pqi_sas_transport_template =
5898                sas_attach_transport(&pqi_sas_transport_functions);
5899        if (!pqi_sas_transport_template)
5900                return -ENODEV;
5901
5902        rc = pci_register_driver(&pqi_pci_driver);
5903        if (rc)
5904                sas_release_transport(pqi_sas_transport_template);
5905
5906        return rc;
5907}
5908
5909static void __exit pqi_cleanup(void)
5910{
5911        pci_unregister_driver(&pqi_pci_driver);
5912        sas_release_transport(pqi_sas_transport_template);
5913}
5914
5915module_init(pqi_init);
5916module_exit(pqi_cleanup);
5917
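/*
 * verify_structures() is never called at run time; it exists purely so the
 * BUILD_BUG_ON() assertions below are evaluated at compile time.  The
 * checks pin the byte offsets and sizes of the structures shared with the
 * controller (SIS/PQI registers, IUs, error info, BMIC data) so that any
 * accidental change to their layout breaks the build instead of silently
 * corrupting the on-the-wire format.
 */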
5918static void __attribute__((unused)) verify_structures(void)
5919{
5920        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5921                sis_host_to_ctrl_doorbell) != 0x20);
5922        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5923                sis_interrupt_mask) != 0x34);
5924        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5925                sis_ctrl_to_host_doorbell) != 0x9c);
5926        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5927                sis_ctrl_to_host_doorbell_clear) != 0xa0);
5928        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5929                sis_driver_scratch) != 0xb0);
5930        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5931                sis_firmware_status) != 0xbc);
5932        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5933                sis_mailbox) != 0x1000);
5934        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5935                pqi_registers) != 0x4000);
5936
5937        BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5938                iu_type) != 0x0);
5939        BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5940                iu_length) != 0x2);
5941        BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5942                response_queue_id) != 0x4);
5943        BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5944                work_area) != 0x6);
5945        BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5946
5947        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5948                status) != 0x0);
5949        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5950                service_response) != 0x1);
5951        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5952                data_present) != 0x2);
5953        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5954                reserved) != 0x3);
5955        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5956                residual_count) != 0x4);
5957        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5958                data_length) != 0x8);
5959        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5960                reserved1) != 0xa);
5961        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5962                data) != 0xc);
5963        BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5964
5965        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5966                data_in_result) != 0x0);
5967        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5968                data_out_result) != 0x1);
5969        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5970                reserved) != 0x2);
5971        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5972                status) != 0x5);
5973        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5974                status_qualifier) != 0x6);
5975        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5976                sense_data_length) != 0x8);
5977        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5978                response_data_length) != 0xa);
5979        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5980                data_in_transferred) != 0xc);
5981        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5982                data_out_transferred) != 0x10);
5983        BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5984                data) != 0x14);
5985        BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5986
5987        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5988                signature) != 0x0);
5989        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5990                function_and_status_code) != 0x8);
5991        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5992                max_admin_iq_elements) != 0x10);
5993        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5994                max_admin_oq_elements) != 0x11);
5995        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5996                admin_iq_element_length) != 0x12);
5997        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5998                admin_oq_element_length) != 0x13);
5999        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6000                max_reset_timeout) != 0x14);
6001        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6002                legacy_intx_status) != 0x18);
6003        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6004                legacy_intx_mask_set) != 0x1c);
6005        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6006                legacy_intx_mask_clear) != 0x20);
6007        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6008                device_status) != 0x40);
6009        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6010                admin_iq_pi_offset) != 0x48);
6011        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6012                admin_oq_ci_offset) != 0x50);
6013        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6014                admin_iq_element_array_addr) != 0x58);
6015        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6016                admin_oq_element_array_addr) != 0x60);
6017        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6018                admin_iq_ci_addr) != 0x68);
6019        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6020                admin_oq_pi_addr) != 0x70);
6021        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6022                admin_iq_num_elements) != 0x78);
6023        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6024                admin_oq_num_elements) != 0x79);
6025        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6026                admin_queue_int_msg_num) != 0x7a);
6027        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6028                device_error) != 0x80);
6029        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6030                error_details) != 0x88);
6031        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6032                device_reset) != 0x90);
6033        BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6034                power_action) != 0x94);
6035        BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6036
6037        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6038                header.iu_type) != 0);
6039        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6040                header.iu_length) != 2);
6041        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6042                header.work_area) != 6);
6043        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6044                request_id) != 8);
6045        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6046                function_code) != 10);
6047        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6048                data.report_device_capability.buffer_length) != 44);
6049        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6050                data.report_device_capability.sg_descriptor) != 48);
6051        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6052                data.create_operational_iq.queue_id) != 12);
6053        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6054                data.create_operational_iq.element_array_addr) != 16);
6055        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6056                data.create_operational_iq.ci_addr) != 24);
6057        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6058                data.create_operational_iq.num_elements) != 32);
6059        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6060                data.create_operational_iq.element_length) != 34);
6061        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6062                data.create_operational_iq.queue_protocol) != 36);
6063        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6064                data.create_operational_oq.queue_id) != 12);
6065        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6066                data.create_operational_oq.element_array_addr) != 16);
6067        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6068                data.create_operational_oq.pi_addr) != 24);
6069        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6070                data.create_operational_oq.num_elements) != 32);
6071        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6072                data.create_operational_oq.element_length) != 34);
6073        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6074                data.create_operational_oq.queue_protocol) != 36);
6075        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6076                data.create_operational_oq.int_msg_num) != 40);
6077        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6078                data.create_operational_oq.coalescing_count) != 42);
6079        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6080                data.create_operational_oq.min_coalescing_time) != 44);
6081        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6082                data.create_operational_oq.max_coalescing_time) != 48);
6083        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6084                data.delete_operational_queue.queue_id) != 12);
6085        BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6086        BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6087                data.create_operational_iq) != 64 - 11);
6088        BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6089                data.create_operational_oq) != 64 - 11);
6090        BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6091                data.delete_operational_queue) != 64 - 11);
6092
6093        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6094                header.iu_type) != 0);
6095        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6096                header.iu_length) != 2);
6097        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6098                header.work_area) != 6);
6099        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6100                request_id) != 8);
6101        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6102                function_code) != 10);
6103        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6104                status) != 11);
6105        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6106                data.create_operational_iq.status_descriptor) != 12);
6107        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6108                data.create_operational_iq.iq_pi_offset) != 16);
6109        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6110                data.create_operational_oq.status_descriptor) != 12);
6111        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6112                data.create_operational_oq.oq_ci_offset) != 16);
6113        BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6114
6115        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6116                header.iu_type) != 0);
6117        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6118                header.iu_length) != 2);
6119        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6120                header.response_queue_id) != 4);
6121        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6122                header.work_area) != 6);
6123        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6124                request_id) != 8);
6125        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6126                nexus_id) != 10);
6127        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6128                buffer_length) != 12);
6129        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6130                lun_number) != 16);
6131        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6132                protocol_specific) != 24);
6133        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6134                error_index) != 27);
6135        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6136                cdb) != 32);
6137        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6138                sg_descriptors) != 64);
6139        BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6140                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6141
6142        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6143                header.iu_type) != 0);
6144        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6145                header.iu_length) != 2);
6146        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6147                header.response_queue_id) != 4);
6148        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6149                header.work_area) != 6);
6150        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6151                request_id) != 8);
6152        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6153                nexus_id) != 12);
6154        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6155                buffer_length) != 16);
6156        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6157                data_encryption_key_index) != 22);
6158        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6159                encrypt_tweak_lower) != 24);
6160        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6161                encrypt_tweak_upper) != 28);
6162        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6163                cdb) != 32);
6164        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6165                error_index) != 48);
6166        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6167                num_sg_descriptors) != 50);
6168        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6169                cdb_length) != 51);
6170        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6171                lun_number) != 52);
6172        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6173                sg_descriptors) != 64);
6174        BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6175                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6176
6177        BUILD_BUG_ON(offsetof(struct pqi_io_response,
6178                header.iu_type) != 0);
6179        BUILD_BUG_ON(offsetof(struct pqi_io_response,
6180                header.iu_length) != 2);
6181        BUILD_BUG_ON(offsetof(struct pqi_io_response,
6182                request_id) != 8);
6183        BUILD_BUG_ON(offsetof(struct pqi_io_response,
6184                error_index) != 10);
6185
6186        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6187                header.iu_type) != 0);
6188        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6189                header.iu_length) != 2);
6190        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6191                header.response_queue_id) != 4);
6192        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6193                request_id) != 8);
6194        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6195                data.report_event_configuration.buffer_length) != 12);
6196        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6197                data.report_event_configuration.sg_descriptors) != 16);
6198        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6199                data.set_event_configuration.global_event_oq_id) != 10);
6200        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6201                data.set_event_configuration.buffer_length) != 12);
6202        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6203                data.set_event_configuration.sg_descriptors) != 16);
6204
6205        BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6206                max_inbound_iu_length) != 6);
6207        BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6208                max_outbound_iu_length) != 14);
6209        BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6210
6211        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6212                data_length) != 0);
6213        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6214                iq_arbitration_priority_support_bitmask) != 8);
6215        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6216                maximum_aw_a) != 9);
6217        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6218                maximum_aw_b) != 10);
6219        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6220                maximum_aw_c) != 11);
6221        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6222                max_inbound_queues) != 16);
6223        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6224                max_elements_per_iq) != 18);
6225        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6226                max_iq_element_length) != 24);
6227        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6228                min_iq_element_length) != 26);
6229        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6230                max_outbound_queues) != 30);
6231        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6232                max_elements_per_oq) != 32);
6233        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6234                intr_coalescing_time_granularity) != 34);
6235        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6236                max_oq_element_length) != 36);
6237        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6238                min_oq_element_length) != 38);
6239        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6240                iu_layer_descriptors) != 64);
6241        BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6242
6243        BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6244                event_type) != 0);
6245        BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6246                oq_id) != 2);
6247        BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6248
6249        BUILD_BUG_ON(offsetof(struct pqi_event_config,
6250                num_event_descriptors) != 2);
6251        BUILD_BUG_ON(offsetof(struct pqi_event_config,
6252                descriptors) != 4);
6253
6254        BUILD_BUG_ON(offsetof(struct pqi_event_response,
6255                header.iu_type) != 0);
6256        BUILD_BUG_ON(offsetof(struct pqi_event_response,
6257                header.iu_length) != 2);
6258        BUILD_BUG_ON(offsetof(struct pqi_event_response,
6259                event_type) != 8);
6260        BUILD_BUG_ON(offsetof(struct pqi_event_response,
6261                event_id) != 10);
6262        BUILD_BUG_ON(offsetof(struct pqi_event_response,
6263                additional_event_id) != 12);
6264        BUILD_BUG_ON(offsetof(struct pqi_event_response,
6265                data) != 16);
6266        BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6267
6268        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6269                header.iu_type) != 0);
6270        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6271                header.iu_length) != 2);
6272        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6273                event_type) != 8);
6274        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6275                event_id) != 10);
6276        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6277                additional_event_id) != 12);
6278        BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6279
6280        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6281                header.iu_type) != 0);
6282        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6283                header.iu_length) != 2);
6284        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6285                request_id) != 8);
6286        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6287                nexus_id) != 10);
6288        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6289                lun_number) != 16);
6290        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6291                protocol_specific) != 24);
6292        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6293                outbound_queue_id_to_manage) != 26);
6294        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6295                request_id_to_manage) != 28);
6296        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6297                task_management_function) != 30);
6298        BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6299
6300        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6301                header.iu_type) != 0);
6302        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6303                header.iu_length) != 2);
6304        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6305                request_id) != 8);
6306        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6307                nexus_id) != 10);
6308        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6309                additional_response_info) != 12);
6310        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6311                response_code) != 15);
6312        BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6313
6314        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6315                configured_logical_drive_count) != 0);
6316        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6317                configuration_signature) != 1);
6318        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6319                firmware_version) != 5);
6320        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6321                extended_logical_unit_count) != 154);
6322        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6323                firmware_build_number) != 190);
6324        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6325                controller_mode) != 292);
6326
6327        BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6328        BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6329        BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6330                PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6331        BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6332                PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6333        BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6334        BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6335                PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6336        BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6337        BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6338                PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6339
6340        BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6341        BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
6342                PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
6343}
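
/*
 * As an illustration of the technique used above (not part of the driver),
 * a hypothetical wire structure can be pinned the same way: declare it
 * __packed, then assert each documented offset and the total size with
 * BUILD_BUG_ON()/offsetof(), which costs nothing at run time.
 */
#if 0	/* example only */
struct example_wire_header {
	__le16	type;		/* bytes 0-1 */
	__le16	length;		/* bytes 2-3 */
	__le32	tag;		/* bytes 4-7 */
} __packed;

static void __attribute__((unused)) example_verify_layout(void)
{
	BUILD_BUG_ON(offsetof(struct example_wire_header, type) != 0);
	BUILD_BUG_ON(offsetof(struct example_wire_header, length) != 2);
	BUILD_BUG_ON(offsetof(struct example_wire_header, tag) != 4);
	BUILD_BUG_ON(sizeof(struct example_wire_header) != 8);
}
#endif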
6344