linux/drivers/scsi/hpsa.c
   1/*
   2 *    Disk Array driver for HP Smart Array SAS controllers
   3 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
   4 *    Copyright 2016 Microsemi Corporation
   5 *    Copyright 2014-2015 PMC-Sierra, Inc.
   6 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
   7 *
   8 *    This program is free software; you can redistribute it and/or modify
   9 *    it under the terms of the GNU General Public License as published by
  10 *    the Free Software Foundation; version 2 of the License.
  11 *
  12 *    This program is distributed in the hope that it will be useful,
  13 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  15 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
  16 *
  17 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
  18 *
  19 */
  20
  21#include <linux/module.h>
  22#include <linux/interrupt.h>
  23#include <linux/types.h>
  24#include <linux/pci.h>
  25#include <linux/kernel.h>
  26#include <linux/slab.h>
  27#include <linux/delay.h>
  28#include <linux/fs.h>
  29#include <linux/timer.h>
  30#include <linux/init.h>
  31#include <linux/spinlock.h>
  32#include <linux/compat.h>
  33#include <linux/blktrace_api.h>
  34#include <linux/uaccess.h>
  35#include <linux/io.h>
  36#include <linux/dma-mapping.h>
  37#include <linux/completion.h>
  38#include <linux/moduleparam.h>
  39#include <scsi/scsi.h>
  40#include <scsi/scsi_cmnd.h>
  41#include <scsi/scsi_device.h>
  42#include <scsi/scsi_host.h>
  43#include <scsi/scsi_tcq.h>
  44#include <scsi/scsi_eh.h>
  45#include <scsi/scsi_transport_sas.h>
  46#include <scsi/scsi_dbg.h>
  47#include <linux/cciss_ioctl.h>
  48#include <linux/string.h>
  49#include <linux/bitmap.h>
  50#include <linux/atomic.h>
  51#include <linux/jiffies.h>
  52#include <linux/percpu-defs.h>
  53#include <linux/percpu.h>
  54#include <asm/unaligned.h>
  55#include <asm/div64.h>
  56#include "hpsa_cmd.h"
  57#include "hpsa.h"
  58
  59/*
   60 * HPSA_DRIVER_VERSION must be three byte-sized values (0-255) separated
   61 * by '.', with an optional trailing '-' followed by a byte value (0-255).
  62 */
  63#define HPSA_DRIVER_VERSION "3.4.20-200"
  64#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
  65#define HPSA "hpsa"
  66
  67/* How long to wait for CISS doorbell communication */
  68#define CLEAR_EVENT_WAIT_INTERVAL 20    /* ms for each msleep() call */
  69#define MODE_CHANGE_WAIT_INTERVAL 10    /* ms for each msleep() call */
  70#define MAX_CLEAR_EVENT_WAIT 30000      /* times 20 ms = 600 s */
  71#define MAX_MODE_CHANGE_WAIT 2000       /* times 10 ms = 20 s */
  72#define MAX_IOCTL_CONFIG_WAIT 1000
  73
   74/* define how many times we will try a command because of bus resets */
  75#define MAX_CMD_RETRIES 3
  76/* How long to wait before giving up on a command */
  77#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
  78
  79/* Embedded module documentation macros - see modules.h */
  80MODULE_AUTHOR("Hewlett-Packard Company");
  81MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
  82        HPSA_DRIVER_VERSION);
  83MODULE_VERSION(HPSA_DRIVER_VERSION);
  84MODULE_LICENSE("GPL");
  85MODULE_ALIAS("cciss");
  86
  87static int hpsa_simple_mode;
  88module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
  89MODULE_PARM_DESC(hpsa_simple_mode,
  90        "Use 'simple mode' rather than 'performant mode'");
  91
  92/* define the PCI info for the cards we can control */
  93static const struct pci_device_id hpsa_pci_device_id[] = {
  94        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
  95        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
  96        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
  97        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
  98        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
  99        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
 100        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
 101        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
 102        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
 103        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
 104        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
 105        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
 106        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
 107        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
 108        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
 109        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103c, 0x1920},
 110        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
 111        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
 112        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
 113        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
 114        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103c, 0x1925},
 115        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
 116        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
 117        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
 118        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
 119        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
 120        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
 121        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
 122        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
 123        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
 124        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
 125        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
 126        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
 127        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
 128        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
 129        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
 130        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
 131        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
 132        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
 133        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
 134        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
 135        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
 136        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
 137        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
 138        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
 139        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
 140        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
 141        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
 142        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
 143        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
 144        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
 145        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
 146        {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
 147        {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
 148                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 149        {PCI_VENDOR_ID_COMPAQ,     PCI_ANY_ID,  PCI_ANY_ID, PCI_ANY_ID,
 150                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 151        {0,}
 152};
 153
 154MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
 155
 156/*  board_id = Subsystem Device ID & Vendor ID
 157 *  product = Marketing Name for the board
 158 *  access = Address of the struct of function pointers
 159 */
 160static struct board_type products[] = {
 161        {0x40700E11, "Smart Array 5300", &SA5A_access},
 162        {0x40800E11, "Smart Array 5i", &SA5B_access},
 163        {0x40820E11, "Smart Array 532", &SA5B_access},
 164        {0x40830E11, "Smart Array 5312", &SA5B_access},
 165        {0x409A0E11, "Smart Array 641", &SA5A_access},
 166        {0x409B0E11, "Smart Array 642", &SA5A_access},
 167        {0x409C0E11, "Smart Array 6400", &SA5A_access},
 168        {0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
 169        {0x40910E11, "Smart Array 6i", &SA5A_access},
 170        {0x3225103C, "Smart Array P600", &SA5A_access},
 171        {0x3223103C, "Smart Array P800", &SA5A_access},
 172        {0x3234103C, "Smart Array P400", &SA5A_access},
 173        {0x3235103C, "Smart Array P400i", &SA5A_access},
 174        {0x3211103C, "Smart Array E200i", &SA5A_access},
 175        {0x3212103C, "Smart Array E200", &SA5A_access},
 176        {0x3213103C, "Smart Array E200i", &SA5A_access},
 177        {0x3214103C, "Smart Array E200i", &SA5A_access},
 178        {0x3215103C, "Smart Array E200i", &SA5A_access},
 179        {0x3237103C, "Smart Array E500", &SA5A_access},
 180        {0x323D103C, "Smart Array P700m", &SA5A_access},
 181        {0x3241103C, "Smart Array P212", &SA5_access},
 182        {0x3243103C, "Smart Array P410", &SA5_access},
 183        {0x3245103C, "Smart Array P410i", &SA5_access},
 184        {0x3247103C, "Smart Array P411", &SA5_access},
 185        {0x3249103C, "Smart Array P812", &SA5_access},
 186        {0x324A103C, "Smart Array P712m", &SA5_access},
 187        {0x324B103C, "Smart Array P711m", &SA5_access},
 188        {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
 189        {0x3350103C, "Smart Array P222", &SA5_access},
 190        {0x3351103C, "Smart Array P420", &SA5_access},
 191        {0x3352103C, "Smart Array P421", &SA5_access},
 192        {0x3353103C, "Smart Array P822", &SA5_access},
 193        {0x3354103C, "Smart Array P420i", &SA5_access},
 194        {0x3355103C, "Smart Array P220i", &SA5_access},
 195        {0x3356103C, "Smart Array P721m", &SA5_access},
 196        {0x1920103C, "Smart Array P430i", &SA5_access},
 197        {0x1921103C, "Smart Array P830i", &SA5_access},
 198        {0x1922103C, "Smart Array P430", &SA5_access},
 199        {0x1923103C, "Smart Array P431", &SA5_access},
 200        {0x1924103C, "Smart Array P830", &SA5_access},
 201        {0x1925103C, "Smart Array P831", &SA5_access},
 202        {0x1926103C, "Smart Array P731m", &SA5_access},
 203        {0x1928103C, "Smart Array P230i", &SA5_access},
 204        {0x1929103C, "Smart Array P530", &SA5_access},
 205        {0x21BD103C, "Smart Array P244br", &SA5_access},
 206        {0x21BE103C, "Smart Array P741m", &SA5_access},
 207        {0x21BF103C, "Smart HBA H240ar", &SA5_access},
 208        {0x21C0103C, "Smart Array P440ar", &SA5_access},
 209        {0x21C1103C, "Smart Array P840ar", &SA5_access},
 210        {0x21C2103C, "Smart Array P440", &SA5_access},
 211        {0x21C3103C, "Smart Array P441", &SA5_access},
 212        {0x21C4103C, "Smart Array", &SA5_access},
 213        {0x21C5103C, "Smart Array P841", &SA5_access},
 214        {0x21C6103C, "Smart HBA H244br", &SA5_access},
 215        {0x21C7103C, "Smart HBA H240", &SA5_access},
 216        {0x21C8103C, "Smart HBA H241", &SA5_access},
 217        {0x21C9103C, "Smart Array", &SA5_access},
 218        {0x21CA103C, "Smart Array P246br", &SA5_access},
 219        {0x21CB103C, "Smart Array P840", &SA5_access},
 220        {0x21CC103C, "Smart Array", &SA5_access},
 221        {0x21CD103C, "Smart Array", &SA5_access},
 222        {0x21CE103C, "Smart HBA", &SA5_access},
 223        {0x05809005, "SmartHBA-SA", &SA5_access},
 224        {0x05819005, "SmartHBA-SA 8i", &SA5_access},
 225        {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
 226        {0x05839005, "SmartHBA-SA 8e", &SA5_access},
 227        {0x05849005, "SmartHBA-SA 16i", &SA5_access},
 228        {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
 229        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
 230        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
 231        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
 232        {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
 233        {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
 234        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
 235};
 236
 237static struct scsi_transport_template *hpsa_sas_transport_template;
 238static int hpsa_add_sas_host(struct ctlr_info *h);
 239static void hpsa_delete_sas_host(struct ctlr_info *h);
 240static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
 241                        struct hpsa_scsi_dev_t *device);
 242static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
 243static struct hpsa_scsi_dev_t
 244        *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
 245                struct sas_rphy *rphy);
 246
 247#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
 248static const struct scsi_cmnd hpsa_cmd_busy;
 249#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
 250static const struct scsi_cmnd hpsa_cmd_idle;
 251static int number_of_controllers;
 252
 253static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 254static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 255static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
 256                      void __user *arg);
 257static int hpsa_passthru_ioctl(struct ctlr_info *h,
 258                               IOCTL_Command_struct *iocommand);
 259static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
 260                                   BIG_IOCTL_Command_struct *ioc);
 261
 262#ifdef CONFIG_COMPAT
 263static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
 264        void __user *arg);
 265#endif
 266
 267static void cmd_free(struct ctlr_info *h, struct CommandList *c);
 268static struct CommandList *cmd_alloc(struct ctlr_info *h);
 269static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
 270static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
 271                                            struct scsi_cmnd *scmd);
 272static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 273        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 274        int cmd_type);
 275static void hpsa_free_cmd_pool(struct ctlr_info *h);
 276#define VPD_PAGE (1 << 8)
 277#define HPSA_SIMPLE_ERROR_BITS 0x03
 278
 279static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 280static void hpsa_scan_start(struct Scsi_Host *);
 281static int hpsa_scan_finished(struct Scsi_Host *sh,
 282        unsigned long elapsed_time);
 283static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
 284
 285static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
 286static int hpsa_slave_alloc(struct scsi_device *sdev);
 287static int hpsa_slave_configure(struct scsi_device *sdev);
 288static void hpsa_slave_destroy(struct scsi_device *sdev);
 289
 290static void hpsa_update_scsi_devices(struct ctlr_info *h);
 291static int check_for_unit_attention(struct ctlr_info *h,
 292        struct CommandList *c);
 293static void check_ioctl_unit_attention(struct ctlr_info *h,
 294        struct CommandList *c);
 295/* performant mode helper functions */
 296static void calc_bucket_map(int *bucket, int num_buckets,
 297        int nsgs, int min_blocks, u32 *bucket_map);
 298static void hpsa_free_performant_mode(struct ctlr_info *h);
 299static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 300static inline u32 next_command(struct ctlr_info *h, u8 q);
 301static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
 302                               u32 *cfg_base_addr, u64 *cfg_base_addr_index,
 303                               u64 *cfg_offset);
 304static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 305                                    unsigned long *memory_bar);
 306static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
 307                                bool *legacy_board);
 308static int wait_for_device_to_become_ready(struct ctlr_info *h,
 309                                           unsigned char lunaddr[],
 310                                           int reply_queue);
 311static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
 312                                     int wait_for_ready);
 313static inline void finish_cmd(struct CommandList *c);
 314static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
 315#define BOARD_NOT_READY 0
 316#define BOARD_READY 1
 317static void hpsa_drain_accel_commands(struct ctlr_info *h);
 318static void hpsa_flush_cache(struct ctlr_info *h);
 319static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
 320        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
 321        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
 322static void hpsa_command_resubmit_worker(struct work_struct *work);
 323static u32 lockup_detected(struct ctlr_info *h);
 324static int detect_controller_lockup(struct ctlr_info *h);
 325static void hpsa_disable_rld_caching(struct ctlr_info *h);
 326static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
 327        struct ReportExtendedLUNdata *buf, int bufsize);
 328static bool hpsa_vpd_page_supported(struct ctlr_info *h,
 329        unsigned char scsi3addr[], u8 page);
 330static int hpsa_luns_changed(struct ctlr_info *h);
 331static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
 332                               struct hpsa_scsi_dev_t *dev,
 333                               unsigned char *scsi3addr);
 334
 335static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 336{
 337        unsigned long *priv = shost_priv(sdev->host);
 338        return (struct ctlr_info *) *priv;
 339}
 340
 341static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
 342{
 343        unsigned long *priv = shost_priv(sh);
 344        return (struct ctlr_info *) *priv;
 345}
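
/*
 * For illustration: both helpers above rely on the convention that the
 * first unsigned long of the Scsi_Host private area holds the ctlr_info
 * pointer.  A minimal sketch of how that slot gets populated when the
 * host is allocated (assuming the standard midlayer API):
 *
 *	struct Scsi_Host *sh;
 *
 *	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(unsigned long));
 *	if (sh)
 *		sh->hostdata[0] = (unsigned long)h;
 *
 * shost_priv(sh) then returns &sh->hostdata[0], and dereferencing it
 * recovers h.
 */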
 346
 347static inline bool hpsa_is_cmd_idle(struct CommandList *c)
 348{
 349        return c->scsi_cmd == SCSI_CMD_IDLE;
 350}
 351
 352/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
 353static void decode_sense_data(const u8 *sense_data, int sense_data_len,
 354                        u8 *sense_key, u8 *asc, u8 *ascq)
 355{
 356        struct scsi_sense_hdr sshdr;
 357        bool rc;
 358
 359        *sense_key = -1;
 360        *asc = -1;
 361        *ascq = -1;
 362
 363        if (sense_data_len < 1)
 364                return;
 365
 366        rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
 367        if (rc) {
 368                *sense_key = sshdr.sense_key;
 369                *asc = sshdr.asc;
 370                *ascq = sshdr.ascq;
 371        }
 372}
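
/*
 * Illustrative example: decoding a fixed-format sense buffer.  For the
 * bytes below, scsi_normalize_sense() succeeds and the helper reports
 * sense_key = UNIT_ATTENTION (0x06), asc = 0x29, ascq = 0x00
 * ("power on, reset, or bus device reset occurred"):
 *
 *	u8 key, asc, ascq;
 *	static const u8 sense[18] = {
 *		0x70, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, // fixed format
 *		0x0a, 0x00, 0x00, 0x00, 0x00,             // additional len
 *		0x29, 0x00,                               // asc, ascq
 *	};
 *
 *	decode_sense_data(sense, sizeof(sense), &key, &asc, &ascq);
 */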
 373
 374static int check_for_unit_attention(struct ctlr_info *h,
 375        struct CommandList *c)
 376{
 377        u8 sense_key, asc, ascq;
 378        int sense_len;
 379
 380        if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
 381                sense_len = sizeof(c->err_info->SenseInfo);
 382        else
 383                sense_len = c->err_info->SenseLen;
 384
 385        decode_sense_data(c->err_info->SenseInfo, sense_len,
 386                                &sense_key, &asc, &ascq);
 387        if (sense_key != UNIT_ATTENTION || asc == 0xff)
 388                return 0;
 389
 390        switch (asc) {
 391        case STATE_CHANGED:
 392                dev_warn(&h->pdev->dev,
 393                        "%s: a state change detected, command retried\n",
 394                        h->devname);
 395                break;
 396        case LUN_FAILED:
 397                dev_warn(&h->pdev->dev,
 398                        "%s: LUN failure detected\n", h->devname);
 399                break;
 400        case REPORT_LUNS_CHANGED:
 401                dev_warn(&h->pdev->dev,
 402                        "%s: report LUN data changed\n", h->devname);
  403                /*
  404                 * Note: this REPORT_LUNS_CHANGED condition only occurs
  405                 * on the external target (array) devices.
  406                 */
 407                break;
 408        case POWER_OR_RESET:
 409                dev_warn(&h->pdev->dev,
 410                        "%s: a power on or device reset detected\n",
 411                        h->devname);
 412                break;
 413        case UNIT_ATTENTION_CLEARED:
 414                dev_warn(&h->pdev->dev,
 415                        "%s: unit attention cleared by another initiator\n",
 416                        h->devname);
 417                break;
 418        default:
 419                dev_warn(&h->pdev->dev,
 420                        "%s: unknown unit attention detected\n",
 421                        h->devname);
 422                break;
 423        }
 424        return 1;
 425}
 426
 427static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
 428{
 429        if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
 430                (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
 431                 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
 432                return 0;
 433        dev_warn(&h->pdev->dev, HPSA "device busy");
 434        return 1;
 435}
 436
 437static u32 lockup_detected(struct ctlr_info *h);
 438static ssize_t host_show_lockup_detected(struct device *dev,
 439                struct device_attribute *attr, char *buf)
 440{
 441        int ld;
 442        struct ctlr_info *h;
 443        struct Scsi_Host *shost = class_to_shost(dev);
 444
 445        h = shost_to_hba(shost);
 446        ld = lockup_detected(h);
 447
 448        return sprintf(buf, "ld=%d\n", ld);
 449}
 450
 451static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
 452                                         struct device_attribute *attr,
 453                                         const char *buf, size_t count)
 454{
 455        int status, len;
 456        struct ctlr_info *h;
 457        struct Scsi_Host *shost = class_to_shost(dev);
 458        char tmpbuf[10];
 459
 460        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
 461                return -EACCES;
 462        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
 463        strncpy(tmpbuf, buf, len);
 464        tmpbuf[len] = '\0';
 465        if (sscanf(tmpbuf, "%d", &status) != 1)
 466                return -EINVAL;
 467        h = shost_to_hba(shost);
 468        h->acciopath_status = !!status;
 469        dev_warn(&h->pdev->dev,
 470                "hpsa: HP SSD Smart Path %s via sysfs update.\n",
 471                h->acciopath_status ? "enabled" : "disabled");
 472        return count;
 473}
 474
 475static ssize_t host_store_raid_offload_debug(struct device *dev,
 476                                         struct device_attribute *attr,
 477                                         const char *buf, size_t count)
 478{
 479        int debug_level, len;
 480        struct ctlr_info *h;
 481        struct Scsi_Host *shost = class_to_shost(dev);
 482        char tmpbuf[10];
 483
 484        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
 485                return -EACCES;
 486        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
 487        strncpy(tmpbuf, buf, len);
 488        tmpbuf[len] = '\0';
 489        if (sscanf(tmpbuf, "%d", &debug_level) != 1)
 490                return -EINVAL;
 491        if (debug_level < 0)
 492                debug_level = 0;
 493        h = shost_to_hba(shost);
 494        h->raid_offload_debug = debug_level;
 495        dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
 496                h->raid_offload_debug);
 497        return count;
 498}
 499
 500static ssize_t host_store_rescan(struct device *dev,
 501                                 struct device_attribute *attr,
 502                                 const char *buf, size_t count)
 503{
 504        struct ctlr_info *h;
 505        struct Scsi_Host *shost = class_to_shost(dev);
 506        h = shost_to_hba(shost);
 507        hpsa_scan_start(h->scsi_host);
 508        return count;
 509}
 510
 511static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
 512{
 513        device->offload_enabled = 0;
 514        device->offload_to_be_enabled = 0;
 515}
 516
 517static ssize_t host_show_firmware_revision(struct device *dev,
 518             struct device_attribute *attr, char *buf)
 519{
 520        struct ctlr_info *h;
 521        struct Scsi_Host *shost = class_to_shost(dev);
 522        unsigned char *fwrev;
 523
 524        h = shost_to_hba(shost);
 525        if (!h->hba_inquiry_data)
 526                return 0;
 527        fwrev = &h->hba_inquiry_data[32];
 528        return snprintf(buf, 20, "%c%c%c%c\n",
 529                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
 530}
 531
 532static ssize_t host_show_commands_outstanding(struct device *dev,
 533             struct device_attribute *attr, char *buf)
 534{
 535        struct Scsi_Host *shost = class_to_shost(dev);
 536        struct ctlr_info *h = shost_to_hba(shost);
 537
 538        return snprintf(buf, 20, "%d\n",
 539                        atomic_read(&h->commands_outstanding));
 540}
 541
 542static ssize_t host_show_transport_mode(struct device *dev,
 543        struct device_attribute *attr, char *buf)
 544{
 545        struct ctlr_info *h;
 546        struct Scsi_Host *shost = class_to_shost(dev);
 547
 548        h = shost_to_hba(shost);
 549        return snprintf(buf, 20, "%s\n",
 550                h->transMethod & CFGTBL_Trans_Performant ?
 551                        "performant" : "simple");
 552}
 553
 554static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
 555        struct device_attribute *attr, char *buf)
 556{
 557        struct ctlr_info *h;
 558        struct Scsi_Host *shost = class_to_shost(dev);
 559
 560        h = shost_to_hba(shost);
 561        return snprintf(buf, 30, "HP SSD Smart Path %s\n",
 562                (h->acciopath_status == 1) ?  "enabled" : "disabled");
 563}
 564
 565/* List of controllers which cannot be hard reset on kexec with reset_devices */
 566static u32 unresettable_controller[] = {
 567        0x324a103C, /* Smart Array P712m */
 568        0x324b103C, /* Smart Array P711m */
 569        0x3223103C, /* Smart Array P800 */
 570        0x3234103C, /* Smart Array P400 */
 571        0x3235103C, /* Smart Array P400i */
 572        0x3211103C, /* Smart Array E200i */
 573        0x3212103C, /* Smart Array E200 */
 574        0x3213103C, /* Smart Array E200i */
 575        0x3214103C, /* Smart Array E200i */
 576        0x3215103C, /* Smart Array E200i */
 577        0x3237103C, /* Smart Array E500 */
 578        0x323D103C, /* Smart Array P700m */
 579        0x40800E11, /* Smart Array 5i */
 580        0x409C0E11, /* Smart Array 6400 */
 581        0x409D0E11, /* Smart Array 6400 EM */
 582        0x40700E11, /* Smart Array 5300 */
 583        0x40820E11, /* Smart Array 532 */
 584        0x40830E11, /* Smart Array 5312 */
 585        0x409A0E11, /* Smart Array 641 */
 586        0x409B0E11, /* Smart Array 642 */
 587        0x40910E11, /* Smart Array 6i */
 588};
 589
 590/* List of controllers which cannot even be soft reset */
 591static u32 soft_unresettable_controller[] = {
 592        0x40800E11, /* Smart Array 5i */
 593        0x40700E11, /* Smart Array 5300 */
 594        0x40820E11, /* Smart Array 532 */
 595        0x40830E11, /* Smart Array 5312 */
 596        0x409A0E11, /* Smart Array 641 */
 597        0x409B0E11, /* Smart Array 642 */
 598        0x40910E11, /* Smart Array 6i */
 599        /* Exclude 640x boards.  These are two pci devices in one slot
 600         * which share a battery backed cache module.  One controls the
 601         * cache, the other accesses the cache through the one that controls
 602         * it.  If we reset the one controlling the cache, the other will
 603         * likely not be happy.  Just forbid resetting this conjoined mess.
 604         * The 640x isn't really supported by hpsa anyway.
 605         */
 606        0x409C0E11, /* Smart Array 6400 */
 607        0x409D0E11, /* Smart Array 6400 EM */
 608};
 609
 610static int board_id_in_array(u32 a[], int nelems, u32 board_id)
 611{
 612        int i;
 613
 614        for (i = 0; i < nelems; i++)
 615                if (a[i] == board_id)
 616                        return 1;
 617        return 0;
 618}
 619
 620static int ctlr_is_hard_resettable(u32 board_id)
 621{
 622        return !board_id_in_array(unresettable_controller,
 623                        ARRAY_SIZE(unresettable_controller), board_id);
 624}
 625
 626static int ctlr_is_soft_resettable(u32 board_id)
 627{
 628        return !board_id_in_array(soft_unresettable_controller,
 629                        ARRAY_SIZE(soft_unresettable_controller), board_id);
 630}
 631
 632static int ctlr_is_resettable(u32 board_id)
 633{
 634        return ctlr_is_hard_resettable(board_id) ||
 635                ctlr_is_soft_resettable(board_id);
 636}
 637
 638static ssize_t host_show_resettable(struct device *dev,
 639        struct device_attribute *attr, char *buf)
 640{
 641        struct ctlr_info *h;
 642        struct Scsi_Host *shost = class_to_shost(dev);
 643
 644        h = shost_to_hba(shost);
 645        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
 646}
 647
 648static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
 649{
 650        return (scsi3addr[3] & 0xC0) == 0x40;
 651}
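
/*
 * In the 8-byte CISS LUN address, the top two bits of byte 3 select the
 * addressing mode, and 01b denotes logical (volume) addressing.  For
 * example, assuming a logical volume addressed as below, the helper
 * returns true:
 *
 *	unsigned char addr[8] = { 0, 0, 0, 0x40, 0, 0, 0, 0 };
 *
 *	is_logical_dev_addr_mode(addr);	// (0x40 & 0xC0) == 0x40
 */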
 652
 653static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
 654        "1(+0)ADM", "UNKNOWN", "PHYS DRV"
 655};
 656#define HPSA_RAID_0     0
 657#define HPSA_RAID_4     1
 658#define HPSA_RAID_1     2       /* also used for RAID 10 */
 659#define HPSA_RAID_5     3       /* also used for RAID 50 */
 660#define HPSA_RAID_51    4
 661#define HPSA_RAID_6     5       /* also used for RAID 60 */
 662#define HPSA_RAID_ADM   6       /* also used for RAID 1+0 ADM */
 663#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
 664#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
 665
 666static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
 667{
 668        return !device->physical_device;
 669}
 670
 671static ssize_t raid_level_show(struct device *dev,
 672             struct device_attribute *attr, char *buf)
 673{
 674        ssize_t l = 0;
 675        unsigned char rlevel;
 676        struct ctlr_info *h;
 677        struct scsi_device *sdev;
 678        struct hpsa_scsi_dev_t *hdev;
 679        unsigned long flags;
 680
 681        sdev = to_scsi_device(dev);
 682        h = sdev_to_hba(sdev);
 683        spin_lock_irqsave(&h->lock, flags);
 684        hdev = sdev->hostdata;
 685        if (!hdev) {
 686                spin_unlock_irqrestore(&h->lock, flags);
 687                return -ENODEV;
 688        }
 689
 690        /* Is this even a logical drive? */
 691        if (!is_logical_device(hdev)) {
 692                spin_unlock_irqrestore(&h->lock, flags);
 693                l = snprintf(buf, PAGE_SIZE, "N/A\n");
 694                return l;
 695        }
 696
 697        rlevel = hdev->raid_level;
 698        spin_unlock_irqrestore(&h->lock, flags);
 699        if (rlevel > RAID_UNKNOWN)
 700                rlevel = RAID_UNKNOWN;
 701        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
 702        return l;
 703}
 704
 705static ssize_t lunid_show(struct device *dev,
 706             struct device_attribute *attr, char *buf)
 707{
 708        struct ctlr_info *h;
 709        struct scsi_device *sdev;
 710        struct hpsa_scsi_dev_t *hdev;
 711        unsigned long flags;
 712        unsigned char lunid[8];
 713
 714        sdev = to_scsi_device(dev);
 715        h = sdev_to_hba(sdev);
 716        spin_lock_irqsave(&h->lock, flags);
 717        hdev = sdev->hostdata;
 718        if (!hdev) {
 719                spin_unlock_irqrestore(&h->lock, flags);
 720                return -ENODEV;
 721        }
 722        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
 723        spin_unlock_irqrestore(&h->lock, flags);
 724        return snprintf(buf, 20, "0x%8phN\n", lunid);
 725}
 726
 727static ssize_t unique_id_show(struct device *dev,
 728             struct device_attribute *attr, char *buf)
 729{
 730        struct ctlr_info *h;
 731        struct scsi_device *sdev;
 732        struct hpsa_scsi_dev_t *hdev;
 733        unsigned long flags;
 734        unsigned char sn[16];
 735
 736        sdev = to_scsi_device(dev);
 737        h = sdev_to_hba(sdev);
 738        spin_lock_irqsave(&h->lock, flags);
 739        hdev = sdev->hostdata;
 740        if (!hdev) {
 741                spin_unlock_irqrestore(&h->lock, flags);
 742                return -ENODEV;
 743        }
 744        memcpy(sn, hdev->device_id, sizeof(sn));
 745        spin_unlock_irqrestore(&h->lock, flags);
 746        return snprintf(buf, 16 * 2 + 2,
 747                        "%02X%02X%02X%02X%02X%02X%02X%02X"
 748                        "%02X%02X%02X%02X%02X%02X%02X%02X\n",
 749                        sn[0], sn[1], sn[2], sn[3],
 750                        sn[4], sn[5], sn[6], sn[7],
 751                        sn[8], sn[9], sn[10], sn[11],
 752                        sn[12], sn[13], sn[14], sn[15]);
 753}
 754
 755static ssize_t sas_address_show(struct device *dev,
 756              struct device_attribute *attr, char *buf)
 757{
 758        struct ctlr_info *h;
 759        struct scsi_device *sdev;
 760        struct hpsa_scsi_dev_t *hdev;
 761        unsigned long flags;
 762        u64 sas_address;
 763
 764        sdev = to_scsi_device(dev);
 765        h = sdev_to_hba(sdev);
 766        spin_lock_irqsave(&h->lock, flags);
 767        hdev = sdev->hostdata;
 768        if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
 769                spin_unlock_irqrestore(&h->lock, flags);
 770                return -ENODEV;
 771        }
 772        sas_address = hdev->sas_address;
 773        spin_unlock_irqrestore(&h->lock, flags);
 774
 775        return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
 776}
 777
 778static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
 779             struct device_attribute *attr, char *buf)
 780{
 781        struct ctlr_info *h;
 782        struct scsi_device *sdev;
 783        struct hpsa_scsi_dev_t *hdev;
 784        unsigned long flags;
 785        int offload_enabled;
 786
 787        sdev = to_scsi_device(dev);
 788        h = sdev_to_hba(sdev);
 789        spin_lock_irqsave(&h->lock, flags);
 790        hdev = sdev->hostdata;
 791        if (!hdev) {
 792                spin_unlock_irqrestore(&h->lock, flags);
 793                return -ENODEV;
 794        }
 795        offload_enabled = hdev->offload_enabled;
 796        spin_unlock_irqrestore(&h->lock, flags);
 797
 798        if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
 799                return snprintf(buf, 20, "%d\n", offload_enabled);
 800        else
 801                return snprintf(buf, 40, "%s\n",
 802                                "Not applicable for a controller");
 803}
 804
 805#define MAX_PATHS 8
 806static ssize_t path_info_show(struct device *dev,
 807             struct device_attribute *attr, char *buf)
 808{
 809        struct ctlr_info *h;
 810        struct scsi_device *sdev;
 811        struct hpsa_scsi_dev_t *hdev;
 812        unsigned long flags;
 813        int i;
 814        int output_len = 0;
 815        u8 box;
 816        u8 bay;
 817        u8 path_map_index = 0;
 818        char *active;
 819        unsigned char phys_connector[2];
 820
 821        sdev = to_scsi_device(dev);
 822        h = sdev_to_hba(sdev);
 823        spin_lock_irqsave(&h->devlock, flags);
 824        hdev = sdev->hostdata;
 825        if (!hdev) {
 826                spin_unlock_irqrestore(&h->devlock, flags);
 827                return -ENODEV;
 828        }
 829
 830        bay = hdev->bay;
 831        for (i = 0; i < MAX_PATHS; i++) {
 832                path_map_index = 1<<i;
 833                if (i == hdev->active_path_index)
 834                        active = "Active";
 835                else if (hdev->path_map & path_map_index)
 836                        active = "Inactive";
 837                else
 838                        continue;
 839
 840                output_len += scnprintf(buf + output_len,
 841                                PAGE_SIZE - output_len,
 842                                "[%d:%d:%d:%d] %20.20s ",
 843                                h->scsi_host->host_no,
 844                                hdev->bus, hdev->target, hdev->lun,
 845                                scsi_device_type(hdev->devtype));
 846
 847                if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
 848                        output_len += scnprintf(buf + output_len,
 849                                                PAGE_SIZE - output_len,
 850                                                "%s\n", active);
 851                        continue;
 852                }
 853
 854                box = hdev->box[i];
 855                memcpy(&phys_connector, &hdev->phys_connector[i],
 856                        sizeof(phys_connector));
 857                if (phys_connector[0] < '0')
 858                        phys_connector[0] = '0';
 859                if (phys_connector[1] < '0')
 860                        phys_connector[1] = '0';
 861                output_len += scnprintf(buf + output_len,
 862                                PAGE_SIZE - output_len,
 863                                "PORT: %.2s ",
 864                                phys_connector);
 865                if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
 866                        hdev->expose_device) {
 867                        if (box == 0 || box == 0xFF) {
 868                                output_len += scnprintf(buf + output_len,
 869                                        PAGE_SIZE - output_len,
 870                                        "BAY: %hhu %s\n",
 871                                        bay, active);
 872                        } else {
 873                                output_len += scnprintf(buf + output_len,
 874                                        PAGE_SIZE - output_len,
 875                                        "BOX: %hhu BAY: %hhu %s\n",
 876                                        box, bay, active);
 877                        }
 878                } else if (box != 0 && box != 0xFF) {
 879                        output_len += scnprintf(buf + output_len,
 880                                PAGE_SIZE - output_len, "BOX: %hhu %s\n",
 881                                box, active);
 882                } else
 883                        output_len += scnprintf(buf + output_len,
 884                                PAGE_SIZE - output_len, "%s\n", active);
 885        }
 886
 887        spin_unlock_irqrestore(&h->devlock, flags);
 888        return output_len;
 889}
 890
 891static ssize_t host_show_ctlr_num(struct device *dev,
 892        struct device_attribute *attr, char *buf)
 893{
 894        struct ctlr_info *h;
 895        struct Scsi_Host *shost = class_to_shost(dev);
 896
 897        h = shost_to_hba(shost);
 898        return snprintf(buf, 20, "%d\n", h->ctlr);
 899}
 900
 901static ssize_t host_show_legacy_board(struct device *dev,
 902        struct device_attribute *attr, char *buf)
 903{
 904        struct ctlr_info *h;
 905        struct Scsi_Host *shost = class_to_shost(dev);
 906
 907        h = shost_to_hba(shost);
 908        return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
 909}
 910
 911static DEVICE_ATTR_RO(raid_level);
 912static DEVICE_ATTR_RO(lunid);
 913static DEVICE_ATTR_RO(unique_id);
 914static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
 915static DEVICE_ATTR_RO(sas_address);
 916static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
 917                        host_show_hp_ssd_smart_path_enabled, NULL);
 918static DEVICE_ATTR_RO(path_info);
 919static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
 920                host_show_hp_ssd_smart_path_status,
 921                host_store_hp_ssd_smart_path_status);
 922static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
 923                        host_store_raid_offload_debug);
 924static DEVICE_ATTR(firmware_revision, S_IRUGO,
 925        host_show_firmware_revision, NULL);
 926static DEVICE_ATTR(commands_outstanding, S_IRUGO,
 927        host_show_commands_outstanding, NULL);
 928static DEVICE_ATTR(transport_mode, S_IRUGO,
 929        host_show_transport_mode, NULL);
 930static DEVICE_ATTR(resettable, S_IRUGO,
 931        host_show_resettable, NULL);
 932static DEVICE_ATTR(lockup_detected, S_IRUGO,
 933        host_show_lockup_detected, NULL);
 934static DEVICE_ATTR(ctlr_num, S_IRUGO,
 935        host_show_ctlr_num, NULL);
 936static DEVICE_ATTR(legacy_board, S_IRUGO,
 937        host_show_legacy_board, NULL);
 938
 939static struct attribute *hpsa_sdev_attrs[] = {
 940        &dev_attr_raid_level.attr,
 941        &dev_attr_lunid.attr,
 942        &dev_attr_unique_id.attr,
 943        &dev_attr_hp_ssd_smart_path_enabled.attr,
 944        &dev_attr_path_info.attr,
 945        &dev_attr_sas_address.attr,
 946        NULL,
 947};
 948
 949ATTRIBUTE_GROUPS(hpsa_sdev);
 950
 951static struct attribute *hpsa_shost_attrs[] = {
 952        &dev_attr_rescan.attr,
 953        &dev_attr_firmware_revision.attr,
 954        &dev_attr_commands_outstanding.attr,
 955        &dev_attr_transport_mode.attr,
 956        &dev_attr_resettable.attr,
 957        &dev_attr_hp_ssd_smart_path_status.attr,
 958        &dev_attr_raid_offload_debug.attr,
 959        &dev_attr_lockup_detected.attr,
 960        &dev_attr_ctlr_num.attr,
 961        &dev_attr_legacy_board.attr,
 962        NULL,
 963};
 964
 965ATTRIBUTE_GROUPS(hpsa_shost);
 966
 967#define HPSA_NRESERVED_CMDS     (HPSA_CMDS_RESERVED_FOR_DRIVER +\
 968                                 HPSA_MAX_CONCURRENT_PASSTHRUS)
 969
 970static struct scsi_host_template hpsa_driver_template = {
 971        .module                 = THIS_MODULE,
 972        .name                   = HPSA,
 973        .proc_name              = HPSA,
 974        .queuecommand           = hpsa_scsi_queue_command,
 975        .scan_start             = hpsa_scan_start,
 976        .scan_finished          = hpsa_scan_finished,
 977        .change_queue_depth     = hpsa_change_queue_depth,
 978        .this_id                = -1,
 979        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
 980        .ioctl                  = hpsa_ioctl,
 981        .slave_alloc            = hpsa_slave_alloc,
 982        .slave_configure        = hpsa_slave_configure,
 983        .slave_destroy          = hpsa_slave_destroy,
 984#ifdef CONFIG_COMPAT
 985        .compat_ioctl           = hpsa_compat_ioctl,
 986#endif
 987        .sdev_groups = hpsa_sdev_groups,
 988        .shost_groups = hpsa_shost_groups,
 989        .max_sectors = 2048,
 990        .no_write_same = 1,
 991};
 992
 993static inline u32 next_command(struct ctlr_info *h, u8 q)
 994{
 995        u32 a;
 996        struct reply_queue_buffer *rq = &h->reply_queue[q];
 997
 998        if (h->transMethod & CFGTBL_Trans_io_accel1)
 999                return h->access.command_completed(h, q);
1000
1001        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
1002                return h->access.command_completed(h, q);
1003
1004        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
1005                a = rq->head[rq->current_entry];
1006                rq->current_entry++;
1007                atomic_dec(&h->commands_outstanding);
1008        } else {
1009                a = FIFO_EMPTY;
1010        }
1011        /* Check for wraparound */
1012        if (rq->current_entry == h->max_commands) {
1013                rq->current_entry = 0;
1014                rq->wraparound ^= 1;
1015        }
1016        return a;
1017}
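
/*
 * A note on the wraparound test above: each posted reply carries a
 * phase bit in bit 0, and rq->wraparound holds the phase the consumer
 * currently expects.  An entry is consumed only while the two match;
 * once current_entry walks past h->max_commands - 1 it wraps to 0 and
 * the expected phase flips, so leftover entries from the previous lap
 * (whose phase bit no longer matches) read as FIFO_EMPTY until the
 * firmware overwrites them on its next pass.
 */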
1018
1019/*
1020 * There are some special bits in the bus address of the
1021 * command that we have to set for the controller to know
1022 * how to process the command:
1023 *
1024 * Normal performant mode:
1025 * bit 0: 1 means performant mode, 0 means simple mode.
1026 * bits 1-3 = block fetch table entry
1027 * bits 4-6 = command type (== 0)
1028 *
1029 * ioaccel1 mode:
1030 * bit 0 = "performant mode" bit.
1031 * bits 1-3 = block fetch table entry
1032 * bits 4-6 = command type (== 110)
1033 * (command type is needed because ioaccel1 mode
1034 * commands are submitted through the same register as normal
1035 * mode commands, so this is how the controller knows whether
1036 * the command is normal mode or ioaccel1 mode.)
1037 *
1038 * ioaccel2 mode:
1039 * bit 0 = "performant mode" bit.
1040 * bits 1-4 = block fetch table entry (note extra bit)
1041 * bits 4-6 = not needed, because ioaccel2 mode has
1042 * a separate special register for submitting commands.
1043 */
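
/*
 * Illustrative example: for a normal performant-mode command whose SG
 * count maps to block fetch table entry 2, the low bits of the posted
 * address would be composed as
 *
 *	c->busaddr |= 1 | (2 << 1);	/* performant bit + fetch entry */
 *
 * which is the h->blockFetchTable[c->Header.SGList] arithmetic that
 * set_performant_mode() below performs.
 */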
1044
1045/*
 1046 * set_performant_mode: Modify the tag for cciss performant mode:
 1047 * set bit 0 for the pull model, bits 3-1 for the block fetch
 1048 * register number.
1049 */
1050#define DEFAULT_REPLY_QUEUE (-1)
1051static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1052                                        int reply_queue)
1053{
1054        if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1055                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1056                if (unlikely(!h->msix_vectors))
1057                        return;
1058                c->Header.ReplyQueue = reply_queue;
1059        }
1060}
1061
1062static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1063                                                struct CommandList *c,
1064                                                int reply_queue)
1065{
1066        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1067
1068        /*
1069         * Tell the controller to post the reply to the queue for this
1070         * processor.  This seems to give the best I/O throughput.
1071         */
1072        cp->ReplyQueue = reply_queue;
1073        /*
1074         * Set the bits in the address sent down to include:
1075         *  - performant mode bit (bit 0)
1076         *  - pull count (bits 1-3)
1077         *  - command type (bits 4-6)
1078         */
1079        c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1080                                        IOACCEL1_BUSADDR_CMDTYPE;
1081}
1082
1083static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1084                                                struct CommandList *c,
1085                                                int reply_queue)
1086{
1087        struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1088                &h->ioaccel2_cmd_pool[c->cmdindex];
1089
1090        /* Tell the controller to post the reply to the queue for this
1091         * processor.  This seems to give the best I/O throughput.
1092         */
1093        cp->reply_queue = reply_queue;
1094        /* Set the bits in the address sent down to include:
1095         *  - performant mode bit not used in ioaccel mode 2
1096         *  - pull count (bits 0-3)
1097         *  - command type isn't needed for ioaccel2
1098         */
1099        c->busaddr |= h->ioaccel2_blockFetchTable[0];
1100}
1101
1102static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1103                                                struct CommandList *c,
1104                                                int reply_queue)
1105{
1106        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1107
1108        /*
1109         * Tell the controller to post the reply to the queue for this
1110         * processor.  This seems to give the best I/O throughput.
1111         */
1112        cp->reply_queue = reply_queue;
1113        /*
1114         * Set the bits in the address sent down to include:
1115         *  - performant mode bit not used in ioaccel mode 2
1116         *  - pull count (bits 0-3)
1117         *  - command type isn't needed for ioaccel2
1118         */
1119        c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1120}
1121
1122static int is_firmware_flash_cmd(u8 *cdb)
1123{
1124        return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1125}
1126
1127/*
1128 * During firmware flash, the heartbeat register may not update as frequently
 1129 * as it should.  So we dial down lockup detection during firmware flash, and
 1130 * dial it back up when firmware flash completes.
1131 */
1132#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1133#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1134#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
1135static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1136                struct CommandList *c)
1137{
1138        if (!is_firmware_flash_cmd(c->Request.CDB))
1139                return;
1140        atomic_inc(&h->firmware_flash_in_progress);
1141        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1142}
1143
1144static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1145                struct CommandList *c)
1146{
1147        if (is_firmware_flash_cmd(c->Request.CDB) &&
1148                atomic_dec_and_test(&h->firmware_flash_in_progress))
1149                h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1150}
1151
1152static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1153        struct CommandList *c, int reply_queue)
1154{
1155        dial_down_lockup_detection_during_fw_flash(h, c);
1156        atomic_inc(&h->commands_outstanding);
1157        /*
1158         * Check to see if the command is being retried.
1159         */
1160        if (c->device && !c->retry_pending)
1161                atomic_inc(&c->device->commands_outstanding);
1162
1163        reply_queue = h->reply_map[raw_smp_processor_id()];
1164        switch (c->cmd_type) {
1165        case CMD_IOACCEL1:
1166                set_ioaccel1_performant_mode(h, c, reply_queue);
1167                writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1168                break;
1169        case CMD_IOACCEL2:
1170                set_ioaccel2_performant_mode(h, c, reply_queue);
1171                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1172                break;
1173        case IOACCEL2_TMF:
1174                set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1175                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1176                break;
1177        default:
1178                set_performant_mode(h, c, reply_queue);
1179                h->access.submit_command(h, c);
1180        }
1181}
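
/*
 * Note that whatever reply_queue the caller passed in, the function
 * above reselects it through h->reply_map indexed by the submitting
 * CPU, steering the completion to the interrupt vector associated with
 * that CPU; DEFAULT_REPLY_QUEUE (-1) from the wrapper below is
 * effectively a placeholder.
 */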
1182
1183static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1184{
1185        __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1186}
1187
1188static inline int is_hba_lunid(unsigned char scsi3addr[])
1189{
1190        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1191}
1192
1193static inline int is_scsi_rev_5(struct ctlr_info *h)
1194{
1195        if (!h->hba_inquiry_data)
1196                return 0;
1197        if ((h->hba_inquiry_data[2] & 0x07) == 5)
1198                return 1;
1199        return 0;
1200}
1201
1202static int hpsa_find_target_lun(struct ctlr_info *h,
1203        unsigned char scsi3addr[], int bus, int *target, int *lun)
1204{
1205        /* finds an unused bus, target, lun for a new physical device
1206         * assumes h->devlock is held
1207         */
1208        int i, found = 0;
1209        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1210
1211        bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1212
1213        for (i = 0; i < h->ndevices; i++) {
1214                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1215                        __set_bit(h->dev[i]->target, lun_taken);
1216        }
1217
1218        i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1219        if (i < HPSA_MAX_DEVICES) {
1220                /* *bus = 1; */
1221                *target = i;
1222                *lun = 0;
1223                found = 1;
1224        }
1225        return !found;
1226}
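
/*
 * Worked example: if the bus already has devices at targets 0, 1 and 3,
 * the loop above sets those bits in lun_taken, find_first_zero_bit()
 * returns 2, and the new physical device is assigned target 2, lun 0.
 */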
1227
1228static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1229        struct hpsa_scsi_dev_t *dev, char *description)
1230{
1231#define LABEL_SIZE 25
1232        char label[LABEL_SIZE];
1233
1234        if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1235                return;
1236
1237        switch (dev->devtype) {
1238        case TYPE_RAID:
1239                snprintf(label, LABEL_SIZE, "controller");
1240                break;
1241        case TYPE_ENCLOSURE:
1242                snprintf(label, LABEL_SIZE, "enclosure");
1243                break;
1244        case TYPE_DISK:
1245        case TYPE_ZBC:
1246                if (dev->external)
1247                        snprintf(label, LABEL_SIZE, "external");
1248                else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1249                        snprintf(label, LABEL_SIZE, "%s",
1250                                raid_label[PHYSICAL_DRIVE]);
1251                else
1252                        snprintf(label, LABEL_SIZE, "RAID-%s",
1253                                dev->raid_level > RAID_UNKNOWN ? "?" :
1254                                raid_label[dev->raid_level]);
1255                break;
1256        case TYPE_ROM:
1257                snprintf(label, LABEL_SIZE, "rom");
1258                break;
1259        case TYPE_TAPE:
1260                snprintf(label, LABEL_SIZE, "tape");
1261                break;
1262        case TYPE_MEDIUM_CHANGER:
1263                snprintf(label, LABEL_SIZE, "changer");
1264                break;
1265        default:
1266                snprintf(label, LABEL_SIZE, "UNKNOWN");
1267                break;
1268        }
1269
1270        dev_printk(level, &h->pdev->dev,
1271                        "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1272                        h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1273                        description,
1274                        scsi_device_type(dev->devtype),
1275                        dev->vendor,
1276                        dev->model,
1277                        label,
1278                        dev->offload_config ? '+' : '-',
1279                        dev->offload_to_be_enabled ? '+' : '-',
1280                        dev->expose_device);
1281}
1282
1283/* Add an entry into h->dev[] array. */
1284static int hpsa_scsi_add_entry(struct ctlr_info *h,
1285                struct hpsa_scsi_dev_t *device,
1286                struct hpsa_scsi_dev_t *added[], int *nadded)
1287{
1288        /* assumes h->devlock is held */
1289        int n = h->ndevices;
1290        int i;
1291        unsigned char addr1[8], addr2[8];
1292        struct hpsa_scsi_dev_t *sd;
1293
1294        if (n >= HPSA_MAX_DEVICES) {
 1295                dev_err(&h->pdev->dev,
 1296                        "too many devices, some will be inaccessible.\n");
1297                return -1;
1298        }
1299
1300        /* physical devices do not have lun or target assigned until now. */
1301        if (device->lun != -1)
1302                /* Logical device, lun is already assigned. */
1303                goto lun_assigned;
1304
 1305        /* If this device is a non-zero lun of a multi-lun device,
 1306         * byte 4 of the 8-byte LUN addr will contain the logical
 1307         * unit number; zero otherwise.
1308         */
1309        if (device->scsi3addr[4] == 0) {
1310                /* This is not a non-zero lun of a multi-lun device */
1311                if (hpsa_find_target_lun(h, device->scsi3addr,
1312                        device->bus, &device->target, &device->lun) != 0)
1313                        return -1;
1314                goto lun_assigned;
1315        }
1316
1317        /* This is a non-zero lun of a multi-lun device.
1318         * Search through our list and find the device which
1319         * has the same 8-byte LUN address, except for bytes 4 and 5.
1320         * Assign the same bus and target for this new LUN.
1321         * Use the logical unit number from the firmware.
1322         */
1323        memcpy(addr1, device->scsi3addr, 8);
1324        addr1[4] = 0;
1325        addr1[5] = 0;
1326        for (i = 0; i < n; i++) {
1327                sd = h->dev[i];
1328                memcpy(addr2, sd->scsi3addr, 8);
1329                addr2[4] = 0;
1330                addr2[5] = 0;
1331                /* differ only in bytes 4 and 5? */
1332                if (memcmp(addr1, addr2, 8) == 0) {
1333                        device->bus = sd->bus;
1334                        device->target = sd->target;
1335                        device->lun = device->scsi3addr[4];
1336                        break;
1337                }
1338        }
1339        if (device->lun == -1) {
1340                dev_warn(&h->pdev->dev,
1341                        "physical device with no LUN=0, suspect firmware bug or unsupported hardware configuration.\n");
1343                return -1;
1344        }
1345
1346lun_assigned:
1347
1348        h->dev[n] = device;
1349        h->ndevices++;
1350        added[*nadded] = device;
1351        (*nadded)++;
1352        hpsa_show_dev_msg(KERN_INFO, h, device,
1353                device->expose_device ? "added" : "masked");
1354        return 0;
1355}
1356
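    /*
     * Worked example of the multi-lun matching in hpsa_scsi_add_entry()
     * above, with an illustrative address: a new physical device whose
     * scsi3addr has byte 4 set to 0x02 is compared against each existing
     * entry with bytes 4 and 5 zeroed on both sides.  On a match, it
     * inherits that entry's bus and target and is assigned
     * lun = scsi3addr[4] = 2.
     */
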
1357/*
1358 * Called during a scan operation.
1359 *
1360 * Update an entry in h->dev[] array.
1361 */
1362static void hpsa_scsi_update_entry(struct ctlr_info *h,
1363        int entry, struct hpsa_scsi_dev_t *new_entry)
1364{
1365        /* assumes h->devlock is held */
1366        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1367
1368        /* Raid level changed. */
1369        h->dev[entry]->raid_level = new_entry->raid_level;
1370
1371        /*
1372         * ioaccel_handle may have changed for a dual domain disk.
1373         */
1374        h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1375
1376        /* Raid offload parameters changed.  Careful about the ordering. */
1377        if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1378                /*
1379                 * if drive is newly offload_enabled, we want to copy the
1380                 * raid map data first.  If previously offload_enabled and
1381                 * offload_config were set, raid map data had better be
1382                 * the same as it was before. If raid map data has changed
1383                 * then it had better be the case that
1384                 * h->dev[entry]->offload_enabled is currently 0.
1385                 */
1386                h->dev[entry]->raid_map = new_entry->raid_map;
1387                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1388        }
1389        if (new_entry->offload_to_be_enabled) {
1390                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1391                wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1392        }
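            /*
             * The wmb() above is assumed to pair with readers on the I/O
             * submission path, which may test the enable flags set below
             * and then use ioaccel_handle without taking h->devlock.
             */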
1393        h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1394        h->dev[entry]->offload_config = new_entry->offload_config;
1395        h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1396        h->dev[entry]->queue_depth = new_entry->queue_depth;
1397
1398        /*
1399         * We can turn off ioaccel offload now, but need to delay turning
1400         * ioaccel on until we can update h->dev[entry]->phys_disk[], and we
1401         * can't do that until all the devices are updated.
1402         */
1403        h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1404
1405        /*
1406         * turn ioaccel off immediately if told to do so.
1407         */
1408        if (!new_entry->offload_to_be_enabled)
1409                h->dev[entry]->offload_enabled = 0;
1410
1411        hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1412}
1413
1414/* Replace an entry in the h->dev[] array. */
1415static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1416        int entry, struct hpsa_scsi_dev_t *new_entry,
1417        struct hpsa_scsi_dev_t *added[], int *nadded,
1418        struct hpsa_scsi_dev_t *removed[], int *nremoved)
1419{
1420        /* assumes h->devlock is held */
1421        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1422        removed[*nremoved] = h->dev[entry];
1423        (*nremoved)++;
1424
1425        /*
1426         * New physical devices won't have target/lun assigned yet
1427         * so we need to preserve the values in the slot we are replacing.
1428         */
1429        if (new_entry->target == -1) {
1430                new_entry->target = h->dev[entry]->target;
1431                new_entry->lun = h->dev[entry]->lun;
1432        }
1433
1434        h->dev[entry] = new_entry;
1435        added[*nadded] = new_entry;
1436        (*nadded)++;
1437
1438        hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1439}
1440
1441/* Remove an entry from h->dev[] array. */
1442static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1443        struct hpsa_scsi_dev_t *removed[], int *nremoved)
1444{
1445        /* assumes h->devlock is held */
1446        int i;
1447        struct hpsa_scsi_dev_t *sd;
1448
1449        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1450
1451        sd = h->dev[entry];
1452        removed[*nremoved] = h->dev[entry];
1453        (*nremoved)++;
1454
1455        for (i = entry; i < h->ndevices-1; i++)
1456                h->dev[i] = h->dev[i+1];
1457        h->ndevices--;
1458        hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1459}
1460
1461#define SCSI3ADDR_EQ(a, b) ( \
1462        (a)[7] == (b)[7] && \
1463        (a)[6] == (b)[6] && \
1464        (a)[5] == (b)[5] && \
1465        (a)[4] == (b)[4] && \
1466        (a)[3] == (b)[3] && \
1467        (a)[2] == (b)[2] && \
1468        (a)[1] == (b)[1] && \
1469        (a)[0] == (b)[0])
1470
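    /*
     * SCSI3ADDR_EQ() is a byte-wise equality test for two 8-byte LUN
     * addresses, usable as a cheap inline expression; it is equivalent
     * to memcmp((a), (b), 8) == 0.
     */
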
1471static void fixup_botched_add(struct ctlr_info *h,
1472        struct hpsa_scsi_dev_t *added)
1473{
1474        /* called when scsi_add_device fails in order to re-adjust
1475         * h->dev[] to match the mid layer's view.
1476         */
1477        unsigned long flags;
1478        int i, j;
1479
1480        spin_lock_irqsave(&h->lock, flags);
1481        for (i = 0; i < h->ndevices; i++) {
1482                if (h->dev[i] == added) {
1483                        for (j = i; j < h->ndevices-1; j++)
1484                                h->dev[j] = h->dev[j+1];
1485                        h->ndevices--;
1486                        break;
1487                }
1488        }
1489        spin_unlock_irqrestore(&h->lock, flags);
1490        kfree(added);
1491}
1492
1493static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1494        struct hpsa_scsi_dev_t *dev2)
1495{
1496        /* We compare everything except lun and target, as these
1497         * are not yet assigned.  Compare the parts most likely
1498         * to differ first.
1499         */
1500        if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1501                sizeof(dev1->scsi3addr)) != 0)
1502                return 0;
1503        if (memcmp(dev1->device_id, dev2->device_id,
1504                sizeof(dev1->device_id)) != 0)
1505                return 0;
1506        if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1507                return 0;
1508        if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1509                return 0;
1510        if (dev1->devtype != dev2->devtype)
1511                return 0;
1512        if (dev1->bus != dev2->bus)
1513                return 0;
1514        return 1;
1515}
1516
1517static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1518        struct hpsa_scsi_dev_t *dev2)
1519{
1520        /* Device attributes that can change, but don't mean
1521         * that the device is a different device, nor that the OS
1522         * needs to be told anything about the change.
1523         */
1524        if (dev1->raid_level != dev2->raid_level)
1525                return 1;
1526        if (dev1->offload_config != dev2->offload_config)
1527                return 1;
1528        if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1529                return 1;
1530        if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1531                if (dev1->queue_depth != dev2->queue_depth)
1532                        return 1;
1533        /*
1534         * This can happen for dual domain devices. An active
1535         * path change causes the ioaccel handle to change
1536         *
1537         * For example, note the handle differences between p0 and p1:
1538         * Device                    WWN               ,WWN hash,Handle
1539         * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
1540         *      p1                   0x5000C5005FC4DAC9,0x6798C0,0x00040004
1541         */
1542        if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1543                return 1;
1544        return 0;
1545}
1546
1547/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
1548 * and return needle location in *index.  If scsi3addr matches, but not
1549 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1550 * location in *index.
1551 * In the case of a minor device attribute change, such as RAID level, just
1552 * return DEVICE_UPDATED, along with the updated device's location in index.
1553 * If needle not found, return DEVICE_NOT_FOUND.
1554 */
1555static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1556        struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1557        int *index)
1558{
1559        int i;
1560#define DEVICE_NOT_FOUND 0
1561#define DEVICE_CHANGED 1
1562#define DEVICE_SAME 2
1563#define DEVICE_UPDATED 3
1564        if (needle == NULL)
1565                return DEVICE_NOT_FOUND;
1566
1567        for (i = 0; i < haystack_size; i++) {
1568                if (haystack[i] == NULL) /* previously removed. */
1569                        continue;
1570                if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1571                        *index = i;
1572                        if (device_is_the_same(needle, haystack[i])) {
1573                                if (device_updated(needle, haystack[i]))
1574                                        return DEVICE_UPDATED;
1575                                return DEVICE_SAME;
1576                        } else {
1577                                /* Keep offline devices offline */
1578                                if (needle->volume_offline)
1579                                        return DEVICE_NOT_FOUND;
1580                                return DEVICE_CHANGED;
1581                        }
1582                }
1583        }
1584        *index = -1;
1585        return DEVICE_NOT_FOUND;
1586}
1587
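    /*
     * Summary of the hpsa_scsi_find_entry() outcomes above:
     *
     *   scsi3addr match  same identity  minor attr change  result
     *   no               -              -                  DEVICE_NOT_FOUND
     *   yes              yes            no                 DEVICE_SAME
     *   yes              yes            yes                DEVICE_UPDATED
     *   yes              no             -                  DEVICE_CHANGED
     *
     * An offline needle that would otherwise be DEVICE_CHANGED is
     * reported as DEVICE_NOT_FOUND so offline volumes stay offline.
     */
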
1588static void hpsa_monitor_offline_device(struct ctlr_info *h,
1589                                        unsigned char scsi3addr[])
1590{
1591        struct offline_device_entry *device;
1592        unsigned long flags;
1593
1594        /* Check to see if device is already on the list */
1595        spin_lock_irqsave(&h->offline_device_lock, flags);
1596        list_for_each_entry(device, &h->offline_device_list, offline_list) {
1597                if (memcmp(device->scsi3addr, scsi3addr,
1598                        sizeof(device->scsi3addr)) == 0) {
1599                        spin_unlock_irqrestore(&h->offline_device_lock, flags);
1600                        return;
1601                }
1602        }
1603        spin_unlock_irqrestore(&h->offline_device_lock, flags);
1604
1605        /* Device is not on the list, add it. */
1606        device = kmalloc(sizeof(*device), GFP_KERNEL);
1607        if (!device)
1608                return;
1609
1610        memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1611        spin_lock_irqsave(&h->offline_device_lock, flags);
1612        list_add_tail(&device->offline_list, &h->offline_device_list);
1613        spin_unlock_irqrestore(&h->offline_device_lock, flags);
1614}
1615
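    /*
     * Note on hpsa_monitor_offline_device() above: the membership check
     * and the list_add_tail() run under separate acquisitions of
     * offline_device_lock (the kmalloc() may sleep), so two concurrent
     * callers could in principle insert duplicate entries for the same
     * address; duplicates are assumed harmless since the list is only
     * scanned, not keyed.
     */
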
1616/* Print a message explaining various offline volume states */
1617static void hpsa_show_volume_status(struct ctlr_info *h,
1618        struct hpsa_scsi_dev_t *sd)
1619{
1620        if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1621                dev_info(&h->pdev->dev,
1622                        "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1623                        h->scsi_host->host_no,
1624                        sd->bus, sd->target, sd->lun);
1625        switch (sd->volume_offline) {
1626        case HPSA_LV_OK:
1627                break;
1628        case HPSA_LV_UNDERGOING_ERASE:
1629                dev_info(&h->pdev->dev,
1630                        "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1631                        h->scsi_host->host_no,
1632                        sd->bus, sd->target, sd->lun);
1633                break;
1634        case HPSA_LV_NOT_AVAILABLE:
1635                dev_info(&h->pdev->dev,
1636                        "C%d:B%d:T%d:L%d Volume is waiting for a transforming volume.\n",
1637                        h->scsi_host->host_no,
1638                        sd->bus, sd->target, sd->lun);
1639                break;
1640        case HPSA_LV_UNDERGOING_RPI:
1641                dev_info(&h->pdev->dev,
1642                        "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1643                        h->scsi_host->host_no,
1644                        sd->bus, sd->target, sd->lun);
1645                break;
1646        case HPSA_LV_PENDING_RPI:
1647                dev_info(&h->pdev->dev,
1648                        "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1649                        h->scsi_host->host_no,
1650                        sd->bus, sd->target, sd->lun);
1651                break;
1652        case HPSA_LV_ENCRYPTED_NO_KEY:
1653                dev_info(&h->pdev->dev,
1654                        "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1655                        h->scsi_host->host_no,
1656                        sd->bus, sd->target, sd->lun);
1657                break;
1658        case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1659                dev_info(&h->pdev->dev,
1660                        "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1661                        h->scsi_host->host_no,
1662                        sd->bus, sd->target, sd->lun);
1663                break;
1664        case HPSA_LV_UNDERGOING_ENCRYPTION:
1665                dev_info(&h->pdev->dev,
1666                        "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1667                        h->scsi_host->host_no,
1668                        sd->bus, sd->target, sd->lun);
1669                break;
1670        case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1671                dev_info(&h->pdev->dev,
1672                        "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1673                        h->scsi_host->host_no,
1674                        sd->bus, sd->target, sd->lun);
1675                break;
1676        case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1677                dev_info(&h->pdev->dev,
1678                        "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1679                        h->scsi_host->host_no,
1680                        sd->bus, sd->target, sd->lun);
1681                break;
1682        case HPSA_LV_PENDING_ENCRYPTION:
1683                dev_info(&h->pdev->dev,
1684                        "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1685                        h->scsi_host->host_no,
1686                        sd->bus, sd->target, sd->lun);
1687                break;
1688        case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1689                dev_info(&h->pdev->dev,
1690                        "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1691                        h->scsi_host->host_no,
1692                        sd->bus, sd->target, sd->lun);
1693                break;
1694        }
1695}
1696
1697/*
1698 * Figure the list of physical drive pointers for a logical drive with
1699 * raid offload configured.
1700 */
1701static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1702                                struct hpsa_scsi_dev_t *dev[], int ndevices,
1703                                struct hpsa_scsi_dev_t *logical_drive)
1704{
1705        struct raid_map_data *map = &logical_drive->raid_map;
1706        struct raid_map_disk_data *dd = &map->data[0];
1707        int i, j;
1708        int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1709                                le16_to_cpu(map->metadata_disks_per_row);
1710        int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1711                                le16_to_cpu(map->layout_map_count) *
1712                                total_disks_per_row;
1713        int nphys_disk = le16_to_cpu(map->layout_map_count) *
1714                                total_disks_per_row;
1715        int qdepth;
1716
1717        if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1718                nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1719
1720        logical_drive->nphysical_disks = nraid_map_entries;
1721
1722        qdepth = 0;
1723        for (i = 0; i < nraid_map_entries; i++) {
1724                logical_drive->phys_disk[i] = NULL;
1725                if (!logical_drive->offload_config)
1726                        continue;
1727                for (j = 0; j < ndevices; j++) {
1728                        if (dev[j] == NULL)
1729                                continue;
1730                        if (dev[j]->devtype != TYPE_DISK &&
1731                            dev[j]->devtype != TYPE_ZBC)
1732                                continue;
1733                        if (is_logical_device(dev[j]))
1734                                continue;
1735                        if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1736                                continue;
1737
1738                        logical_drive->phys_disk[i] = dev[j];
1739                        if (i < nphys_disk)
1740                                qdepth = min(h->nr_cmds, qdepth +
1741                                    logical_drive->phys_disk[i]->queue_depth);
1742                        break;
1743                }
1744
1745                /*
1746                 * This can happen if a physical drive is removed and
1747                 * the logical drive is degraded.  In that case, the RAID
1748                 * map data will refer to a physical disk which isn't actually
1749                 * present.  In that case offload_enabled should already
1750                 * be 0, but we'll turn it off here just in case.
1751                 */
1752                if (!logical_drive->phys_disk[i]) {
1753                        dev_warn(&h->pdev->dev,
1754                                "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1755                                __func__,
1756                                h->scsi_host->host_no, logical_drive->bus,
1757                                logical_drive->target, logical_drive->lun);
1758                        hpsa_turn_off_ioaccel_for_device(logical_drive);
1759                        logical_drive->queue_depth = 8;
1760                }
1761        }
1762        if (nraid_map_entries) {
1763                /*
1764                 * This is correct for reads, too high for full stripe writes,
1765                 * way too high for partial stripe writes.
1766                 */
1767                logical_drive->queue_depth = qdepth;
1768        } else {
1769                if (logical_drive->external)
1770                        logical_drive->queue_depth = EXTERNAL_QD;
1771                else
1772                        logical_drive->queue_depth = h->nr_cmds;
1773        }
1774}
1775
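    /*
     * Illustrative numbers for the queue depth computed in
     * hpsa_figure_phys_disk_ptrs() above: a RAID-1 volume backed by two
     * physical disks that each report queue_depth 32 ends up with
     * qdepth 64, each step capped at h->nr_cmds by the min() in the
     * loop.
     */
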
1776static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1777                                struct hpsa_scsi_dev_t *dev[], int ndevices)
1778{
1779        int i;
1780
1781        for (i = 0; i < ndevices; i++) {
1782                if (dev[i] == NULL)
1783                        continue;
1784                if (dev[i]->devtype != TYPE_DISK &&
1785                    dev[i]->devtype != TYPE_ZBC)
1786                        continue;
1787                if (!is_logical_device(dev[i]))
1788                        continue;
1789
1790                /*
1791                 * If offload is currently enabled, the RAID map and
1792                 * phys_disk[] assignment *better* not be changing
1793                 * because we would be changing ioaccel phys_disk[] pointers
1794                 * on an ioaccel volume processing I/O requests.
1795                 *
1796                 * If an ioaccel volume status changed, initially because it was
1797                 * re-configured and thus underwent a transformation, or
1798                 * a drive failed, we would have received a state change
1799                 * request and ioaccel should have been turned off. When the
1800                 * transformation completes, we get another state change
1801                 * request to turn ioaccel back on. In this case, we need
1802                 * to update the ioaccel information.
1803                 *
1804                 * Thus: If it is not currently enabled, but will be after
1805                 * the scan completes, make sure the ioaccel pointers
1806                 * are up to date.
1807                 */
1808
1809                if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1810                        hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1811        }
1812}
1813
1814static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1815{
1816        int rc = 0;
1817
1818        if (!h->scsi_host)
1819                return 1;
1820
1821        if (is_logical_device(device)) /* RAID */
1822                rc = scsi_add_device(h->scsi_host, device->bus,
1823                                        device->target, device->lun);
1824        else /* HBA */
1825                rc = hpsa_add_sas_device(h->sas_host, device);
1826
1827        return rc;
1828}
1829
1830static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1831                                                struct hpsa_scsi_dev_t *dev)
1832{
1833        int i;
1834        int count = 0;
1835
1836        for (i = 0; i < h->nr_cmds; i++) {
1837                struct CommandList *c = h->cmd_pool + i;
1838                int refcount = atomic_inc_return(&c->refcount);
1839
1840                if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1841                                dev->scsi3addr)) {
1842                        unsigned long flags;
1843
1844                        spin_lock_irqsave(&h->lock, flags);     /* Implied MB */
1845                        if (!hpsa_is_cmd_idle(c))
1846                                ++count;
1847                        spin_unlock_irqrestore(&h->lock, flags);
1848                }
1849
1850                cmd_free(h, c);
1851        }
1852
1853        return count;
1854}
1855
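    /*
     * In hpsa_find_outstanding_commands_for_dev() above, the
     * atomic_inc_return() takes a temporary reference on each slot of
     * the command pool: a refcount > 1 means the slot is currently
     * allocated, and cmd_free() drops the temporary reference again.
     * h->lock is taken around hpsa_is_cmd_idle() only for its implied
     * memory barrier, as noted there.
     */
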
1856#define NUM_WAIT 20
1857static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1858                                                struct hpsa_scsi_dev_t *device)
1859{
1860        int cmds = 0;
1861        int waits = 0;
1862        int num_wait = NUM_WAIT;
1863
1864        if (device->external)
1865                num_wait = HPSA_EH_PTRAID_TIMEOUT;
1866
1867        while (1) {
1868                cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1869                if (cmds == 0)
1870                        break;
1871                if (++waits > num_wait)
1872                        break;
1873                msleep(1000);
1874        }
1875
1876        if (waits > num_wait) {
1877                dev_warn(&h->pdev->dev,
1878                        "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
1879                        __func__,
1880                        h->scsi_host->host_no,
1881                        device->bus, device->target, device->lun, cmds);
1882        }
1883}
1884
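    /*
     * Note on hpsa_wait_for_outstanding_commands_for_dev() above:
     * num_wait counts one-second msleep() iterations, but
     * HPSA_EH_PTRAID_TIMEOUT is defined in jiffies (240 * HZ), so the
     * wait for external devices appears to run far longer than 240
     * seconds whenever HZ > 1.
     */
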
1885static void hpsa_remove_device(struct ctlr_info *h,
1886                        struct hpsa_scsi_dev_t *device)
1887{
1888        struct scsi_device *sdev = NULL;
1889
1890        if (!h->scsi_host)
1891                return;
1892
1893        /*
1894         * Allow for commands to drain
1895         */
1896        device->removed = 1;
1897        hpsa_wait_for_outstanding_commands_for_dev(h, device);
1898
1899        if (is_logical_device(device)) { /* RAID */
1900                sdev = scsi_device_lookup(h->scsi_host, device->bus,
1901                                                device->target, device->lun);
1902                if (sdev) {
1903                        scsi_remove_device(sdev);
1904                        scsi_device_put(sdev);
1905                } else {
1906                        /*
1907                         * We don't expect to get here.  Future commands
1908                         * to this device will get a selection timeout as
1909                         * if the device were gone.
1910                         */
1911                        hpsa_show_dev_msg(KERN_WARNING, h, device,
1912                                        "didn't find device for removal.");
1913                }
1914        } else { /* HBA */
1915
1916                hpsa_remove_sas_device(device);
1917        }
1918}
1919
1920static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1921        struct hpsa_scsi_dev_t *sd[], int nsds)
1922{
1923        /* sd contains scsi3 addresses and devtypes, and inquiry
1924         * data.  This function takes what's in sd to be the current
1925         * reality and updates h->dev[] to reflect that reality.
1926         */
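            /*
             * Rough outline of the passes below:
             *  1) walk h->dev[]: remove entries absent from sd[], replace
             *     changed ones, fold minor attribute changes in place;
             *  2) walk sd[]: add entries not yet in h->dev[], skipping
             *     offline volumes;
             *  3) fix up logical-drive phys_disk[] pointers, then flip
             *     offload_enabled to its offload_to_be_enabled value;
             *  4) outside devlock, notify the SCSI midlayer of the
             *     removes and adds.
             */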
1927        int i, entry, device_change, changes = 0;
1928        struct hpsa_scsi_dev_t *csd;
1929        unsigned long flags;
1930        struct hpsa_scsi_dev_t **added, **removed;
1931        int nadded, nremoved;
1932
1933        /*
1934         * A reset can cause a device status to change;
1935         * re-schedule the scan to see what happened.
1936         */
1937        spin_lock_irqsave(&h->reset_lock, flags);
1938        if (h->reset_in_progress) {
1939                h->drv_req_rescan = 1;
1940                spin_unlock_irqrestore(&h->reset_lock, flags);
1941                return;
1942        }
1943        spin_unlock_irqrestore(&h->reset_lock, flags);
1944
1945        added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1946        removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1947
1948        if (!added || !removed) {
1949                dev_warn(&h->pdev->dev,
1950                        "out of memory in adjust_hpsa_scsi_table\n");
1951                goto free_and_out;
1952        }
1953
1954        spin_lock_irqsave(&h->devlock, flags);
1955
1956        /* find any devices in h->dev[] that are not in
1957         * sd[] and remove them from h->dev[], and for any
1958         * devices which have changed, remove the old device
1959         * info and add the new device info.
1960         * If minor device attributes change, just update
1961         * the existing device structure.
1962         */
1963        i = 0;
1964        nremoved = 0;
1965        nadded = 0;
1966        while (i < h->ndevices) {
1967                csd = h->dev[i];
1968                device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1969                if (device_change == DEVICE_NOT_FOUND) {
1970                        changes++;
1971                        hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1972                        continue; /* remove ^^^, hence i not incremented */
1973                } else if (device_change == DEVICE_CHANGED) {
1974                        changes++;
1975                        hpsa_scsi_replace_entry(h, i, sd[entry],
1976                                added, &nadded, removed, &nremoved);
1977                        /* Set it to NULL to prevent it from being freed
1978                         * at the bottom of hpsa_update_scsi_devices()
1979                         */
1980                        sd[entry] = NULL;
1981                } else if (device_change == DEVICE_UPDATED) {
1982                        hpsa_scsi_update_entry(h, i, sd[entry]);
1983                }
1984                i++;
1985        }
1986
1987        /* Now, make sure every device listed in sd[] is also
1988         * listed in h->dev[], adding them if they aren't found
1989         */
1990
1991        for (i = 0; i < nsds; i++) {
1992                if (!sd[i]) /* if already added above. */
1993                        continue;
1994
1995                /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1996                 * as the SCSI mid-layer does not handle such devices well.
1997                 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1998                 * at 160Hz, and prevents the system from coming up.
1999                 */
2000                if (sd[i]->volume_offline) {
2001                        hpsa_show_volume_status(h, sd[i]);
2002                        hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
2003                        continue;
2004                }
2005
2006                device_change = hpsa_scsi_find_entry(sd[i], h->dev,
2007                                        h->ndevices, &entry);
2008                if (device_change == DEVICE_NOT_FOUND) {
2009                        changes++;
2010                        if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
2011                                break;
2012                        sd[i] = NULL; /* prevent from being freed later. */
2013                } else if (device_change == DEVICE_CHANGED) {
2014                        /* should never happen... */
2015                        changes++;
2016                        dev_warn(&h->pdev->dev,
2017                                "device unexpectedly changed.\n");
2018                        /* but if it does happen, we just ignore that device */
2019                }
2020        }
2021        hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2022
2023        /*
2024         * Now that h->dev[]->phys_disk[] is coherent, we can enable
2025         * any logical drives that need it enabled.
2026         *
2027         * The raid map should be current by now.
2028         *
2029         * We are updating the device list used for I/O requests.
2030         */
2031        for (i = 0; i < h->ndevices; i++) {
2032                if (h->dev[i] == NULL)
2033                        continue;
2034                h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2035        }
2036
2037        spin_unlock_irqrestore(&h->devlock, flags);
2038
2039        /* Monitor devices which are in one of several NOT READY states to be
2040         * brought online later. This must be done without holding h->devlock,
2041         * so don't touch h->dev[]
2042         */
2043        for (i = 0; i < nsds; i++) {
2044                if (!sd[i]) /* if already added above. */
2045                        continue;
2046                if (sd[i]->volume_offline)
2047                        hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2048        }
2049
2050        /* Don't notify the scsi mid layer of any changes the first time
2051         * through (or if there are no changes); scsi_scan_host will do it
2052         * later, the first time through.
2053         */
2054        if (!changes)
2055                goto free_and_out;
2056
2057        /* Notify scsi mid layer of any removed devices */
2058        for (i = 0; i < nremoved; i++) {
2059                if (removed[i] == NULL)
2060                        continue;
2061                if (removed[i]->expose_device)
2062                        hpsa_remove_device(h, removed[i]);
2063                kfree(removed[i]);
2064                removed[i] = NULL;
2065        }
2066
2067        /* Notify scsi mid layer of any added devices */
2068        for (i = 0; i < nadded; i++) {
2069                int rc = 0;
2070
2071                if (added[i] == NULL)
2072                        continue;
2073                if (!(added[i]->expose_device))
2074                        continue;
2075                rc = hpsa_add_device(h, added[i]);
2076                if (!rc)
2077                        continue;
2078                dev_warn(&h->pdev->dev,
2079                        "addition failed %d, device not added.\n", rc);
2080                /* now we have to remove it from h->dev,
2081                 * since it didn't get added to scsi mid layer
2082                 */
2083                fixup_botched_add(h, added[i]);
2084                h->drv_req_rescan = 1;
2085        }
2086
2087free_and_out:
2088        kfree(added);
2089        kfree(removed);
2090}
2091
2092/*
2093 * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
2094 * Assumes h->devlock is held.
2095 */
2096static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2097        int bus, int target, int lun)
2098{
2099        int i;
2100        struct hpsa_scsi_dev_t *sd;
2101
2102        for (i = 0; i < h->ndevices; i++) {
2103                sd = h->dev[i];
2104                if (sd->bus == bus && sd->target == target && sd->lun == lun)
2105                        return sd;
2106        }
2107        return NULL;
2108}
2109
2110static int hpsa_slave_alloc(struct scsi_device *sdev)
2111{
2112        struct hpsa_scsi_dev_t *sd = NULL;
2113        unsigned long flags;
2114        struct ctlr_info *h;
2115
2116        h = sdev_to_hba(sdev);
2117        spin_lock_irqsave(&h->devlock, flags);
2118        if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2119                struct scsi_target *starget;
2120                struct sas_rphy *rphy;
2121
2122                starget = scsi_target(sdev);
2123                rphy = target_to_rphy(starget);
2124                sd = hpsa_find_device_by_sas_rphy(h, rphy);
2125                if (sd) {
2126                        sd->target = sdev_id(sdev);
2127                        sd->lun = sdev->lun;
2128                }
2129        }
2130        if (!sd)
2131                sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2132                                        sdev_id(sdev), sdev->lun);
2133
2134        if (sd && sd->expose_device) {
2135                atomic_set(&sd->ioaccel_cmds_out, 0);
2136                sdev->hostdata = sd;
2137        } else
2138                sdev->hostdata = NULL;
2139        spin_unlock_irqrestore(&h->devlock, flags);
2140        return 0;
2141}
2142
2143/* configure scsi device based on internal per-device structure */
2144#define CTLR_TIMEOUT (120 * HZ)
2145static int hpsa_slave_configure(struct scsi_device *sdev)
2146{
2147        struct hpsa_scsi_dev_t *sd;
2148        int queue_depth;
2149
2150        sd = sdev->hostdata;
2151        sdev->no_uld_attach = !sd || !sd->expose_device;
2152
2153        if (sd) {
2154                sd->was_removed = 0;
2155                queue_depth = sd->queue_depth != 0 ?
2156                                sd->queue_depth : sdev->host->can_queue;
2157                if (sd->external) {
2158                        queue_depth = EXTERNAL_QD;
2159                        sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
2160                        blk_queue_rq_timeout(sdev->request_queue,
2161                                                HPSA_EH_PTRAID_TIMEOUT);
2162                }
2163                if (is_hba_lunid(sd->scsi3addr)) {
2164                        sdev->eh_timeout = CTLR_TIMEOUT;
2165                        blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT);
2166                }
2167        } else {
2168                queue_depth = sdev->host->can_queue;
2169        }
2170
2171        scsi_change_queue_depth(sdev, queue_depth);
2172
2173        return 0;
2174}
2175
2176static void hpsa_slave_destroy(struct scsi_device *sdev)
2177{
2178        struct hpsa_scsi_dev_t *hdev = NULL;
2179
2180        hdev = sdev->hostdata;
2181
2182        if (hdev)
2183                hdev->was_removed = 1;
2184}
2185
2186static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2187{
2188        int i;
2189
2190        if (!h->ioaccel2_cmd_sg_list)
2191                return;
2192        for (i = 0; i < h->nr_cmds; i++) {
2193                kfree(h->ioaccel2_cmd_sg_list[i]);
2194                h->ioaccel2_cmd_sg_list[i] = NULL;
2195        }
2196        kfree(h->ioaccel2_cmd_sg_list);
2197        h->ioaccel2_cmd_sg_list = NULL;
2198}
2199
2200static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2201{
2202        int i;
2203
2204        if (h->chainsize <= 0)
2205                return 0;
2206
2207        h->ioaccel2_cmd_sg_list =
2208                kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2209                                        GFP_KERNEL);
2210        if (!h->ioaccel2_cmd_sg_list)
2211                return -ENOMEM;
2212        for (i = 0; i < h->nr_cmds; i++) {
2213                h->ioaccel2_cmd_sg_list[i] =
2214                        kmalloc_array(h->maxsgentries,
2215                                      sizeof(*h->ioaccel2_cmd_sg_list[i]),
2216                                      GFP_KERNEL);
2217                if (!h->ioaccel2_cmd_sg_list[i])
2218                        goto clean;
2219        }
2220        return 0;
2221
2222clean:
2223        hpsa_free_ioaccel2_sg_chain_blocks(h);
2224        return -ENOMEM;
2225}
2226
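    /*
     * The arrays allocated in hpsa_allocate_ioaccel2_sg_chain_blocks()
     * above cost roughly h->nr_cmds * h->maxsgentries *
     * sizeof(struct ioaccel2_sg_element) in total; one block is kept per
     * command so each can be DMA-mapped independently by
     * hpsa_map_ioaccel2_sg_chain_block().
     */
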
2227static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2228{
2229        int i;
2230
2231        if (!h->cmd_sg_list)
2232                return;
2233        for (i = 0; i < h->nr_cmds; i++) {
2234                kfree(h->cmd_sg_list[i]);
2235                h->cmd_sg_list[i] = NULL;
2236        }
2237        kfree(h->cmd_sg_list);
2238        h->cmd_sg_list = NULL;
2239}
2240
2241static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2242{
2243        int i;
2244
2245        if (h->chainsize <= 0)
2246                return 0;
2247
2248        h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2249                                 GFP_KERNEL);
2250        if (!h->cmd_sg_list)
2251                return -ENOMEM;
2252
2253        for (i = 0; i < h->nr_cmds; i++) {
2254                h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2255                                                  sizeof(*h->cmd_sg_list[i]),
2256                                                  GFP_KERNEL);
2257                if (!h->cmd_sg_list[i])
2258                        goto clean;
2259
2260        }
2261        return 0;
2262
2263clean:
2264        hpsa_free_sg_chain_blocks(h);
2265        return -ENOMEM;
2266}
2267
2268static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2269        struct io_accel2_cmd *cp, struct CommandList *c)
2270{
2271        struct ioaccel2_sg_element *chain_block;
2272        u64 temp64;
2273        u32 chain_size;
2274
2275        chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2276        chain_size = le32_to_cpu(cp->sg[0].length);
2277        temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2278                                DMA_TO_DEVICE);
2279        if (dma_mapping_error(&h->pdev->dev, temp64)) {
2280                /* prevent subsequent unmapping */
2281                cp->sg->address = 0;
2282                return -1;
2283        }
2284        cp->sg->address = cpu_to_le64(temp64);
2285        return 0;
2286}
2287
2288static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2289        struct io_accel2_cmd *cp)
2290{
2291        struct ioaccel2_sg_element *chain_sg;
2292        u64 temp64;
2293        u32 chain_size;
2294
2295        chain_sg = cp->sg;
2296        temp64 = le64_to_cpu(chain_sg->address);
2297        chain_size = le32_to_cpu(cp->sg[0].length);
2298        dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2299}
2300
2301static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2302        struct CommandList *c)
2303{
2304        struct SGDescriptor *chain_sg, *chain_block;
2305        u64 temp64;
2306        u32 chain_len;
2307
2308        chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2309        chain_block = h->cmd_sg_list[c->cmdindex];
2310        chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2311        chain_len = sizeof(*chain_sg) *
2312                (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2313        chain_sg->Len = cpu_to_le32(chain_len);
2314        temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2315                                DMA_TO_DEVICE);
2316        if (dma_mapping_error(&h->pdev->dev, temp64)) {
2317                /* prevent subsequent unmapping */
2318                chain_sg->Addr = cpu_to_le64(0);
2319                return -1;
2320        }
2321        chain_sg->Addr = cpu_to_le64(temp64);
2322        return 0;
2323}
2324
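    /*
     * Illustrative example of the chaining in hpsa_map_sg_chain_block()
     * above, assuming the submission path counts the chain descriptor in
     * SGTotal: with max_cmd_sg_entries == 32 and a 40-segment request,
     * 31 descriptors sit inline, SG slot 31 becomes the HPSA_SG_CHAIN
     * pointer, and the chain block holds the remaining 9 descriptors
     * (SGTotal == 41, chain_len == 9 * sizeof(struct SGDescriptor)).
     */
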
2325static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2326        struct CommandList *c)
2327{
2328        struct SGDescriptor *chain_sg;
2329
2330        if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2331                return;
2332
2333        chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2334        dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2335                        le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2336}
2337
2338
2339/* Decode the various types of errors on ioaccel2 path.
2340 * Return 1 for any error that should generate a RAID path retry.
2341 * Return 0 for errors that don't require a RAID path retry.
2342 */
2343static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2344                                        struct CommandList *c,
2345                                        struct scsi_cmnd *cmd,
2346                                        struct io_accel2_cmd *c2,
2347                                        struct hpsa_scsi_dev_t *dev)
2348{
2349        int data_len;
2350        int retry = 0;
2351        u32 ioaccel2_resid = 0;
2352
2353        switch (c2->error_data.serv_response) {
2354        case IOACCEL2_SERV_RESPONSE_COMPLETE:
2355                switch (c2->error_data.status) {
2356                case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2357                        if (cmd)
2358                                cmd->result = 0;
2359                        break;
2360                case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2361                        cmd->result |= SAM_STAT_CHECK_CONDITION;
2362                        if (c2->error_data.data_present !=
2363                                        IOACCEL2_SENSE_DATA_PRESENT) {
2364                                memset(cmd->sense_buffer, 0,
2365                                        SCSI_SENSE_BUFFERSIZE);
2366                                break;
2367                        }
2368                        /* copy the sense data */
2369                        data_len = c2->error_data.sense_data_len;
2370                        if (data_len > SCSI_SENSE_BUFFERSIZE)
2371                                data_len = SCSI_SENSE_BUFFERSIZE;
2372                        if (data_len > sizeof(c2->error_data.sense_data_buff))
2373                                data_len =
2374                                        sizeof(c2->error_data.sense_data_buff);
2375                        memcpy(cmd->sense_buffer,
2376                                c2->error_data.sense_data_buff, data_len);
2377                        retry = 1;
2378                        break;
2379                case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2380                        retry = 1;
2381                        break;
2382                case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2383                        retry = 1;
2384                        break;
2385                case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2386                        retry = 1;
2387                        break;
2388                case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2389                        retry = 1;
2390                        break;
2391                default:
2392                        retry = 1;
2393                        break;
2394                }
2395                break;
2396        case IOACCEL2_SERV_RESPONSE_FAILURE:
2397                switch (c2->error_data.status) {
2398                case IOACCEL2_STATUS_SR_IO_ERROR:
2399                case IOACCEL2_STATUS_SR_IO_ABORTED:
2400                case IOACCEL2_STATUS_SR_OVERRUN:
2401                        retry = 1;
2402                        break;
2403                case IOACCEL2_STATUS_SR_UNDERRUN:
2404                        cmd->result = (DID_OK << 16);           /* host byte */
2405                        ioaccel2_resid = get_unaligned_le32(
2406                                                &c2->error_data.resid_cnt[0]);
2407                        scsi_set_resid(cmd, ioaccel2_resid);
2408                        break;
2409                case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2410                case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2411                case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2412                        /*
2413                         * Did an HBA disk disappear? We will eventually
2414                         * get a state change event from the controller but
2415                         * in the meantime, we need to tell the OS that the
2416                         * HBA disk is no longer there and stop I/O
2417                         * from going down. This allows the potential re-insert
2418                         * of the disk to get the same device node.
2419                         */
2420                        if (dev->physical_device && dev->expose_device) {
2421                                cmd->result = DID_NO_CONNECT << 16;
2422                                dev->removed = 1;
2423                                h->drv_req_rescan = 1;
2424                                dev_warn(&h->pdev->dev,
2425                                        "%s: device is gone!\n", __func__);
2426                        } else
2427                                /*
2428                                 * Retry by sending down the RAID path.
2429                                 * We will get an event from ctlr to
2430                                 * trigger rescan regardless.
2431                                 */
2432                                retry = 1;
2433                        break;
2434                default:
2435                        retry = 1;
2436                }
2437                break;
2438        case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2439                break;
2440        case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2441                break;
2442        case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2443                retry = 1;
2444                break;
2445        case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2446                break;
2447        default:
2448                retry = 1;
2449                break;
2450        }
2451
2452        if (dev->in_reset)
2453                retry = 0;
2454
2455        return retry;   /* retry on raid path? */
2456}
2457
2458static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2459                struct CommandList *c)
2460{
2461        struct hpsa_scsi_dev_t *dev = c->device;
2462
2463        /*
2464         * Reset c->scsi_cmd here so that the reset handler will know
2465         * this command has completed.  Then, check to see if the handler is
2466         * waiting for this command, and, if so, wake it.
2467         */
2468        c->scsi_cmd = SCSI_CMD_IDLE;
2469        mb();   /* Declare command idle before checking for pending events. */
2470        if (dev) {
2471                atomic_dec(&dev->commands_outstanding);
2472                if (dev->in_reset &&
2473                        atomic_read(&dev->commands_outstanding) <= 0)
2474                        wake_up_all(&h->event_sync_wait_queue);
2475        }
2476}
2477
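    /*
     * The mb() in hpsa_cmd_resolve_events() above is assumed to pair
     * with the reset path: the command is marked idle before in_reset
     * and commands_outstanding are examined, so a waiter on
     * event_sync_wait_queue sees either an idle command or the wakeup.
     */
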
2478static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2479                                      struct CommandList *c)
2480{
2481        hpsa_cmd_resolve_events(h, c);
2482        cmd_tagged_free(h, c);
2483}
2484
2485static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2486                struct CommandList *c, struct scsi_cmnd *cmd)
2487{
2488        hpsa_cmd_resolve_and_free(h, c);
2489        if (cmd)
2490                scsi_done(cmd);
2491}
2492
2493static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2494{
2495        INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2496        queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2497}
2498
2499static void process_ioaccel2_completion(struct ctlr_info *h,
2500                struct CommandList *c, struct scsi_cmnd *cmd,
2501                struct hpsa_scsi_dev_t *dev)
2502{
2503        struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2504
2505        /* check for good status */
2506        if (likely(c2->error_data.serv_response == 0 &&
2507                        c2->error_data.status == 0)) {
2508                cmd->result = 0;
2509                return hpsa_cmd_free_and_done(h, c, cmd);
2510        }
2511
2512        /*
2513         * Any RAID offload error results in retry which will use
2514         * the normal I/O path so the controller can handle whatever is
2515         * wrong.
2516         */
2517        if (is_logical_device(dev) &&
2518                c2->error_data.serv_response ==
2519                        IOACCEL2_SERV_RESPONSE_FAILURE) {
2520                if (c2->error_data.status ==
2521                        IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2522                        hpsa_turn_off_ioaccel_for_device(dev);
2523                }
2524
2525                if (dev->in_reset) {
2526                        cmd->result = DID_RESET << 16;
2527                        return hpsa_cmd_free_and_done(h, c, cmd);
2528                }
2529
2530                return hpsa_retry_cmd(h, c);
2531        }
2532
2533        if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2534                return hpsa_retry_cmd(h, c);
2535
2536        return hpsa_cmd_free_and_done(h, c, cmd);
2537}
2538
2539/* Returns 0 on success, < 0 otherwise. */
2540static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2541                                        struct CommandList *cp)
2542{
2543        u8 tmf_status = cp->err_info->ScsiStatus;
2544
2545        switch (tmf_status) {
2546        case CISS_TMF_COMPLETE:
2547                /*
2548                 * CISS_TMF_COMPLETE never happens; instead,
2549                 * ei->CommandStatus == 0 for this case.
2550                 */
2551        case CISS_TMF_SUCCESS:
2552                return 0;
2553        case CISS_TMF_INVALID_FRAME:
2554        case CISS_TMF_NOT_SUPPORTED:
2555        case CISS_TMF_FAILED:
2556        case CISS_TMF_WRONG_LUN:
2557        case CISS_TMF_OVERLAPPED_TAG:
2558                break;
2559        default:
2560                dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2561                                tmf_status);
2562                break;
2563        }
2564        return -tmf_status;
2565}
2566
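    /*
     * hpsa_evaluate_tmf_status() above returns the CISS TMF status
     * negated on failure, so callers receive a nonzero negative value
     * (e.g. -CISS_TMF_FAILED) that they can propagate as an error code.
     */
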
2567static void complete_scsi_command(struct CommandList *cp)
2568{
2569        struct scsi_cmnd *cmd;
2570        struct ctlr_info *h;
2571        struct ErrorInfo *ei;
2572        struct hpsa_scsi_dev_t *dev;
2573        struct io_accel2_cmd *c2;
2574
2575        u8 sense_key;
2576        u8 asc;      /* additional sense code */
2577        u8 ascq;     /* additional sense code qualifier */
2578        unsigned long sense_data_size;
2579
2580        ei = cp->err_info;
2581        cmd = cp->scsi_cmd;
2582        h = cp->h;
2583
2584        if (!cmd->device) {
2585                cmd->result = DID_NO_CONNECT << 16;
2586                return hpsa_cmd_free_and_done(h, cp, cmd);
2587        }
2588
2589        dev = cmd->device->hostdata;
2590        if (!dev) {
2591                cmd->result = DID_NO_CONNECT << 16;
2592                return hpsa_cmd_free_and_done(h, cp, cmd);
2593        }
2594        c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2595
2596        scsi_dma_unmap(cmd); /* undo the DMA mappings */
2597        if ((cp->cmd_type == CMD_SCSI) &&
2598                (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2599                hpsa_unmap_sg_chain_block(h, cp);
2600
2601        if ((cp->cmd_type == CMD_IOACCEL2) &&
2602                (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2603                hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2604
2605        cmd->result = (DID_OK << 16);           /* host byte */
2606
2607        /* SCSI command has already been cleaned up in SML */
2608        if (dev->was_removed) {
2609                hpsa_cmd_resolve_and_free(h, cp);
2610                return;
2611        }
2612
2613        if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2614                if (dev->physical_device && dev->expose_device &&
2615                        dev->removed) {
2616                        cmd->result = DID_NO_CONNECT << 16;
2617                        return hpsa_cmd_free_and_done(h, cp, cmd);
2618                }
2619                if (likely(cp->phys_disk != NULL))
2620                        atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2621        }
2622
2623        /*
2624         * We check for lockup status here as it may be set for
2625         * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2626         * fail_all_outstanding_cmds()
2627         */
2628        if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2629                /* DID_NO_CONNECT will prevent a retry */
2630                cmd->result = DID_NO_CONNECT << 16;
2631                return hpsa_cmd_free_and_done(h, cp, cmd);
2632        }
2633
2634        if (cp->cmd_type == CMD_IOACCEL2)
2635                return process_ioaccel2_completion(h, cp, cmd, dev);
2636
2637        scsi_set_resid(cmd, ei->ResidualCnt);
2638        if (ei->CommandStatus == 0)
2639                return hpsa_cmd_free_and_done(h, cp, cmd);
2640
2641        /* For I/O accelerator commands, copy over some fields to the normal
2642         * CISS header used below for error handling.
2643         */
2644        if (cp->cmd_type == CMD_IOACCEL1) {
2645                struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2646                cp->Header.SGList = scsi_sg_count(cmd);
2647                cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2648                cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2649                        IOACCEL1_IOFLAGS_CDBLEN_MASK;
2650                cp->Header.tag = c->tag;
2651                memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2652                memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2653
2654                /* Any RAID offload error results in retry which will use
2655                 * the normal I/O path so the controller can handle whatever's
2656                 * wrong.
2657                 */
2658                if (is_logical_device(dev)) {
2659                        if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2660                                dev->offload_enabled = 0;
2661                        return hpsa_retry_cmd(h, cp);
2662                }
2663        }
2664
2665        /* an error has occurred */
2666        switch (ei->CommandStatus) {
2667
2668        case CMD_TARGET_STATUS:
2669                cmd->result |= ei->ScsiStatus;
2670                /* copy the sense data */
2671                if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2672                        sense_data_size = SCSI_SENSE_BUFFERSIZE;
2673                else
2674                        sense_data_size = sizeof(ei->SenseInfo);
2675                if (ei->SenseLen < sense_data_size)
2676                        sense_data_size = ei->SenseLen;
2677                memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2678                if (ei->ScsiStatus)
2679                        decode_sense_data(ei->SenseInfo, sense_data_size,
2680                                &sense_key, &asc, &ascq);
2681                if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2682                        switch (sense_key) {
2683                        case ABORTED_COMMAND:
2684                                cmd->result |= DID_SOFT_ERROR << 16;
2685                                break;
2686                        case UNIT_ATTENTION:
2687                                if (asc == 0x3F && ascq == 0x0E) /* LUN data changed */
2688                                        h->drv_req_rescan = 1;
2689                                break;
2690                        case ILLEGAL_REQUEST:
2691                                if (asc == 0x25 && ascq == 0x00) { /* LUN not supported */
2692                                        dev->removed = 1;
2693                                        cmd->result = DID_NO_CONNECT << 16;
2694                                }
2695                                break;
2696                        }
2697                        break;
2698                }
2699                /* Problem was not a check condition
2700                 * Pass it up to the upper layers...
2701                 */
2702                if (ei->ScsiStatus) {
2703                        dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2704                                "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2705                                "Returning result: 0x%x\n",
2706                                cp, ei->ScsiStatus,
2707                                sense_key, asc, ascq,
2708                                cmd->result);
2709                } else {  /* scsi status is zero??? How??? */
2710                        dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2711                                "Returning no connection.\n", cp);
2712
2713                        /* Ordinarily, this case should never happen,
2714                         * but there is a bug in some released firmware
2715                         * revisions that allows it to happen if, for
2716                         * example, a 4100 backplane loses power and
2717                         * the tape drive is in it.  We assume that
2718                         * it's a fatal error of some kind because we
2719                         * can't show that it wasn't. We will make it
2720                         * look like selection timeout since that is
2721                         * the most common reason for this to occur,
2722                         * and it's severe enough.
2723                         */
2724
2725                        cmd->result = DID_NO_CONNECT << 16;
2726                }
2727                break;
2728
2729        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2730                break;
2731        case CMD_DATA_OVERRUN:
2732                dev_warn(&h->pdev->dev,
2733                        "CDB %16phN data overrun\n", cp->Request.CDB);
2734                break;
2735        case CMD_INVALID: {
2738                /* We get CMD_INVALID if you address a non-existent device
2739                 * instead of a selection timeout (no response).  You will
2740                 * see this if you yank out a drive, then try to access it.
2741                 * This is kind of a shame because it means that any other
2742                 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2743                 * missing target. */
2744                cmd->result = DID_NO_CONNECT << 16;
2745        }
2746                break;
2747        case CMD_PROTOCOL_ERR:
2748                cmd->result = DID_ERROR << 16;
2749                dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2750                                cp->Request.CDB);
2751                break;
2752        case CMD_HARDWARE_ERR:
2753                cmd->result = DID_ERROR << 16;
2754                dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2755                        cp->Request.CDB);
2756                break;
2757        case CMD_CONNECTION_LOST:
2758                cmd->result = DID_ERROR << 16;
2759                dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2760                        cp->Request.CDB);
2761                break;
2762        case CMD_ABORTED:
2763                cmd->result = DID_ABORT << 16;
2764                break;
2765        case CMD_ABORT_FAILED:
2766                cmd->result = DID_ERROR << 16;
2767                dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2768                        cp->Request.CDB);
2769                break;
2770        case CMD_UNSOLICITED_ABORT:
2771                cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2772                dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2773                        cp->Request.CDB);
2774                break;
2775        case CMD_TIMEOUT:
2776                cmd->result = DID_TIME_OUT << 16;
2777                dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2778                        cp->Request.CDB);
2779                break;
2780        case CMD_UNABORTABLE:
2781                cmd->result = DID_ERROR << 16;
2782                dev_warn(&h->pdev->dev, "Command unabortable\n");
2783                break;
2784        case CMD_TMF_STATUS:
2785                if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2786                        cmd->result = DID_ERROR << 16;
2787                break;
2788        case CMD_IOACCEL_DISABLED:
2789                /* This only handles the direct pass-through case since RAID
2790                 * offload is handled above.  Just attempt a retry.
2791                 */
2792                cmd->result = DID_SOFT_ERROR << 16;
2793                dev_warn(&h->pdev->dev,
2794                                "cp %p had HP SSD Smart Path error\n", cp);
2795                break;
2796        default:
2797                cmd->result = DID_ERROR << 16;
2798                dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2799                                cp, ei->CommandStatus);
2800        }
2801
2802        return hpsa_cmd_free_and_done(h, cp, cmd);
2803}
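
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * DID_* codes stored in cmd->result above occupy bits 16-23 of the result
 * word, and the SCSI midlayer's host_byte() accessor recovers them.
 */
static inline bool __maybe_unused hpsa_example_result_is_no_connect(int result)
{
        /* e.g. complete_scsi_command() stored DID_NO_CONNECT << 16 */
        return host_byte(result) == DID_NO_CONNECT;
}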
2804
2805static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2806                int sg_used, enum dma_data_direction data_direction)
2807{
2808        int i;
2809
2810        for (i = 0; i < sg_used; i++)
2811                dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2812                                le32_to_cpu(c->SG[i].Len),
2813                                data_direction);
2814}
2815
2816static int hpsa_map_one(struct pci_dev *pdev,
2817                struct CommandList *cp,
2818                unsigned char *buf,
2819                size_t buflen,
2820                enum dma_data_direction data_direction)
2821{
2822        u64 addr64;
2823
2824        if (buflen == 0 || data_direction == DMA_NONE) {
2825                cp->Header.SGList = 0;
2826                cp->Header.SGTotal = cpu_to_le16(0);
2827                return 0;
2828        }
2829
2830        addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2831        if (dma_mapping_error(&pdev->dev, addr64)) {
2832                /* Prevent subsequent unmap of something never mapped */
2833                cp->Header.SGList = 0;
2834                cp->Header.SGTotal = cpu_to_le16(0);
2835                return -1;
2836        }
2837        cp->SG[0].Addr = cpu_to_le64(addr64);
2838        cp->SG[0].Len = cpu_to_le32(buflen);
2839        cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2840        cp->Header.SGList = 1;   /* number of SG entries in this command */
2841        cp->Header.SGTotal = cpu_to_le16(1); /* total SG entries for the command */
2842        return 0;
2843}
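
/*
 * Usage sketch (hypothetical, not a call site in this file): a single-buffer
 * command pairs hpsa_map_one() with hpsa_pci_unmap(), passing sg_used == 1
 * and the same DMA direction because exactly one SG entry was filled in.
 */
static int __maybe_unused hpsa_example_map_then_unmap(struct pci_dev *pdev,
                struct CommandList *c, unsigned char *buf, size_t buflen)
{
        if (hpsa_map_one(pdev, c, buf, buflen, DMA_FROM_DEVICE))
                return -ENOMEM; /* SGList/SGTotal were zeroed on failure */
        /* ... submit the command and wait for it to complete here ... */
        hpsa_pci_unmap(pdev, c, 1, DMA_FROM_DEVICE);
        return 0;
}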
2844
2845#define NO_TIMEOUT ((unsigned long) -1)
2846#define DEFAULT_TIMEOUT 30000 /* milliseconds */
2847static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2848        struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2849{
2850        DECLARE_COMPLETION_ONSTACK(wait);
2851
2852        c->waiting = &wait;
2853        __enqueue_cmd_and_start_io(h, c, reply_queue);
2854        if (timeout_msecs == NO_TIMEOUT) {
2855                /* TODO: get rid of this no-timeout thing */
2856                wait_for_completion_io(&wait);
2857                return IO_OK;
2858        }
2859        if (!wait_for_completion_io_timeout(&wait,
2860                                        msecs_to_jiffies(timeout_msecs))) {
2861                dev_warn(&h->pdev->dev, "Command timed out.\n");
2862                return -ETIMEDOUT;
2863        }
2864        return IO_OK;
2865}
2866
2867static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2868                                   int reply_queue, unsigned long timeout_msecs)
2869{
2870        if (unlikely(lockup_detected(h))) {
2871                c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2872                return IO_OK;
2873        }
2874        return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2875}
2876
2877static u32 lockup_detected(struct ctlr_info *h)
2878{
2879        int cpu;
2880        u32 rc, *lockup_detected;
2881
2882        cpu = get_cpu();
2883        lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2884        rc = *lockup_detected;
2885        put_cpu();
2886        return rc;
2887}
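
/*
 * Note on the pattern above: get_cpu() disables preemption so the CPU
 * number remains valid while that CPU's per-cpu copy is read, and
 * put_cpu() re-enables preemption.  Reading any single CPU's copy is
 * sufficient here because the lockup flag is set on every CPU when a
 * lockup is detected.
 */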
2888
2889#define MAX_DRIVER_CMD_RETRIES 25
2890static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2891                struct CommandList *c, enum dma_data_direction data_direction,
2892                unsigned long timeout_msecs)
2893{
2894        int backoff_time = 10, retry_count = 0;
2895        int rc;
2896
2897        do {
2898                memset(c->err_info, 0, sizeof(*c->err_info));
2899                rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2900                                                  timeout_msecs);
2901                if (rc)
2902                        break;
2903                retry_count++;
2904                if (retry_count > 3) {
2905                        msleep(backoff_time);
2906                        if (backoff_time < 1000)
2907                                backoff_time *= 2;
2908                }
2909        } while ((check_for_unit_attention(h, c) ||
2910                        check_for_busy(h, c)) &&
2911                        retry_count <= MAX_DRIVER_CMD_RETRIES);
2912        hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2913        if (retry_count > MAX_DRIVER_CMD_RETRIES)
2914                rc = -EIO;
2915        return rc;
2916}
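
/*
 * Illustrative sketch of the backoff schedule above (hypothetical helper,
 * not used by the driver): the first three retries do not sleep; from the
 * fourth on, the sleep doubles from 10 ms.  Note the "< 1000" test stops
 * the doubling only after the value has already reached 1280 ms, so the
 * effective cap is 1280 ms rather than 1000.
 */
static int __maybe_unused hpsa_example_backoff_ms(int retry_count)
{
        int ms = 10, i;

        if (retry_count <= 3)
                return 0;       /* no sleep before the fourth retry */
        for (i = 4; i < retry_count && ms < 1000; i++)
                ms *= 2;        /* 10, 20, 40, ..., 640, 1280 */
        return ms;
}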
2917
2918static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2919                                struct CommandList *c)
2920{
2921        const u8 *cdb = c->Request.CDB;
2922        const u8 *lun = c->Header.LUN.LunAddrBytes;
2923
2924        dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2925                 txt, lun, cdb);
2926}
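
/*
 * Example of the %phN printk extension used above (sample output is
 * illustrative): it prints a buffer as bare hex bytes with no separators,
 * so an 8-byte LUN and a 16-byte CDB come out roughly as:
 *
 *      hpsa 0000:03:00.0: invalid command: LUN:0000004001000000 CDB:12000000600000000000000000000000
 *
 * with the "hpsa 0000:03:00.0:" prefix supplied by dev_warn().
 */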
2927
2928static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2929                        struct CommandList *cp)
2930{
2931        const struct ErrorInfo *ei = cp->err_info;
2932        struct device *d = &cp->h->pdev->dev;
2933        u8 sense_key, asc, ascq;
2934        int sense_len;
2935
2936        switch (ei->CommandStatus) {
2937        case CMD_TARGET_STATUS:
2938                if (ei->SenseLen > sizeof(ei->SenseInfo))
2939                        sense_len = sizeof(ei->SenseInfo);
2940                else
2941                        sense_len = ei->SenseLen;
2942                decode_sense_data(ei->SenseInfo, sense_len,
2943                                        &sense_key, &asc, &ascq);
2944                hpsa_print_cmd(h, "SCSI status", cp);
2945                if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2946                        dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2947                                sense_key, asc, ascq);
2948                else
2949                        dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2950                if (ei->ScsiStatus == 0)
2951                        dev_warn(d, "SCSI status is abnormally zero (probably a selection timeout reported incorrectly due to a known firmware bug, circa July 2001)\n");
2955                break;
2956        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2957                break;
2958        case CMD_DATA_OVERRUN:
2959                hpsa_print_cmd(h, "overrun condition", cp);
2960                break;
2961        case CMD_INVALID: {
2962                /* controller unfortunately reports SCSI passthroughs
2963                 * to non-existent targets as invalid commands.
2964                 */
2965                hpsa_print_cmd(h, "invalid command", cp);
2966                dev_warn(d, "probably means device no longer present\n");
2967                }
2968                break;
2969        case CMD_PROTOCOL_ERR:
2970                hpsa_print_cmd(h, "protocol error", cp);
2971                break;
2972        case CMD_HARDWARE_ERR:
2973                hpsa_print_cmd(h, "hardware error", cp);
2974                break;
2975        case CMD_CONNECTION_LOST:
2976                hpsa_print_cmd(h, "connection lost", cp);
2977                break;
2978        case CMD_ABORTED:
2979                hpsa_print_cmd(h, "aborted", cp);
2980                break;
2981        case CMD_ABORT_FAILED:
2982                hpsa_print_cmd(h, "abort failed", cp);
2983                break;
2984        case CMD_UNSOLICITED_ABORT:
2985                hpsa_print_cmd(h, "unsolicited abort", cp);
2986                break;
2987        case CMD_TIMEOUT:
2988                hpsa_print_cmd(h, "timed out", cp);
2989                break;
2990        case CMD_UNABORTABLE:
2991                hpsa_print_cmd(h, "unabortable", cp);
2992                break;
2993        case CMD_CTLR_LOCKUP:
2994                hpsa_print_cmd(h, "controller lockup detected", cp);
2995                break;
2996        default:
2997                hpsa_print_cmd(h, "unknown status", cp);
2998                dev_warn(d, "Unknown command status %x\n",
2999                                ei->CommandStatus);
3000        }
3001}
3002
3003static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
3004                                        u8 page, u8 *buf, size_t bufsize)
3005{
3006        int rc = IO_OK;
3007        struct CommandList *c;
3008        struct ErrorInfo *ei;
3009
3010        c = cmd_alloc(h);
3011        if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
3012                        page, scsi3addr, TYPE_CMD)) {
3013                rc = -1;
3014                goto out;
3015        }
3016        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3017                        NO_TIMEOUT);
3018        if (rc)
3019                goto out;
3020        ei = c->err_info;
3021        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3022                hpsa_scsi_interpret_error(h, c);
3023                rc = -1;
3024        }
3025out:
3026        cmd_free(h, c);
3027        return rc;
3028}
3029
3030static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
3031                                                u8 *scsi3addr)
3032{
3033        u8 *buf;
3034        u64 sa = 0;
3035        int rc = 0;
3036
3037        buf = kzalloc(1024, GFP_KERNEL);
3038        if (!buf)
3039                return 0;
3040
3041        rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3042                                        buf, 1024);
3043
3044        if (rc)
3045                goto out;
3046
3047        sa = get_unaligned_be64(buf+12);
3048
3049out:
3050        kfree(buf);
3051        return sa;
3052}
3053
3054static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3055                        u16 page, unsigned char *buf,
3056                        unsigned char bufsize)
3057{
3058        int rc = IO_OK;
3059        struct CommandList *c;
3060        struct ErrorInfo *ei;
3061
3062        c = cmd_alloc(h);
3063
3064        if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3065                        page, scsi3addr, TYPE_CMD)) {
3066                rc = -1;
3067                goto out;
3068        }
3069        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3070                        NO_TIMEOUT);
3071        if (rc)
3072                goto out;
3073        ei = c->err_info;
3074        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3075                hpsa_scsi_interpret_error(h, c);
3076                rc = -1;
3077        }
3078out:
3079        cmd_free(h, c);
3080        return rc;
3081}
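
/*
 * Usage sketch (hypothetical, modeled on callers later in this file): issue
 * a standard INQUIRY (page argument 0) and copy out the 8-byte vendor id
 * that lives at bytes 8-15 of the response.
 */
static int __maybe_unused hpsa_example_read_vendor(struct ctlr_info *h,
                unsigned char *scsi3addr, unsigned char vendor[8])
{
        unsigned char *buf;
        int rc;

        buf = kzalloc(64, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0, buf, 64);
        if (rc == 0)
                memcpy(vendor, &buf[8], 8);     /* INQUIRY vendor id field */
        kfree(buf);
        return rc;
}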
3082
3083static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3084        u8 reset_type, int reply_queue)
3085{
3086        int rc = IO_OK;
3087        struct CommandList *c;
3088        struct ErrorInfo *ei;
3089
3090        c = cmd_alloc(h);
3091        c->device = dev;
3092
3093        /* fill_cmd can't fail here, no data buffer to map. */
3094        (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
3095        rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3096        if (rc) {
3097                dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3098                goto out;
3099        }
3100        /* no unmap needed here because no data xfer. */
3101
3102        ei = c->err_info;
3103        if (ei->CommandStatus != 0) {
3104                hpsa_scsi_interpret_error(h, c);
3105                rc = -1;
3106        }
3107out:
3108        cmd_free(h, c);
3109        return rc;
3110}
3111
3112static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3113                               struct hpsa_scsi_dev_t *dev,
3114                               unsigned char *scsi3addr)
3115{
3116        int i;
3117        bool match = false;
3118        struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3119        struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3120
3121        if (hpsa_is_cmd_idle(c))
3122                return false;
3123
3124        switch (c->cmd_type) {
3125        case CMD_SCSI:
3126        case CMD_IOCTL_PEND:
3127                match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3128                                sizeof(c->Header.LUN.LunAddrBytes));
3129                break;
3130
3131        case CMD_IOACCEL1:
3132        case CMD_IOACCEL2:
3133                if (c->phys_disk == dev) {
3134                        /* HBA mode match */
3135                        match = true;
3136                } else {
3137                        /* Possible RAID mode -- check each phys dev. */
3138                        /* FIXME:  Do we need to take out a lock here?  If
3139                         * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3140                         * instead. */
3141                        for (i = 0; i < dev->nphysical_disks && !match; i++) {
3142                                /* FIXME: an alternate test might be
3143                                 *
3144                                 * match = dev->phys_disk[i]->ioaccel_handle
3145                                 *              == c2->scsi_nexus;      */
3146                                match = dev->phys_disk[i] == c->phys_disk;
3147                        }
3148                }
3149                break;
3150
3151        case IOACCEL2_TMF:
3152                for (i = 0; i < dev->nphysical_disks && !match; i++) {
3153                        match = dev->phys_disk[i]->ioaccel_handle ==
3154                                        le32_to_cpu(ac->it_nexus);
3155                }
3156                break;
3157
3158        case 0:         /* The command is in the middle of being initialized. */
3159                match = false;
3160                break;
3161
3162        default:
3163                dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3164                        c->cmd_type);
3165                BUG();
3166        }
3167
3168        return match;
3169}
3170
3171static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3172        u8 reset_type, int reply_queue)
3173{
3174        int rc = 0;
3175
3176        /* We can really only handle one reset at a time */
3177        if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3178                dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3179                return -EINTR;
3180        }
3181
3182        rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
3183        if (!rc) {
3184                /* incremented by sending the reset request */
3185                atomic_dec(&dev->commands_outstanding);
3186                wait_event(h->event_sync_wait_queue,
3187                        atomic_read(&dev->commands_outstanding) <= 0 ||
3188                        lockup_detected(h));
3189        }
3190
3191        if (unlikely(lockup_detected(h))) {
3192                dev_warn(&h->pdev->dev,
3193                         "Controller lockup detected during reset wait\n");
3194                rc = -ENODEV;
3195        }
3196
3197        if (!rc)
3198                rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
3199
3200        mutex_unlock(&h->reset_mutex);
3201        return rc;
3202}
3203
3204static void hpsa_get_raid_level(struct ctlr_info *h,
3205        unsigned char *scsi3addr, unsigned char *raid_level)
3206{
3207        int rc;
3208        unsigned char *buf;
3209
3210        *raid_level = RAID_UNKNOWN;
3211        buf = kzalloc(64, GFP_KERNEL);
3212        if (!buf)
3213                return;
3214
3215        if (!hpsa_vpd_page_supported(h, scsi3addr,
3216                HPSA_VPD_LV_DEVICE_GEOMETRY))
3217                goto exit;
3218
3219        rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3220                HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3221
3222        if (rc == 0)
3223                *raid_level = buf[8];
3224        if (*raid_level > RAID_UNKNOWN)
3225                *raid_level = RAID_UNKNOWN;
3226exit:
3227        kfree(buf);
3228        return;
3229}
3230
3231#define HPSA_MAP_DEBUG
3232#ifdef HPSA_MAP_DEBUG
3233static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3234                                struct raid_map_data *map_buff)
3235{
3236        struct raid_map_disk_data *dd = &map_buff->data[0];
3237        int map, row, col;
3238        u16 map_cnt, row_cnt, disks_per_row;
3239
3240        if (rc != 0)
3241                return;
3242
3243        /* Show details only if debugging has been activated. */
3244        if (h->raid_offload_debug < 2)
3245                return;
3246
3247        dev_info(&h->pdev->dev, "structure_size = %u\n",
3248                                le32_to_cpu(map_buff->structure_size));
3249        dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3250                        le32_to_cpu(map_buff->volume_blk_size));
3251        dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3252                        le64_to_cpu(map_buff->volume_blk_cnt));
3253        dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3254                        map_buff->phys_blk_shift);
3255        dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3256                        map_buff->parity_rotation_shift);
3257        dev_info(&h->pdev->dev, "strip_size = %u\n",
3258                        le16_to_cpu(map_buff->strip_size));
3259        dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3260                        le64_to_cpu(map_buff->disk_starting_blk));
3261        dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3262                        le64_to_cpu(map_buff->disk_blk_cnt));
3263        dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3264                        le16_to_cpu(map_buff->data_disks_per_row));
3265        dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3266                        le16_to_cpu(map_buff->metadata_disks_per_row));
3267        dev_info(&h->pdev->dev, "row_cnt = %u\n",
3268                        le16_to_cpu(map_buff->row_cnt));
3269        dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3270                        le16_to_cpu(map_buff->layout_map_count));
3271        dev_info(&h->pdev->dev, "flags = 0x%x\n",
3272                        le16_to_cpu(map_buff->flags));
3273        dev_info(&h->pdev->dev, "encryption = %s\n",
3274                        le16_to_cpu(map_buff->flags) &
3275                        RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
3276        dev_info(&h->pdev->dev, "dekindex = %u\n",
3277                        le16_to_cpu(map_buff->dekindex));
3278        map_cnt = le16_to_cpu(map_buff->layout_map_count);
3279        for (map = 0; map < map_cnt; map++) {
3280                dev_info(&h->pdev->dev, "Map%u:\n", map);
3281                row_cnt = le16_to_cpu(map_buff->row_cnt);
3282                for (row = 0; row < row_cnt; row++) {
3283                        dev_info(&h->pdev->dev, "  Row%u:\n", row);
3284                        disks_per_row =
3285                                le16_to_cpu(map_buff->data_disks_per_row);
3286                        for (col = 0; col < disks_per_row; col++, dd++)
3287                                dev_info(&h->pdev->dev,
3288                                        "    D%02u: h=0x%04x xor=%u,%u\n",
3289                                        col, dd->ioaccel_handle,
3290                                        dd->xor_mult[0], dd->xor_mult[1]);
3291                        disks_per_row =
3292                                le16_to_cpu(map_buff->metadata_disks_per_row);
3293                        for (col = 0; col < disks_per_row; col++, dd++)
3294                                dev_info(&h->pdev->dev,
3295                                        "    M%02u: h=0x%04x xor=%u,%u\n",
3296                                        col, dd->ioaccel_handle,
3297                                        dd->xor_mult[0], dd->xor_mult[1]);
3298                }
3299        }
3300}
3301#else
3302static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3303                        __attribute__((unused)) int rc,
3304                        __attribute__((unused)) struct raid_map_data *map_buff)
3305{
3306}
3307#endif
3308
3309static int hpsa_get_raid_map(struct ctlr_info *h,
3310        unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3311{
3312        int rc = 0;
3313        struct CommandList *c;
3314        struct ErrorInfo *ei;
3315
3316        c = cmd_alloc(h);
3317
3318        if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3319                        sizeof(this_device->raid_map), 0,
3320                        scsi3addr, TYPE_CMD)) {
3321                dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3322                cmd_free(h, c);
3323                return -1;
3324        }
3325        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3326                        NO_TIMEOUT);
3327        if (rc)
3328                goto out;
3329        ei = c->err_info;
3330        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3331                hpsa_scsi_interpret_error(h, c);
3332                rc = -1;
3333                goto out;
3334        }
3335        cmd_free(h, c);
3336
3337        /* @todo in the future, dynamically allocate RAID map memory */
3338        if (le32_to_cpu(this_device->raid_map.structure_size) >
3339                                sizeof(this_device->raid_map)) {
3340                dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3341                rc = -1;
3342        }
3343        hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3344        return rc;
3345out:
3346        cmd_free(h, c);
3347        return rc;
3348}
3349
3350static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3351                unsigned char scsi3addr[], u16 bmic_device_index,
3352                struct bmic_sense_subsystem_info *buf, size_t bufsize)
3353{
3354        int rc = IO_OK;
3355        struct CommandList *c;
3356        struct ErrorInfo *ei;
3357
3358        c = cmd_alloc(h);
3359
3360        rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3361                0, RAID_CTLR_LUNID, TYPE_CMD);
3362        if (rc)
3363                goto out;
3364
3365        c->Request.CDB[2] = bmic_device_index & 0xff;
3366        c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3367
3368        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3369                        NO_TIMEOUT);
3370        if (rc)
3371                goto out;
3372        ei = c->err_info;
3373        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3374                hpsa_scsi_interpret_error(h, c);
3375                rc = -1;
3376        }
3377out:
3378        cmd_free(h, c);
3379        return rc;
3380}
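
/*
 * Worked example of the BMIC device-index encoding above (and in
 * hpsa_bmic_id_physical_device() below): the 16-bit index is split with the
 * low byte in CDB[2] and the high byte in CDB[9], so index 0x1234 yields
 * CDB[2] = 0x34 and CDB[9] = 0x12.  As a hypothetical helper:
 */
static void __maybe_unused hpsa_example_set_bmic_index(struct CommandList *c,
                u16 bmic_device_index)
{
        c->Request.CDB[2] = bmic_device_index & 0xff;           /* low byte */
        c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;    /* high byte */
}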
3381
3382static int hpsa_bmic_id_controller(struct ctlr_info *h,
3383        struct bmic_identify_controller *buf, size_t bufsize)
3384{
3385        int rc = IO_OK;
3386        struct CommandList *c;
3387        struct ErrorInfo *ei;
3388
3389        c = cmd_alloc(h);
3390
3391        rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3392                0, RAID_CTLR_LUNID, TYPE_CMD);
3393        if (rc)
3394                goto out;
3395
3396        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3397                        NO_TIMEOUT);
3398        if (rc)
3399                goto out;
3400        ei = c->err_info;
3401        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3402                hpsa_scsi_interpret_error(h, c);
3403                rc = -1;
3404        }
3405out:
3406        cmd_free(h, c);
3407        return rc;
3408}
3409
3410static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3411                unsigned char scsi3addr[], u16 bmic_device_index,
3412                struct bmic_identify_physical_device *buf, size_t bufsize)
3413{
3414        int rc = IO_OK;
3415        struct CommandList *c;
3416        struct ErrorInfo *ei;
3417
3418        c = cmd_alloc(h);
3419        rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3420                0, RAID_CTLR_LUNID, TYPE_CMD);
3421        if (rc)
3422                goto out;
3423
3424        c->Request.CDB[2] = bmic_device_index & 0xff;
3425        c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3426
3427        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3428                                                NO_TIMEOUT);
3429        ei = c->err_info;
3430        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3431                hpsa_scsi_interpret_error(h, c);
3432                rc = -1;
3433        }
3434out:
3435        cmd_free(h, c);
3436
3437        return rc;
3438}
3439
3440/*
3441 * get enclosure information
3442 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
3443 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
3444 * Uses id_physical_device to determine the box_index.
3445 */
3446static void hpsa_get_enclosure_info(struct ctlr_info *h,
3447                        unsigned char *scsi3addr,
3448                        struct ReportExtendedLUNdata *rlep, int rle_index,
3449                        struct hpsa_scsi_dev_t *encl_dev)
3450{
3451        int rc = -1;
3452        struct CommandList *c = NULL;
3453        struct ErrorInfo *ei = NULL;
3454        struct bmic_sense_storage_box_params *bssbp = NULL;
3455        struct bmic_identify_physical_device *id_phys = NULL;
3456        struct ext_report_lun_entry *rle;
3457        u16 bmic_device_index = 0;
3458
3459        if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
3460                return;
3461
3462        rle = &rlep->LUN[rle_index];
3463
3464        encl_dev->eli =
3465                hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3466
3467        bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3468
3469        if (encl_dev->target == -1 || encl_dev->lun == -1) {
3470                rc = IO_OK;
3471                goto out;
3472        }
3473
3474        if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3475                rc = IO_OK;
3476                goto out;
3477        }
3478
3479        bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3480        if (!bssbp)
3481                goto out;
3482
3483        id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3484        if (!id_phys)
3485                goto out;
3486
3487        rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3488                                                id_phys, sizeof(*id_phys));
3489        if (rc) {
3490                dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3491                        __func__, encl_dev->external, bmic_device_index);
3492                goto out;
3493        }
3494
3495        c = cmd_alloc(h);
3496
3497        rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3498                        sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3499
3500        if (rc)
3501                goto out;
3502
3503        if (id_phys->phys_connector[1] == 'E')
3504                c->Request.CDB[5] = id_phys->box_index;
3505        else
3506                c->Request.CDB[5] = 0;
3507
3508        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3509                                                NO_TIMEOUT);
3510        if (rc)
3511                goto out;
3512
3513        ei = c->err_info;
3514        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3515                rc = -1;
3516                goto out;
3517        }
3518
3519        encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3520        memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3521                bssbp->phys_connector, sizeof(bssbp->phys_connector));
3522
3523        rc = IO_OK;
3524out:
3525        kfree(bssbp);
3526        kfree(id_phys);
3527
3528        if (c)
3529                cmd_free(h, c);
3530
3531        if (rc != IO_OK)
3532                hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3533                        "Error, could not get enclosure information");
3534}
3535
3536static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3537                                                unsigned char *scsi3addr)
3538{
3539        struct ReportExtendedLUNdata *physdev;
3540        u32 nphysicals;
3541        u64 sa = 0;
3542        int i;
3543
3544        physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3545        if (!physdev)
3546                return 0;
3547
3548        if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3549                dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3550                kfree(physdev);
3551                return 0;
3552        }
3553        nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3554
3555        for (i = 0; i < nphysicals; i++)
3556                if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3557                        sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3558                        break;
3559                }
3560
3561        kfree(physdev);
3562
3563        return sa;
3564}
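
/*
 * Note on the "/ 24" above: LUNListLength is a big-endian byte count and
 * each extended report entry occupies 24 bytes, so the division yields the
 * entry count.  A sketch of the same computation written with sizeof,
 * assuming struct ext_report_lun_entry is exactly 24 bytes:
 */
static u32 __maybe_unused hpsa_example_nphysicals(
                struct ReportExtendedLUNdata *physdev)
{
        return get_unaligned_be32(physdev->LUNListLength) /
                        sizeof(physdev->LUN[0]);
}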
3565
3566static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3567                                        struct hpsa_scsi_dev_t *dev)
3568{
3569        int rc;
3570        u64 sa = 0;
3571
3572        if (is_hba_lunid(scsi3addr)) {
3573                struct bmic_sense_subsystem_info *ssi;
3574
3575                ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3576                if (!ssi)
3577                        return;
3578
3579                rc = hpsa_bmic_sense_subsystem_information(h,
3580                                        scsi3addr, 0, ssi, sizeof(*ssi));
3581                if (rc == 0) {
3582                        sa = get_unaligned_be64(ssi->primary_world_wide_id);
3583                        h->sas_address = sa;
3584                }
3585
3586                kfree(ssi);
3587        } else
3588                sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3589
3590        dev->sas_address = sa;
3591}
3592
3593static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3594        struct ReportExtendedLUNdata *physdev)
3595{
3596        u32 nphysicals;
3597        int i;
3598
3599        if (h->discovery_polling)
3600                return;
3601
3602        nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3603
3604        for (i = 0; i < nphysicals; i++) {
3605                if (physdev->LUN[i].device_type ==
3606                        BMIC_DEVICE_TYPE_CONTROLLER
3607                        && !is_hba_lunid(physdev->LUN[i].lunid)) {
3608                        dev_info(&h->pdev->dev,
3609                                "External controller present, activate discovery polling and disable rld caching\n");
3610                        hpsa_disable_rld_caching(h);
3611                        h->discovery_polling = 1;
3612                        break;
3613                }
3614        }
3615}
3616
3617/* Check whether a VPD page is supported by scanning the supported VPD pages list (page 0x00) */
3618static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3619        unsigned char scsi3addr[], u8 page)
3620{
3621        int rc;
3622        int i;
3623        int pages;
3624        unsigned char *buf, bufsize;
3625
3626        buf = kzalloc(256, GFP_KERNEL);
3627        if (!buf)
3628                return false;
3629
3630        /* Get the size of the page list first */
3631        rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3632                                VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3633                                buf, HPSA_VPD_HEADER_SZ);
3634        if (rc != 0)
3635                goto exit_unsupported;
3636        pages = buf[3];
3637        if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3638                bufsize = pages + HPSA_VPD_HEADER_SZ;
3639        else
3640                bufsize = 255;
3641
3642        /* Get the whole VPD page list */
3643        rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3644                                VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3645                                buf, bufsize);
3646        if (rc != 0)
3647                goto exit_unsupported;
3648
3649        pages = buf[3];
3650        for (i = 1; i <= pages; i++)
3651                if (buf[3 + i] == page)
3652                        goto exit_supported;
3653exit_unsupported:
3654        kfree(buf);
3655        return false;
3656exit_supported:
3657        kfree(buf);
3658        return true;
3659}
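
/*
 * Layout assumed by the function above (the standard SPC "Supported VPD
 * pages" page): buf[3] holds the number of page codes and the codes start
 * at buf[4], which is why the loop tests buf[3 + i] for i = 1..pages.  A
 * sketch of the same scan over an already-fetched buffer:
 */
static bool __maybe_unused hpsa_example_page_in_list(const unsigned char *buf,
                u8 page)
{
        int i, pages = buf[3];  /* byte 3: length of the page-code list */

        for (i = 1; i <= pages; i++)
                if (buf[3 + i] == page) /* page codes begin at byte 4 */
                        return true;
        return false;
}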
3660
3661/*
3662 * Called during a scan operation.
3663 * Sets ioaccel status on the new device list, not the existing device list
3664 *
3665 * The device list used during I/O will be updated later in
3666 * adjust_hpsa_scsi_table.
3667 */
3668static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3669        unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3670{
3671        int rc;
3672        unsigned char *buf;
3673        u8 ioaccel_status;
3674
3675        this_device->offload_config = 0;
3676        this_device->offload_enabled = 0;
3677        this_device->offload_to_be_enabled = 0;
3678
3679        buf = kzalloc(64, GFP_KERNEL);
3680        if (!buf)
3681                return;
3682        if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3683                goto out;
3684        rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3685                        VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3686        if (rc != 0)
3687                goto out;
3688
3689#define IOACCEL_STATUS_BYTE 4
3690#define OFFLOAD_CONFIGURED_BIT 0x01
3691#define OFFLOAD_ENABLED_BIT 0x02
3692        ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3693        this_device->offload_config =
3694                !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3695        if (this_device->offload_config) {
3696                bool offload_enabled =
3697                        !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3698                /*
3699                 * Check to see if offload can be enabled.
3700                 */
3701                if (offload_enabled) {
3702                        rc = hpsa_get_raid_map(h, scsi3addr, this_device);
3703                        if (rc) /* could not load raid_map */
3704                                goto out;
3705                        this_device->offload_to_be_enabled = 1;
3706                }
3707        }
3708
3709out:
3710        kfree(buf);
3711        return;
3712}
3713
3714/* Get the device id from the logical volume device id VPD page */
3715static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3716        unsigned char *device_id, int index, int buflen)
3717{
3718        int rc;
3719        unsigned char *buf;
3720
3721        /* Does controller have VPD for device id? */
3722        if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3723                return 1; /* not supported */
3724
3725        buf = kzalloc(64, GFP_KERNEL);
3726        if (!buf)
3727                return -ENOMEM;
3728
3729        rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3730                                        HPSA_VPD_LV_DEVICE_ID, buf, 64);
3731        if (rc == 0) {
3732                if (buflen > 16)
3733                        buflen = 16;
3734                memcpy(device_id, &buf[8], buflen);
3735        }
3736
3737        kfree(buf);
3738
3739        return rc; /*0 - got id,  otherwise, didn't */
3740}
3741
3742static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3743                void *buf, int bufsize,
3744                int extended_response)
3745{
3746        int rc = IO_OK;
3747        struct CommandList *c;
3748        unsigned char scsi3addr[8];
3749        struct ErrorInfo *ei;
3750
3751        c = cmd_alloc(h);
3752
3753        /* address the controller */
3754        memset(scsi3addr, 0, sizeof(scsi3addr));
3755        if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3756                buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3757                rc = -EAGAIN;
3758                goto out;
3759        }
3760        if (extended_response)
3761                c->Request.CDB[1] = extended_response;
3762        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3763                        NO_TIMEOUT);
3764        if (rc)
3765                goto out;
3766        ei = c->err_info;
3767        if (ei->CommandStatus != 0 &&
3768            ei->CommandStatus != CMD_DATA_UNDERRUN) {
3769                hpsa_scsi_interpret_error(h, c);
3770                rc = -EIO;
3771        } else {
3772                struct ReportLUNdata *rld = buf;
3773
3774                if (rld->extended_response_flag != extended_response) {
3775                        if (!h->legacy_board) {
3776                                dev_err(&h->pdev->dev,
3777                                        "report luns requested format %u, got %u\n",
3778                                        extended_response,
3779                                        rld->extended_response_flag);
3780                                rc = -EINVAL;
3781                        } else
3782                                rc = -EOPNOTSUPP;
3783                }
3784        }
3785out:
3786        cmd_free(h, c);
3787        return rc;
3788}
3789
3790static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3791                struct ReportExtendedLUNdata *buf, int bufsize)
3792{
3793        int rc;
3794        struct ReportLUNdata *lbuf;
3795
3796        rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3797                                      HPSA_REPORT_PHYS_EXTENDED);
3798        if (!rc || rc != -EOPNOTSUPP)
3799                return rc;
3800
3801        /* REPORT PHYS EXTENDED is not supported */
3802        lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3803        if (!lbuf)
3804                return -ENOMEM;
3805
3806        rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3807        if (!rc) {
3808                int i;
3809                u32 nphys;
3810
3811                /* Copy ReportLUNdata header */
3812                memcpy(buf, lbuf, 8);
3813                nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3814                for (i = 0; i < nphys; i++)
3815                        memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3816        }
3817        kfree(lbuf);
3818        return rc;
3819}
3820
3821static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3822                struct ReportLUNdata *buf, int bufsize)
3823{
3824        return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3825}
3826
3827static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3828        int bus, int target, int lun)
3829{
3830        device->bus = bus;
3831        device->target = target;
3832        device->lun = lun;
3833}
3834
3835/* Use VPD inquiry to get details of volume status */
3836static int hpsa_get_volume_status(struct ctlr_info *h,
3837                                        unsigned char scsi3addr[])
3838{
3839        int rc;
3840        int status;
3841        int size;
3842        unsigned char *buf;
3843
3844        buf = kzalloc(64, GFP_KERNEL);
3845        if (!buf)
3846                return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3847
3848        /* Does controller have VPD for logical volume status? */
3849        if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3850                goto exit_failed;
3851
3852        /* Get the size of the VPD return buffer */
3853        rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3854                                        buf, HPSA_VPD_HEADER_SZ);
3855        if (rc != 0)
3856                goto exit_failed;
3857        size = buf[3];
3858
3859        /* Now get the whole VPD buffer */
3860        rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3861                                        buf, size + HPSA_VPD_HEADER_SZ);
3862        if (rc != 0)
3863                goto exit_failed;
3864        status = buf[4]; /* status byte */
3865
3866        kfree(buf);
3867        return status;
3868exit_failed:
3869        kfree(buf);
3870        return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3871}
3872
3873/* Determine offline status of a volume.
3874 * Return either:
3875 *  0 (not offline)
3876 *  0xff (offline for unknown reasons)
3877 *  # (integer code indicating one of several NOT READY states
3878 *     describing why a volume is to be kept offline)
3879 */
3880static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3881                                        unsigned char scsi3addr[])
3882{
3883        struct CommandList *c;
3884        unsigned char *sense;
3885        u8 sense_key, asc, ascq;
3886        int sense_len;
3887        int rc, ldstat = 0;
3888#define ASC_LUN_NOT_READY 0x04
3889#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3890#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3891
3892        c = cmd_alloc(h);
3893
3894        (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3895        rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3896                                        NO_TIMEOUT);
3897        if (rc) {
3898                cmd_free(h, c);
3899                return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3900        }
3901        sense = c->err_info->SenseInfo;
3902        if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3903                sense_len = sizeof(c->err_info->SenseInfo);
3904        else
3905                sense_len = c->err_info->SenseLen;
3906        decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3907        cmd_free(h, c);
3908
3909        /* Determine the reason for not ready state */
3910        ldstat = hpsa_get_volume_status(h, scsi3addr);
3911
3912        /* Keep volume offline in certain cases: */
3913        switch (ldstat) {
3914        case HPSA_LV_FAILED:
3915        case HPSA_LV_UNDERGOING_ERASE:
3916        case HPSA_LV_NOT_AVAILABLE:
3917        case HPSA_LV_UNDERGOING_RPI:
3918        case HPSA_LV_PENDING_RPI:
3919        case HPSA_LV_ENCRYPTED_NO_KEY:
3920        case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3921        case HPSA_LV_UNDERGOING_ENCRYPTION:
3922        case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3923        case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3924                return ldstat;
3925        case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3926                /* If VPD status page isn't available,
3927                 * use ASC/ASCQ to determine state
3928                 */
3929                if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3930                        (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3931                        return ldstat;
3932                break;
3933        default:
3934                break;
3935        }
3936        return HPSA_LV_OK;
3937}
3938
3939static int hpsa_update_device_info(struct ctlr_info *h,
3940        unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3941        unsigned char *is_OBDR_device)
3942{
3943
3944#define OBDR_SIG_OFFSET 43
3945#define OBDR_TAPE_SIG "$DR-10"
3946#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3947#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3948
3949        unsigned char *inq_buff;
3950        unsigned char *obdr_sig;
3951        int rc = 0;
3952
3953        inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3954        if (!inq_buff) {
3955                rc = -ENOMEM;
3956                goto bail_out;
3957        }
3958
3959        /* Do an inquiry to the device to see what it is. */
3960        if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3961                (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3962                dev_err(&h->pdev->dev,
3963                        "%s: inquiry failed, device will be skipped.\n",
3964                        __func__);
3965                rc = HPSA_INQUIRY_FAILED;
3966                goto bail_out;
3967        }
3968
3969        scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3970        scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3971
3972        this_device->devtype = (inq_buff[0] & 0x1f);
3973        memcpy(this_device->scsi3addr, scsi3addr, 8);
3974        memcpy(this_device->vendor, &inq_buff[8],
3975                sizeof(this_device->vendor));
3976        memcpy(this_device->model, &inq_buff[16],
3977                sizeof(this_device->model));
3978        this_device->rev = inq_buff[2];
3979        memset(this_device->device_id, 0,
3980                sizeof(this_device->device_id));
3981        if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3982                sizeof(this_device->device_id)) < 0) {
3983                dev_err(&h->pdev->dev,
3984                        "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
3985                        h->ctlr, __func__,
3986                        h->scsi_host->host_no,
3987                        this_device->bus, this_device->target,
3988                        this_device->lun,
3989                        scsi_device_type(this_device->devtype),
3990                        this_device->model);
3991                rc = HPSA_LV_FAILED;
3992                goto bail_out;
3993        }
3994
3995        if ((this_device->devtype == TYPE_DISK ||
3996                this_device->devtype == TYPE_ZBC) &&
3997                is_logical_dev_addr_mode(scsi3addr)) {
3998                unsigned char volume_offline;
3999
4000                hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
4001                if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
4002                        hpsa_get_ioaccel_status(h, scsi3addr, this_device);
4003                volume_offline = hpsa_volume_offline(h, scsi3addr);
4004                if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
4005                    h->legacy_board) {
4006                        /*
4007                         * Legacy boards might not support volume status
4008                         */
4009                        dev_info(&h->pdev->dev,
4010                                 "C0:T%d:L%d Volume status not available, assuming online.\n",
4011                                 this_device->target, this_device->lun);
4012                        volume_offline = 0;
4013                }
4014                this_device->volume_offline = volume_offline;
4015                if (volume_offline == HPSA_LV_FAILED) {
4016                        rc = HPSA_LV_FAILED;
4017                        dev_err(&h->pdev->dev,
4018                                "%s: LV failed, device will be skipped.\n",
4019                                __func__);
4020                        goto bail_out;
4021                }
4022        } else {
4023                this_device->raid_level = RAID_UNKNOWN;
4024                this_device->offload_config = 0;
4025                hpsa_turn_off_ioaccel_for_device(this_device);
4026                this_device->hba_ioaccel_enabled = 0;
4027                this_device->volume_offline = 0;
4028                this_device->queue_depth = h->nr_cmds;
4029        }
4030
4031        if (this_device->external)
4032                this_device->queue_depth = EXTERNAL_QD;
4033
4034        if (is_OBDR_device) {
4035                /* See if this is a One-Button-Disaster-Recovery device
4036                 * by looking for "$DR-10" at offset 43 in inquiry data.
4037                 */
4038                obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4039                *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4040                                        strncmp(obdr_sig, OBDR_TAPE_SIG,
4041                                                OBDR_SIG_LEN) == 0);
4042        }
4043        kfree(inq_buff);
4044        return 0;
4045
4046bail_out:
4047        kfree(inq_buff);
4048        return rc;
4049}
4050
4051/*
4052 * Helper function to assign bus, target, lun mapping of devices.
4053 * Logical drive target and lun are assigned at this time, but
4054 * physical device lun and target assignment are deferred (assigned
4055 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
4056 */
4057static void figure_bus_target_lun(struct ctlr_info *h,
4058        u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4059{
4060        u32 lunid = get_unaligned_le32(lunaddrbytes);
4061
4062        if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4063                /* physical device, target and lun filled in later */
4064                if (is_hba_lunid(lunaddrbytes)) {
4065                        int bus = HPSA_HBA_BUS;
4066
4067                        if (!device->rev)
4068                                bus = HPSA_LEGACY_HBA_BUS;
4069                        hpsa_set_bus_target_lun(device,
4070                                        bus, 0, lunid & 0x3fff);
4071                } else
4072                        /* defer target, lun assignment for physical devices */
4073                        hpsa_set_bus_target_lun(device,
4074                                        HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4075                return;
4076        }
4077        /* It's a logical device */
4078        if (device->external) {
4079                hpsa_set_bus_target_lun(device,
4080                        HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4081                        lunid & 0x00ff);
4082                return;
4083        }
4084        hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4085                                0, lunid & 0x3fff);
4086}
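
/*
 * Worked example of the lunid decoding above (values illustrative): for an
 * external volume with lunid 0x00123456, target = (lunid >> 16) & 0x3fff =
 * 0x12 and lun = lunid & 0x00ff = 0x56; a local logical volume instead gets
 * target 0 and lun = lunid & 0x3fff.
 */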
4087
4088static int  figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4089        int i, int nphysicals, int nlocal_logicals)
4090{
4091        /* In report logicals, local logicals are listed first,
4092         * then any externals.
4093         */
4094        int logicals_start = nphysicals + (raid_ctlr_position == 0);
4095
4096        if (i == raid_ctlr_position)
4097                return 0;
4098
4099        if (i < logicals_start)
4100                return 0;
4101
4102        /* i is in logicals range, but still within local logicals */
4103        if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4104                return 0;
4105
4106        return 1; /* it's an external lun */
4107}
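/*
 * Worked example, with assumed counts: with raid_ctlr_position == 0,
 * nphysicals == 4 and nlocal_logicals == 2, index 0 is the controller,
 * indices 1-4 are physicals, indices 5-6 are local logicals, and any
 * index from 7 up is reported as an external lun.
 */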
4108
4109/*
4110 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
4111 * logdev.  The number of luns in physdev and logdev are returned in
4112 * *nphysicals and *nlogicals, respectively.
4113 * Returns 0 on success, -1 otherwise.
4114 */
4115static int hpsa_gather_lun_info(struct ctlr_info *h,
4116        struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4117        struct ReportLUNdata *logdev, u32 *nlogicals)
4118{
4119        if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4120                dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4121                return -1;
4122        }
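        /*
         * LUNListLength is a big-endian byte count; each extended
         * physical LUN entry (struct ext_report_lun_entry) is 24 bytes.
         */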
4123        *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4124        if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4125                dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4126                        HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4127                *nphysicals = HPSA_MAX_PHYS_LUN;
4128        }
4129        if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4130                dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4131                return -1;
4132        }
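        /* Plain report-logical-LUNs entries are 8 bytes each. */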
4133        *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4134        /* Reject Logicals in excess of our max capability. */
4135        if (*nlogicals > HPSA_MAX_LUN) {
4136                dev_warn(&h->pdev->dev,
4137                        "maximum logical LUNs (%d) exceeded.  "
4138                        "%d LUNs ignored.\n", HPSA_MAX_LUN,
4139                        *nlogicals - HPSA_MAX_LUN);
4140                *nlogicals = HPSA_MAX_LUN;
4141        }
4142        if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4143                dev_warn(&h->pdev->dev,
4144                        "maximum logical + physical LUNs (%d) exceeded. "
4145                        "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4146                        *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4147                *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4148        }
4149        return 0;
4150}
4151
4152static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4153        int i, int nphysicals, int nlogicals,
4154        struct ReportExtendedLUNdata *physdev_list,
4155        struct ReportLUNdata *logdev_list)
4156{
4157        /* Helper function, figure out where the LUN ID info is coming from
4158         * given index i, lists of physical and logical devices, where in
4159         * the list the raid controller is supposed to appear (first or last)
4160         */
4161
4162        int logicals_start = nphysicals + (raid_ctlr_position == 0);
4163        int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4164
4165        if (i == raid_ctlr_position)
4166                return RAID_CTLR_LUNID;
4167
4168        if (i < logicals_start)
4169                return &physdev_list->LUN[i -
4170                                (raid_ctlr_position == 0)].lunid[0];
4171
4172        if (i < last_device)
4173                return &logdev_list->LUN[i - nphysicals -
4174                        (raid_ctlr_position == 0)][0];
4175        BUG();
4176        return NULL;
4177}
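/*
 * Worked example, with assumed counts: with raid_ctlr_position == 0,
 * nphysicals == 2 and nlogicals == 2, index 0 returns RAID_CTLR_LUNID,
 * indices 1-2 map to physdev_list->LUN[0..1], indices 3-4 map to
 * logdev_list->LUN[0..1], and index 5 would be out of range and hit
 * the BUG().
 */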
4178
4179/* get physical drive ioaccel handle and queue depth */
4180static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4181                struct hpsa_scsi_dev_t *dev,
4182                struct ReportExtendedLUNdata *rlep, int rle_index,
4183                struct bmic_identify_physical_device *id_phys)
4184{
4185        int rc;
4186        struct ext_report_lun_entry *rle;
4187
4188        if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
4189                return;
4190
4191        rle = &rlep->LUN[rle_index];
4192
4193        dev->ioaccel_handle = rle->ioaccel_handle;
4194        if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4195                dev->hba_ioaccel_enabled = 1;
4196        memset(id_phys, 0, sizeof(*id_phys));
4197        rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4198                        GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4199                        sizeof(*id_phys));
4200#define DRIVE_CMDS_RESERVED_FOR_FW 2
4201#define DRIVE_QUEUE_DEPTH 7
4202        if (!rc)
4203                /* Reserve space for FW operations */
4204                dev->queue_depth =
4205                        le16_to_cpu(id_phys->current_queue_depth_limit) -
4206                                DRIVE_CMDS_RESERVED_FOR_FW;
4207        else
4208                dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
4209}
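/*
 * Example of the queue-depth math above, using an assumed firmware
 * value: if BMIC reports current_queue_depth_limit == 32, the driver
 * sets dev->queue_depth to 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30; if
 * the BMIC command fails, it falls back to DRIVE_QUEUE_DEPTH (7).
 */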
4210
4211static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4212        struct ReportExtendedLUNdata *rlep, int rle_index,
4213        struct bmic_identify_physical_device *id_phys)
4214{
4215        struct ext_report_lun_entry *rle;
4216
4217        if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
4218                return;
4219
4220        rle = &rlep->LUN[rle_index];
4221
4222        if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4223                this_device->hba_ioaccel_enabled = 1;
4224
4225        memcpy(&this_device->active_path_index,
4226                &id_phys->active_path_number,
4227                sizeof(this_device->active_path_index));
4228        memcpy(&this_device->path_map,
4229                &id_phys->redundant_path_present_map,
4230                sizeof(this_device->path_map));
4231        memcpy(&this_device->box,
4232                &id_phys->alternate_paths_phys_box_on_port,
4233                sizeof(this_device->box));
4234        memcpy(&this_device->phys_connector,
4235                &id_phys->alternate_paths_phys_connector,
4236                sizeof(this_device->phys_connector));
4237        memcpy(&this_device->bay,
4238                &id_phys->phys_bay_in_box,
4239                sizeof(this_device->bay));
4240}
4241
4242/* get number of local logical disks. */
4243static int hpsa_set_local_logical_count(struct ctlr_info *h,
4244        struct bmic_identify_controller *id_ctlr,
4245        u32 *nlocals)
4246{
4247        int rc;
4248
4249        if (!id_ctlr) {
4250                dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4251                        __func__);
4252                return -ENOMEM;
4253        }
4254        memset(id_ctlr, 0, sizeof(*id_ctlr));
4255        rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
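        /*
         * A configured_logical_drive_count of 255 appears to act as an
         * overflow marker for this one-byte field; in that case the
         * 16-bit extended_logical_unit_count is used instead.
         */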
4256        if (!rc) {
4257                if (id_ctlr->configured_logical_drive_count < 255)
4258                        *nlocals = id_ctlr->configured_logical_drive_count;
4259                else
4260                        *nlocals = le16_to_cpu(id_ctlr->extended_logical_unit_count);
4261        } else {
4262                *nlocals = -1;
4263        }
4264        return rc;
4265}
4266
4267static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4268{
4269        struct bmic_identify_physical_device *id_phys;
4270        bool is_spare = false;
4271        int rc;
4272
4273        id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4274        if (!id_phys)
4275                return false;
4276
4277        rc = hpsa_bmic_id_physical_device(h,
4278                                        lunaddrbytes,
4279                                        GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4280                                        id_phys, sizeof(*id_phys));
4281        if (rc == 0)
4282                is_spare = (id_phys->more_flags >> 6) & 0x01;
4283
4284        kfree(id_phys);
4285        return is_spare;
4286}
4287
4288#define RPL_DEV_FLAG_NON_DISK                           0x1
4289#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
4290#define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4
4291
4292#define BMIC_DEVICE_TYPE_ENCLOSURE  6
4293
4294static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4295                                struct ext_report_lun_entry *rle)
4296{
4297        u8 device_flags;
4298        u8 device_type;
4299
4300        if (!MASKED_DEVICE(lunaddrbytes))
4301                return false;
4302
4303        device_flags = rle->device_flags;
4304        device_type = rle->device_type;
4305
4306        if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4307                if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4308                        return false;
4309                return true;
4310        }
4311
4312        if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4313                return false;
4314
4315        if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4316                return false;
4317
4318        /*
4319         * Spares may be spun down.  We do not want to send an
4320         * Inquiry to a RAID set spare drive, as that would spin
4321         * the spare up; that is a performance hit, because I/O
4322         * to the RAID device stops while the spin-up occurs,
4323         * and the spin-up can take over 50 seconds to
4324         * complete.
4325         */
4326        if (hpsa_is_disk_spare(h, lunaddrbytes))
4327                return true;
4328
4329        return false;
4330}
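/*
 * Summary of the skip logic above: only masked physical devices are
 * candidates for skipping.  A masked non-disk is skipped unless it is
 * an enclosure; if unconfigured-disk reporting is unsupported, or the
 * disk is unconfigured, the device is kept; otherwise the disk is
 * skipped only when it turns out to be a spare.
 */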
4331
4332static void hpsa_update_scsi_devices(struct ctlr_info *h)
4333{
4334        /* The idea here is that we may get notified that some
4335         * devices have changed, so we do report-physical-LUNs and
4336         * report-logical-LUNs commands, and adjust our list of
4337         * devices accordingly.
4338         *
4339         * The scsi3addr of a device won't change so long as the
4340         * adapter is not reset.  That means we can rescan and
4341         * tell which devices we already know about, vs. new
4342         * devices, vs. disappearing devices.
4343         */
4344        struct ReportExtendedLUNdata *physdev_list = NULL;
4345        struct ReportLUNdata *logdev_list = NULL;
4346        struct bmic_identify_physical_device *id_phys = NULL;
4347        struct bmic_identify_controller *id_ctlr = NULL;
4348        u32 nphysicals = 0;
4349        u32 nlogicals = 0;
4350        u32 nlocal_logicals = 0;
4351        u32 ndev_allocated = 0;
4352        struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4353        int ncurrent = 0;
4354        int i, ndevs_to_allocate;
4355        int raid_ctlr_position;
4356        bool physical_device;
4357        DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4358
4359        currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
4360        physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4361        logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4362        tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4363        id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4364        id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4365
4366        if (!currentsd || !physdev_list || !logdev_list ||
4367                !tmpdevice || !id_phys || !id_ctlr) {
4368                dev_err(&h->pdev->dev, "out of memory\n");
4369                goto out;
4370        }
4371        memset(lunzerobits, 0, sizeof(lunzerobits));
4372
4373        h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4374
4375        if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4376                        logdev_list, &nlogicals)) {
4377                h->drv_req_rescan = 1;
4378                goto out;
4379        }
4380
4381        /* Set number of local logicals (non PTRAID) */
4382        if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4383                dev_warn(&h->pdev->dev,
4384                        "%s: Can't determine number of local logical devices.\n",
4385                        __func__);
4386        }
4387
4388        /* We might see up to the maximum number of logical and physical disks
4389         * plus external target devices, and a device for the local RAID
4390         * controller.
4391         */
4392        ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4393
4394        hpsa_ext_ctrl_present(h, physdev_list);
4395
4396        /* Allocate the per device structures */
4397        for (i = 0; i < ndevs_to_allocate; i++) {
4398                if (i >= HPSA_MAX_DEVICES) {
4399                        dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4400                                "  %d devices ignored.\n", HPSA_MAX_DEVICES,
4401                                ndevs_to_allocate - HPSA_MAX_DEVICES);
4402                        break;
4403                }
4404
4405                currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4406                if (!currentsd[i]) {
4407                        h->drv_req_rescan = 1;
4408                        goto out;
4409                }
4410                ndev_allocated++;
4411        }
4412
4413        if (is_scsi_rev_5(h))
4414                raid_ctlr_position = 0;
4415        else
4416                raid_ctlr_position = nphysicals + nlogicals;
4417
4418        /* adjust our table of devices */
4419        for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4420                u8 *lunaddrbytes, is_OBDR = 0;
4421                int rc = 0;
4422                int phys_dev_index = i - (raid_ctlr_position == 0);
4423                bool skip_device = false;
4424
4425                memset(tmpdevice, 0, sizeof(*tmpdevice));
4426
4427                physical_device = i < nphysicals + (raid_ctlr_position == 0);
4428
4429                /* Figure out where the LUN ID info is coming from */
4430                lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4431                        i, nphysicals, nlogicals, physdev_list, logdev_list);
4432
4433                /* Determine if this is a lun from an external target array */
4434                tmpdevice->external =
4435                        figure_external_status(h, raid_ctlr_position, i,
4436                                                nphysicals, nlocal_logicals);
4437
4438                /*
4439                 * Skip over some devices such as a spare.
4440                 */
4441                if (phys_dev_index >= 0 && !tmpdevice->external &&
4442                        physical_device) {
4443                        skip_device = hpsa_skip_device(h, lunaddrbytes,
4444                                        &physdev_list->LUN[phys_dev_index]);
4445                        if (skip_device)
4446                                continue;
4447                }
4448
4449                /* Get device type, vendor, model, device id, raid_map */
4450                rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4451                                                        &is_OBDR);
4452                if (rc == -ENOMEM) {
4453                        dev_warn(&h->pdev->dev,
4454                                "Out of memory, rescan deferred.\n");
4455                        h->drv_req_rescan = 1;
4456                        goto out;
4457                }
4458                if (rc) {
4459                        h->drv_req_rescan = 1;
4460                        continue;
4461                }
4462
4463                figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4464                this_device = currentsd[ncurrent];
4465
4466                *this_device = *tmpdevice;
4467                this_device->physical_device = physical_device;
4468
4469                /*
4470                 * Expose all devices except for physical devices that
4471                 * are masked.
4472                 */
4473                if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4474                        this_device->expose_device = 0;
4475                else
4476                        this_device->expose_device = 1;
4477
4479                /*
4480                 * Get the SAS address for physical devices that are exposed.
4481                 */
4482                if (this_device->physical_device && this_device->expose_device)
4483                        hpsa_get_sas_address(h, lunaddrbytes, this_device);
4484
4485                switch (this_device->devtype) {
4486                case TYPE_ROM:
4487                        /* We don't *really* support actual CD-ROM devices,
4488                         * just "One Button Disaster Recovery" tape drive
4489                         * which temporarily pretends to be a CD-ROM drive.
4490                         * So we check that the device is really an OBDR tape
4491                         * device by checking for "$DR-10" in bytes 43-48 of
4492                         * the inquiry data.
4493                         */
4494                        if (is_OBDR)
4495                                ncurrent++;
4496                        break;
4497                case TYPE_DISK:
4498                case TYPE_ZBC:
4499                        if (this_device->physical_device) {
4500                                /* The disk is in HBA mode. */
4501                                /* Never use RAID mapper in HBA mode. */
4502                                this_device->offload_enabled = 0;
4503                                hpsa_get_ioaccel_drive_info(h, this_device,
4504                                        physdev_list, phys_dev_index, id_phys);
4505                                hpsa_get_path_info(this_device,
4506                                        physdev_list, phys_dev_index, id_phys);
4507                        }
4508                        ncurrent++;
4509                        break;
4510                case TYPE_TAPE:
4511                case TYPE_MEDIUM_CHANGER:
4512                        ncurrent++;
4513                        break;
4514                case TYPE_ENCLOSURE:
4515                        if (!this_device->external)
4516                                hpsa_get_enclosure_info(h, lunaddrbytes,
4517                                                physdev_list, phys_dev_index,
4518                                                this_device);
4519                        ncurrent++;
4520                        break;
4521                case TYPE_RAID:
4522                        /* Only present the Smart Array HBA as a RAID
4523                         * controller.  If it's a RAID controller other than
4524                         * the HBA itself (an external RAID controller,
4525                         * MSA500 or similar), don't present it.
4526                         */
4527                        if (!is_hba_lunid(lunaddrbytes))
4528                                break;
4529                        ncurrent++;
4530                        break;
4531                default:
4532                        break;
4533                }
4534                if (ncurrent >= HPSA_MAX_DEVICES)
4535                        break;
4536        }
4537
4538        if (h->sas_host == NULL) {
4539                int rc = 0;
4540
4541                rc = hpsa_add_sas_host(h);
4542                if (rc) {
4543                        dev_warn(&h->pdev->dev,
4544                                "Could not add sas host %d\n", rc);
4545                        goto out;
4546                }
4547        }
4548
4549        adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4550out:
4551        kfree(tmpdevice);
4552        for (i = 0; i < ndev_allocated; i++)
4553                kfree(currentsd[i]);
4554        kfree(currentsd);
4555        kfree(physdev_list);
4556        kfree(logdev_list);
4557        kfree(id_ctlr);
4558        kfree(id_phys);
4559}
4560
4561static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4562                                   struct scatterlist *sg)
4563{
4564        u64 addr64 = (u64) sg_dma_address(sg);
4565        unsigned int len = sg_dma_len(sg);
4566
4567        desc->Addr = cpu_to_le64(addr64);
4568        desc->Len = cpu_to_le32(len);
4569        desc->Ext = 0;
4570}
4571
4572/*
4573 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
4574 * dma mapping  and fills in the scatter gather entries of the
4575 * hpsa command, cp.
4576 */
4577static int hpsa_scatter_gather(struct ctlr_info *h,
4578                struct CommandList *cp,
4579                struct scsi_cmnd *cmd)
4580{
4581        struct scatterlist *sg;
4582        int use_sg, i, sg_limit, chained;
4583        struct SGDescriptor *curr_sg;
4584
4585        BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4586
4587        use_sg = scsi_dma_map(cmd);
4588        if (use_sg < 0)
4589                return use_sg;
4590
4591        if (!use_sg)
4592                goto sglist_finished;
4593
4594        /*
4595         * If the number of entries is greater than the max for a single list,
4596         * then we have a chained list; we will set up all but one entry in the
4597         * first list (the last entry is saved for link information);
4598         * otherwise, we don't have a chained list and we'll set up each of
4599         * the entries in the one list.
4600         */
4601        curr_sg = cp->SG;
4602        chained = use_sg > h->max_cmd_sg_entries;
4603        sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4604        scsi_for_each_sg(cmd, sg, sg_limit, i) {
4605                hpsa_set_sg_descriptor(curr_sg, sg);
4606                curr_sg++;
4607        }
4608
4609        if (chained) {
4610                /*
4611                 * Continue with the chained list.  Set curr_sg to the chained
4612                 * list.  Modify the limit to the total count less the entries
4613                 * we've already set up.  Resume the scan at the list entry
4614                 * where the previous loop left off.
4615                 */
4616                curr_sg = h->cmd_sg_list[cp->cmdindex];
4617                sg_limit = use_sg - sg_limit;
4618                for_each_sg(sg, sg, sg_limit, i) {
4619                        hpsa_set_sg_descriptor(curr_sg, sg);
4620                        curr_sg++;
4621                }
4622        }
4623
4624        /* Back the pointer up to the last entry and mark it as "last". */
4625        (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4626
4627        if (use_sg + chained > h->maxSG)
4628                h->maxSG = use_sg + chained;
4629
4630        if (chained) {
4631                cp->Header.SGList = h->max_cmd_sg_entries;
4632                cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4633                if (hpsa_map_sg_chain_block(h, cp)) {
4634                        scsi_dma_unmap(cmd);
4635                        return -1;
4636                }
4637                return 0;
4638        }
4639
4640sglist_finished:
4641
4642        cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
4643        cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4644        return 0;
4645}
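/*
 * Worked example of the chaining math above, assuming a controller
 * with max_cmd_sg_entries == 32 and a command mapping to 40 S/G
 * elements: the first list gets 31 entries, the chain block gets the
 * remaining 9, Header.SGList is 32, and Header.SGTotal is 40 + 1 = 41,
 * the extra entry presumably accounting for the chain descriptor.
 */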
4646
4647static inline void warn_zero_length_transfer(struct ctlr_info *h,
4648                                                u8 *cdb, int cdb_len,
4649                                                const char *func)
4650{
4651        dev_warn(&h->pdev->dev,
4652                 "%s: Blocking zero-length request: CDB:%*phN\n",
4653                 func, cdb_len, cdb);
4654}
4655
4656#define IO_ACCEL_INELIGIBLE 1
4657/* zero-length transfers trigger hardware errors. */
4658static bool is_zero_length_transfer(u8 *cdb)
4659{
4660        u32 block_cnt;
4661
4662        /* Block zero-length transfer sizes on certain commands. */
4663        switch (cdb[0]) {
4664        case READ_10:
4665        case WRITE_10:
4666        case VERIFY:            /* 0x2F */
4667        case WRITE_VERIFY:      /* 0x2E */
4668                block_cnt = get_unaligned_be16(&cdb[7]);
4669                break;
4670        case READ_12:
4671        case WRITE_12:
4672        case VERIFY_12: /* 0xAF */
4673        case WRITE_VERIFY_12:   /* 0xAE */
4674                block_cnt = get_unaligned_be32(&cdb[6]);
4675                break;
4676        case READ_16:
4677        case WRITE_16:
4678        case VERIFY_16:         /* 0x8F */
4679                block_cnt = get_unaligned_be32(&cdb[10]);
4680                break;
4681        default:
4682                return false;
4683        }
4684
4685        return block_cnt == 0;
4686}
4687
4688static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4689{
4690        int is_write = 0;
4691        u32 block;
4692        u32 block_cnt;
4693
4694        /* Rewrite 6- and 12-byte read/write CDBs as 10-byte CDBs, if needed */
4695        switch (cdb[0]) {
4696        case WRITE_6:
4697        case WRITE_12:
4698                is_write = 1;
4699                fallthrough;
4700        case READ_6:
4701        case READ_12:
4702                if (*cdb_len == 6) {
4703                        block = (((cdb[1] & 0x1F) << 16) |
4704                                (cdb[2] << 8) |
4705                                cdb[3]);
4706                        block_cnt = cdb[4];
4707                        if (block_cnt == 0)
4708                                block_cnt = 256;
4709                } else {
4710                        BUG_ON(*cdb_len != 12);
4711                        block = get_unaligned_be32(&cdb[2]);
4712                        block_cnt = get_unaligned_be32(&cdb[6]);
4713                }
4714                if (block_cnt > 0xffff)
4715                        return IO_ACCEL_INELIGIBLE;
4716
4717                cdb[0] = is_write ? WRITE_10 : READ_10;
4718                cdb[1] = 0;
4719                cdb[2] = (u8) (block >> 24);
4720                cdb[3] = (u8) (block >> 16);
4721                cdb[4] = (u8) (block >> 8);
4722                cdb[5] = (u8) (block);
4723                cdb[6] = 0;
4724                cdb[7] = (u8) (block_cnt >> 8);
4725                cdb[8] = (u8) (block_cnt);
4726                cdb[9] = 0;
4727                *cdb_len = 10;
4728                break;
4729        }
4730        return 0;
4731}
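/*
 * Worked example of the fixup above: a READ_6 CDB addressing block
 * 0x012345 with a transfer length byte of 0 (meaning 256 blocks)
 * becomes a READ_10 CDB with bytes 2-5 set to 00 01 23 45 and bytes
 * 7-8 set to 01 00; a request longer than 0xffff blocks is left
 * unmodified and reported as IO_ACCEL_INELIGIBLE.
 */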
4732
4733static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4734        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4735        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4736{
4737        struct scsi_cmnd *cmd = c->scsi_cmd;
4738        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4739        unsigned int len;
4740        unsigned int total_len = 0;
4741        struct scatterlist *sg;
4742        u64 addr64;
4743        int use_sg, i;
4744        struct SGDescriptor *curr_sg;
4745        u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4746
4747        /* TODO: implement chaining support */
4748        if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4749                atomic_dec(&phys_disk->ioaccel_cmds_out);
4750                return IO_ACCEL_INELIGIBLE;
4751        }
4752
4753        BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4754
4755        if (is_zero_length_transfer(cdb)) {
4756                warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4757                atomic_dec(&phys_disk->ioaccel_cmds_out);
4758                return IO_ACCEL_INELIGIBLE;
4759        }
4760
4761        if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4762                atomic_dec(&phys_disk->ioaccel_cmds_out);
4763                return IO_ACCEL_INELIGIBLE;
4764        }
4765
4766        c->cmd_type = CMD_IOACCEL1;
4767
4768        /* Adjust the DMA address to point to the accelerated command buffer */
4769        c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4770                                (c->cmdindex * sizeof(*cp));
4771        BUG_ON(c->busaddr & 0x0000007F);
4772
4773        use_sg = scsi_dma_map(cmd);
4774        if (use_sg < 0) {
4775                atomic_dec(&phys_disk->ioaccel_cmds_out);
4776                return use_sg;
4777        }
4778
4779        if (use_sg) {
4780                curr_sg = cp->SG;
4781                scsi_for_each_sg(cmd, sg, use_sg, i) {
4782                        addr64 = (u64) sg_dma_address(sg);
4783                        len  = sg_dma_len(sg);
4784                        total_len += len;
4785                        curr_sg->Addr = cpu_to_le64(addr64);
4786                        curr_sg->Len = cpu_to_le32(len);
4787                        curr_sg->Ext = cpu_to_le32(0);
4788                        curr_sg++;
4789                }
4790                (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4791
4792                switch (cmd->sc_data_direction) {
4793                case DMA_TO_DEVICE:
4794                        control |= IOACCEL1_CONTROL_DATA_OUT;
4795                        break;
4796                case DMA_FROM_DEVICE:
4797                        control |= IOACCEL1_CONTROL_DATA_IN;
4798                        break;
4799                case DMA_NONE:
4800                        control |= IOACCEL1_CONTROL_NODATAXFER;
4801                        break;
4802                default:
4803                        dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4804                                cmd->sc_data_direction);
4805                        BUG();
4806                        break;
4807                }
4808        } else {
4809                control |= IOACCEL1_CONTROL_NODATAXFER;
4810        }
4811
4812        c->Header.SGList = use_sg;
4813        /* Fill out the command structure to submit */
4814        cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4815        cp->transfer_len = cpu_to_le32(total_len);
4816        cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4817                        (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4818        cp->control = cpu_to_le32(control);
4819        memcpy(cp->CDB, cdb, cdb_len);
4820        memcpy(cp->CISS_LUN, scsi3addr, 8);
4821        /* Tag was already set at init time. */
4822        enqueue_cmd_and_start_io(h, c);
4823        return 0;
4824}
4825
4826/*
4827 * Queue a command directly to a device behind the controller using the
4828 * I/O accelerator path.
4829 */
4830static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4831        struct CommandList *c)
4832{
4833        struct scsi_cmnd *cmd = c->scsi_cmd;
4834        struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4835
4836        if (!dev)
4837                return -1;
4838
4839        c->phys_disk = dev;
4840
4841        if (dev->in_reset)
4842                return -1;
4843
4844        return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4845                cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4846}
4847
4848/*
4849 * Set encryption parameters for the ioaccel2 request
4850 */
4851static void set_encrypt_ioaccel2(struct ctlr_info *h,
4852        struct CommandList *c, struct io_accel2_cmd *cp)
4853{
4854        struct scsi_cmnd *cmd = c->scsi_cmd;
4855        struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4856        struct raid_map_data *map = &dev->raid_map;
4857        u64 first_block;
4858
4859        /* Are we doing encryption on this device? */
4860        if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4861                return;
4862        /* Set the data encryption key index. */
4863        cp->dekindex = map->dekindex;
4864
4865        /* Set the encryption enable flag, encoded into direction field. */
4866        cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4867
4868        /* Set encryption tweak values based on logical block address.
4869         * If the block size is 512, the tweak value is the LBA.
4870         * For other block sizes, the tweak is (LBA * block size) / 512.
4871         */
4872        switch (cmd->cmnd[0]) {
4873        /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4874        case READ_6:
4875        case WRITE_6:
4876                first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4877                                (cmd->cmnd[2] << 8) |
4878                                cmd->cmnd[3]);
4879                break;
4880        case WRITE_10:
4881        case READ_10:
4882        /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4883        case WRITE_12:
4884        case READ_12:
4885                first_block = get_unaligned_be32(&cmd->cmnd[2]);
4886                break;
4887        case WRITE_16:
4888        case READ_16:
4889                first_block = get_unaligned_be64(&cmd->cmnd[2]);
4890                break;
4891        default:
4892                dev_err(&h->pdev->dev,
4893                        "ERROR: %s: size (0x%x) not supported for encryption\n",
4894                        __func__, cmd->cmnd[0]);
4895                BUG();
4896                break;
4897        }
4898
4899        if (le32_to_cpu(map->volume_blk_size) != 512)
4900                first_block = first_block *
4901                                le32_to_cpu(map->volume_blk_size)/512;
4902
4903        cp->tweak_lower = cpu_to_le32(first_block);
4904        cp->tweak_upper = cpu_to_le32(first_block >> 32);
4905}
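/*
 * Example of the tweak scaling above, with an assumed volume block
 * size: for a 4096-byte volume_blk_size, an I/O starting at LBA 10
 * gets tweak = 10 * 4096 / 512 = 80; with a 512-byte block size the
 * tweak is simply the LBA itself.
 */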
4906
4907static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4908        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4909        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4910{
4911        struct scsi_cmnd *cmd = c->scsi_cmd;
4912        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4913        struct ioaccel2_sg_element *curr_sg;
4914        int use_sg, i;
4915        struct scatterlist *sg;
4916        u64 addr64;
4917        u32 len;
4918        u32 total_len = 0;
4919
4920        if (!cmd->device)
4921                return -1;
4922
4923        if (!cmd->device->hostdata)
4924                return -1;
4925
4926        BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4927
4928        if (is_zero_length_transfer(cdb)) {
4929                warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4930                atomic_dec(&phys_disk->ioaccel_cmds_out);
4931                return IO_ACCEL_INELIGIBLE;
4932        }
4933
4934        if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4935                atomic_dec(&phys_disk->ioaccel_cmds_out);
4936                return IO_ACCEL_INELIGIBLE;
4937        }
4938
4939        c->cmd_type = CMD_IOACCEL2;
4940        /* Adjust the DMA address to point to the accelerated command buffer */
4941        c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4942                                (c->cmdindex * sizeof(*cp));
4943        BUG_ON(c->busaddr & 0x0000007F);
4944
4945        memset(cp, 0, sizeof(*cp));
4946        cp->IU_type = IOACCEL2_IU_TYPE;
4947
4948        use_sg = scsi_dma_map(cmd);
4949        if (use_sg < 0) {
4950                atomic_dec(&phys_disk->ioaccel_cmds_out);
4951                return use_sg;
4952        }
4953
4954        if (use_sg) {
4955                curr_sg = cp->sg;
4956                if (use_sg > h->ioaccel_maxsg) {
4957                        addr64 = le64_to_cpu(
4958                                h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4959                        curr_sg->address = cpu_to_le64(addr64);
4960                        curr_sg->length = 0;
4961                        curr_sg->reserved[0] = 0;
4962                        curr_sg->reserved[1] = 0;
4963                        curr_sg->reserved[2] = 0;
4964                        curr_sg->chain_indicator = IOACCEL2_CHAIN;
4965
4966                        curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4967                }
4968                scsi_for_each_sg(cmd, sg, use_sg, i) {
4969                        addr64 = (u64) sg_dma_address(sg);
4970                        len  = sg_dma_len(sg);
4971                        total_len += len;
4972                        curr_sg->address = cpu_to_le64(addr64);
4973                        curr_sg->length = cpu_to_le32(len);
4974                        curr_sg->reserved[0] = 0;
4975                        curr_sg->reserved[1] = 0;
4976                        curr_sg->reserved[2] = 0;
4977                        curr_sg->chain_indicator = 0;
4978                        curr_sg++;
4979                }
4980
4981                /*
4982                 * Set the last s/g element bit
4983                 */
4984                (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4985
4986                switch (cmd->sc_data_direction) {
4987                case DMA_TO_DEVICE:
4988                        cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4989                        cp->direction |= IOACCEL2_DIR_DATA_OUT;
4990                        break;
4991                case DMA_FROM_DEVICE:
4992                        cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4993                        cp->direction |= IOACCEL2_DIR_DATA_IN;
4994                        break;
4995                case DMA_NONE:
4996                        cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4997                        cp->direction |= IOACCEL2_DIR_NO_DATA;
4998                        break;
4999                default:
5000                        dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5001                                cmd->sc_data_direction);
5002                        BUG();
5003                        break;
5004                }
5005        } else {
5006                cp->direction &= ~IOACCEL2_DIRECTION_MASK;
5007                cp->direction |= IOACCEL2_DIR_NO_DATA;
5008        }
5009
5010        /* Set encryption parameters, if necessary */
5011        set_encrypt_ioaccel2(h, c, cp);
5012
5013        cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
5014        cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5015        memcpy(cp->cdb, cdb, sizeof(cp->cdb));
5016
5017        cp->data_len = cpu_to_le32(total_len);
5018        cp->err_ptr = cpu_to_le64(c->busaddr +
5019                        offsetof(struct io_accel2_cmd, error_data));
5020        cp->err_len = cpu_to_le32(sizeof(cp->error_data));
5021
5022        /* fill in sg elements */
5023        if (use_sg > h->ioaccel_maxsg) {
5024                cp->sg_count = 1;
5025                cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
5026                if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
5027                        atomic_dec(&phys_disk->ioaccel_cmds_out);
5028                        scsi_dma_unmap(cmd);
5029                        return -1;
5030                }
5031        } else
5032                cp->sg_count = (u8) use_sg;
5033
5034        if (phys_disk->in_reset) {
5035                cmd->result = DID_RESET << 16;
5036                return -1;
5037        }
5038
5039        enqueue_cmd_and_start_io(h, c);
5040        return 0;
5041}
5042
5043/*
5044 * Queue a command to the correct I/O accelerator path.
5045 */
5046static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
5047        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
5048        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5049{
5050        if (!c->scsi_cmd->device)
5051                return -1;
5052
5053        if (!c->scsi_cmd->device->hostdata)
5054                return -1;
5055
5056        if (phys_disk->in_reset)
5057                return -1;
5058
5059        /* Try to honor the device's queue depth */
5060        if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5061                                        phys_disk->queue_depth) {
5062                atomic_dec(&phys_disk->ioaccel_cmds_out);
5063                return IO_ACCEL_INELIGIBLE;
5064        }
5065        if (h->transMethod & CFGTBL_Trans_io_accel1)
5066                return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5067                                                cdb, cdb_len, scsi3addr,
5068                                                phys_disk);
5069        else
5070                return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5071                                                cdb, cdb_len, scsi3addr,
5072                                                phys_disk);
5073}
5074
5075static void raid_map_helper(struct raid_map_data *map,
5076                int offload_to_mirror, u32 *map_index, u32 *current_group)
5077{
5078        if (offload_to_mirror == 0)  {
5079                /* use physical disk in the first mirrored group. */
5080                *map_index %= le16_to_cpu(map->data_disks_per_row);
5081                return;
5082        }
5083        do {
5084                /* determine mirror group that *map_index indicates */
5085                *current_group = *map_index /
5086                        le16_to_cpu(map->data_disks_per_row);
5087                if (offload_to_mirror == *current_group)
5088                        continue;
5089                if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
5090                        /* select map index from next group */
5091                        *map_index += le16_to_cpu(map->data_disks_per_row);
5092                        (*current_group)++;
5093                } else {
5094                        /* select map index from first group */
5095                        *map_index %= le16_to_cpu(map->data_disks_per_row);
5096                        *current_group = 0;
5097                }
5098        } while (offload_to_mirror != *current_group);
5099}
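/*
 * Worked example, with assumed map values: for a 3-way mirror
 * (layout_map_count == 3) with data_disks_per_row == 2, a *map_index
 * of 1 and offload_to_mirror == 2, the loop above walks from mirror
 * group 0 through group 1 to group 2 and returns *map_index == 5,
 * i.e. the same column within the third mirror group.
 */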
5100
5101/*
5102 * Attempt to perform offload RAID mapping for a logical volume I/O.
5103 */
5104static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5105        struct CommandList *c)
5106{
5107        struct scsi_cmnd *cmd = c->scsi_cmd;
5108        struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5109        struct raid_map_data *map = &dev->raid_map;
5110        struct raid_map_disk_data *dd = &map->data[0];
5111        int is_write = 0;
5112        u32 map_index;
5113        u64 first_block, last_block;
5114        u32 block_cnt;
5115        u32 blocks_per_row;
5116        u64 first_row, last_row;
5117        u32 first_row_offset, last_row_offset;
5118        u32 first_column, last_column;
5119        u64 r0_first_row, r0_last_row;
5120        u32 r5or6_blocks_per_row;
5121        u64 r5or6_first_row, r5or6_last_row;
5122        u32 r5or6_first_row_offset, r5or6_last_row_offset;
5123        u32 r5or6_first_column, r5or6_last_column;
5124        u32 total_disks_per_row;
5125        u32 stripesize;
5126        u32 first_group, last_group, current_group;
5127        u32 map_row;
5128        u32 disk_handle;
5129        u64 disk_block;
5130        u32 disk_block_cnt;
5131        u8 cdb[16];
5132        u8 cdb_len;
5133        u16 strip_size;
5134#if BITS_PER_LONG == 32
5135        u64 tmpdiv;
5136#endif
5137        int offload_to_mirror;
5138
5139        if (!dev)
5140                return -1;
5141
5142        if (dev->in_reset)
5143                return -1;
5144
5145        /* check for valid opcode, get LBA and block count */
5146        switch (cmd->cmnd[0]) {
5147        case WRITE_6:
5148                is_write = 1;
5149                fallthrough;
5150        case READ_6:
5151                first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5152                                (cmd->cmnd[2] << 8) |
5153                                cmd->cmnd[3]);
5154                block_cnt = cmd->cmnd[4];
5155                if (block_cnt == 0)
5156                        block_cnt = 256;
5157                break;
5158        case WRITE_10:
5159                is_write = 1;
5160                fallthrough;
5161        case READ_10:
5162                first_block =
5163                        (((u64) cmd->cmnd[2]) << 24) |
5164                        (((u64) cmd->cmnd[3]) << 16) |
5165                        (((u64) cmd->cmnd[4]) << 8) |
5166                        cmd->cmnd[5];
5167                block_cnt =
5168                        (((u32) cmd->cmnd[7]) << 8) |
5169                        cmd->cmnd[8];
5170                break;
5171        case WRITE_12:
5172                is_write = 1;
5173                fallthrough;
5174        case READ_12:
5175                first_block =
5176                        (((u64) cmd->cmnd[2]) << 24) |
5177                        (((u64) cmd->cmnd[3]) << 16) |
5178                        (((u64) cmd->cmnd[4]) << 8) |
5179                        cmd->cmnd[5];
5180                block_cnt =
5181                        (((u32) cmd->cmnd[6]) << 24) |
5182                        (((u32) cmd->cmnd[7]) << 16) |
5183                        (((u32) cmd->cmnd[8]) << 8) |
5184                        cmd->cmnd[9];
5185                break;
5186        case WRITE_16:
5187                is_write = 1;
5188                fallthrough;
5189        case READ_16:
5190                first_block =
5191                        (((u64) cmd->cmnd[2]) << 56) |
5192                        (((u64) cmd->cmnd[3]) << 48) |
5193                        (((u64) cmd->cmnd[4]) << 40) |
5194                        (((u64) cmd->cmnd[5]) << 32) |
5195                        (((u64) cmd->cmnd[6]) << 24) |
5196                        (((u64) cmd->cmnd[7]) << 16) |
5197                        (((u64) cmd->cmnd[8]) << 8) |
5198                        cmd->cmnd[9];
5199                block_cnt =
5200                        (((u32) cmd->cmnd[10]) << 24) |
5201                        (((u32) cmd->cmnd[11]) << 16) |
5202                        (((u32) cmd->cmnd[12]) << 8) |
5203                        cmd->cmnd[13];
5204                break;
5205        default:
5206                return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5207        }
5208        last_block = first_block + block_cnt - 1;
5209
5210        /* check for write to non-RAID-0 */
5211        if (is_write && dev->raid_level != 0)
5212                return IO_ACCEL_INELIGIBLE;
5213
5214        /* check for invalid block or wraparound */
5215        if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5216                last_block < first_block)
5217                return IO_ACCEL_INELIGIBLE;
5218
5219        /* calculate stripe information for the request */
5220        blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5221                                le16_to_cpu(map->strip_size);
5222        strip_size = le16_to_cpu(map->strip_size);
5223#if BITS_PER_LONG == 32
5224        tmpdiv = first_block;
5225        (void) do_div(tmpdiv, blocks_per_row);
5226        first_row = tmpdiv;
5227        tmpdiv = last_block;
5228        (void) do_div(tmpdiv, blocks_per_row);
5229        last_row = tmpdiv;
5230        first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5231        last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5232        tmpdiv = first_row_offset;
5233        (void) do_div(tmpdiv, strip_size);
5234        first_column = tmpdiv;
5235        tmpdiv = last_row_offset;
5236        (void) do_div(tmpdiv, strip_size);
5237        last_column = tmpdiv;
5238#else
5239        first_row = first_block / blocks_per_row;
5240        last_row = last_block / blocks_per_row;
5241        first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5242        last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5243        first_column = first_row_offset / strip_size;
5244        last_column = last_row_offset / strip_size;
5245#endif
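        /*
         * Worked example, with assumed geometry: with strip_size == 128
         * and data_disks_per_row == 3, blocks_per_row == 384; an 8-block
         * request starting at first_block == 500 gives
         * first_row == last_row == 1, row offsets 116..123, and
         * first_column == last_column == 0, so it stays on one drive.
         */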
5246
5247        /* if this isn't a single row/column then give to the controller */
5248        if ((first_row != last_row) || (first_column != last_column))
5249                return IO_ACCEL_INELIGIBLE;
5250
5251        /* proceeding with driver mapping */
5252        total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5253                                le16_to_cpu(map->metadata_disks_per_row);
5254        map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5255                                le16_to_cpu(map->row_cnt);
5256        map_index = (map_row * total_disks_per_row) + first_column;
5257
5258        switch (dev->raid_level) {
5259        case HPSA_RAID_0:
5260                break; /* nothing special to do */
5261        case HPSA_RAID_1:
5262                /* Handles load balancing across RAID 1 members.
5263                 * (2-drive R1 and R10 with an even # of drives.)
5264                 * Appropriate for SSDs, but not optimal for HDDs.
5265                 * Ensure we have the correct raid_map.
5266                 */
5267                if (le16_to_cpu(map->layout_map_count) != 2) {
5268                        hpsa_turn_off_ioaccel_for_device(dev);
5269                        return IO_ACCEL_INELIGIBLE;
5270                }
5271                if (dev->offload_to_mirror)
5272                        map_index += le16_to_cpu(map->data_disks_per_row);
5273                dev->offload_to_mirror = !dev->offload_to_mirror;
5274                break;
5275        case HPSA_RAID_ADM:
5276                /* Handles N-way mirrors (R1-ADM) and R10 with a
5277                 * # of drives divisible by 3.
5278                 * Ensure we have the correct raid_map.
5279                 */
5280                if (le16_to_cpu(map->layout_map_count) != 3) {
5281                        hpsa_turn_off_ioaccel_for_device(dev);
5282                        return IO_ACCEL_INELIGIBLE;
5283                }
5284
5285                offload_to_mirror = dev->offload_to_mirror;
5286                raid_map_helper(map, offload_to_mirror,
5287                                &map_index, &current_group);
5288                /* set mirror group to use next time */
5289                offload_to_mirror =
5290                        (offload_to_mirror >=
5291                        le16_to_cpu(map->layout_map_count) - 1)
5292                        ? 0 : offload_to_mirror + 1;
5293                dev->offload_to_mirror = offload_to_mirror;
5294                /* Avoid direct use of dev->offload_to_mirror within this
5295                 * function since multiple threads might simultaneously
5296                 * increment it beyond the range of map->layout_map_count - 1.
5297                 */
5298                break;
5299        case HPSA_RAID_5:
5300        case HPSA_RAID_6:
5301                if (le16_to_cpu(map->layout_map_count) <= 1)
5302                        break;
5303
5304                /* Verify first and last block are in same RAID group */
5305                r5or6_blocks_per_row =
5306                        le16_to_cpu(map->strip_size) *
5307                        le16_to_cpu(map->data_disks_per_row);
5308                if (r5or6_blocks_per_row == 0) {
5309                        hpsa_turn_off_ioaccel_for_device(dev);
5310                        return IO_ACCEL_INELIGIBLE;
5311                }
5312                stripesize = r5or6_blocks_per_row *
5313                        le16_to_cpu(map->layout_map_count);
5314#if BITS_PER_LONG == 32
5315                tmpdiv = first_block;
5316                first_group = do_div(tmpdiv, stripesize);
5317                tmpdiv = first_group;
5318                (void) do_div(tmpdiv, r5or6_blocks_per_row);
5319                first_group = tmpdiv;
5320                tmpdiv = last_block;
5321                last_group = do_div(tmpdiv, stripesize);
5322                tmpdiv = last_group;
5323                (void) do_div(tmpdiv, r5or6_blocks_per_row);
5324                last_group = tmpdiv;
5325#else
5326                first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5327                last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5328#endif
5329                if (first_group != last_group)
5330                        return IO_ACCEL_INELIGIBLE;
5331
5332                /* Verify request is in a single row of RAID 5/6 */
5333#if BITS_PER_LONG == 32
5334                tmpdiv = first_block;
5335                (void) do_div(tmpdiv, stripesize);
5336                first_row = r5or6_first_row = r0_first_row = tmpdiv;
5337                tmpdiv = last_block;
5338                (void) do_div(tmpdiv, stripesize);
5339                r5or6_last_row = r0_last_row = tmpdiv;
5340#else
5341                first_row = r5or6_first_row = r0_first_row =
5342                                                first_block / stripesize;
5343                r5or6_last_row = r0_last_row = last_block / stripesize;
5344#endif
5345                if (r5or6_first_row != r5or6_last_row)
5346                        return IO_ACCEL_INELIGIBLE;
5347
5348
5350#if BITS_PER_LONG == 32
5351                tmpdiv = first_block;
5352                first_row_offset = do_div(tmpdiv, stripesize);
5353                tmpdiv = first_row_offset;
5354                first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5355                r5or6_first_row_offset = first_row_offset;
5356                tmpdiv = last_block;
5357                r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5358                tmpdiv = r5or6_last_row_offset;
5359                r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5360                tmpdiv = r5or6_first_row_offset;
5361                (void) do_div(tmpdiv, map->strip_size);
5362                first_column = r5or6_first_column = tmpdiv;
5363                tmpdiv = r5or6_last_row_offset;
5364                (void) do_div(tmpdiv, map->strip_size);
5365                r5or6_last_column = tmpdiv;
5366#else
5367                first_row_offset = r5or6_first_row_offset =
5368                        (u32)((first_block % stripesize) %
5369                                                r5or6_blocks_per_row);
5370
5371                r5or6_last_row_offset =
5372                        (u32)((last_block % stripesize) %
5373                                                r5or6_blocks_per_row);
5374
5375                first_column = r5or6_first_column =
5376                        r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5377                r5or6_last_column =
5378                        r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5379#endif
5380                if (r5or6_first_column != r5or6_last_column)
5381                        return IO_ACCEL_INELIGIBLE;
5382
5383                /* Request is eligible */
5384                map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5385                        le16_to_cpu(map->row_cnt);
5386
5387                map_index = (first_group *
5388                        (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5389                        (map_row * total_disks_per_row) + first_column;
5390                break;
5391        default:
5392                return IO_ACCEL_INELIGIBLE;
5393        }
5394
5395        if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5396                return IO_ACCEL_INELIGIBLE;
5397
5398        c->phys_disk = dev->phys_disk[map_index];
5399        if (!c->phys_disk)
5400                return IO_ACCEL_INELIGIBLE;
5401
5402        disk_handle = dd[map_index].ioaccel_handle;
5403        disk_block = le64_to_cpu(map->disk_starting_blk) +
5404                        first_row * le16_to_cpu(map->strip_size) +
5405                        (first_row_offset - first_column *
5406                        le16_to_cpu(map->strip_size));
5407        disk_block_cnt = block_cnt;
5408
5409        /* handle differing logical/physical block sizes */
5410        if (map->phys_blk_shift) {
5411                disk_block <<= map->phys_blk_shift;
5412                disk_block_cnt <<= map->phys_blk_shift;
5413        }
5414        BUG_ON(disk_block_cnt > 0xffff);
5415
5416        /* build the new CDB for the physical disk I/O */
5417        if (disk_block > 0xffffffff) {
5418                cdb[0] = is_write ? WRITE_16 : READ_16;
5419                cdb[1] = 0;
5420                cdb[2] = (u8) (disk_block >> 56);
5421                cdb[3] = (u8) (disk_block >> 48);
5422                cdb[4] = (u8) (disk_block >> 40);
5423                cdb[5] = (u8) (disk_block >> 32);
5424                cdb[6] = (u8) (disk_block >> 24);
5425                cdb[7] = (u8) (disk_block >> 16);
5426                cdb[8] = (u8) (disk_block >> 8);
5427                cdb[9] = (u8) (disk_block);
5428                cdb[10] = (u8) (disk_block_cnt >> 24);
5429                cdb[11] = (u8) (disk_block_cnt >> 16);
5430                cdb[12] = (u8) (disk_block_cnt >> 8);
5431                cdb[13] = (u8) (disk_block_cnt);
5432                cdb[14] = 0;
5433                cdb[15] = 0;
5434                cdb_len = 16;
5435        } else {
5436                cdb[0] = is_write ? WRITE_10 : READ_10;
5437                cdb[1] = 0;
5438                cdb[2] = (u8) (disk_block >> 24);
5439                cdb[3] = (u8) (disk_block >> 16);
5440                cdb[4] = (u8) (disk_block >> 8);
5441                cdb[5] = (u8) (disk_block);
5442                cdb[6] = 0;
5443                cdb[7] = (u8) (disk_block_cnt >> 8);
5444                cdb[8] = (u8) (disk_block_cnt);
5445                cdb[9] = 0;
5446                cdb_len = 10;
5447        }
5448        return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5449                                                dev->scsi3addr,
5450                                                dev->phys_disk[map_index]);
5451}
5452
5453/*
5454 * Submit commands down the "normal" RAID stack path
5455 * All callers to hpsa_ciss_submit must check lockup_detected
5456 * beforehand: before and (optionally) after calling cmd_alloc.
5457 */
5458static int hpsa_ciss_submit(struct ctlr_info *h,
5459        struct CommandList *c, struct scsi_cmnd *cmd,
5460        struct hpsa_scsi_dev_t *dev)
5461{
5462        cmd->host_scribble = (unsigned char *) c;
5463        c->cmd_type = CMD_SCSI;
5464        c->scsi_cmd = cmd;
5465        c->Header.ReplyQueue = 0;  /* unused in simple mode */
5466        memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
5467        c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5468
5469        /* Fill in the request block... */
5470
5471        c->Request.Timeout = 0;
5472        BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5473        c->Request.CDBLen = cmd->cmd_len;
5474        memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5475        switch (cmd->sc_data_direction) {
5476        case DMA_TO_DEVICE:
5477                c->Request.type_attr_dir =
5478                        TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5479                break;
5480        case DMA_FROM_DEVICE:
5481                c->Request.type_attr_dir =
5482                        TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5483                break;
5484        case DMA_NONE:
5485                c->Request.type_attr_dir =
5486                        TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5487                break;
5488        case DMA_BIDIRECTIONAL:
5489                /* This can happen if a buggy application does a SCSI passthru
5490                 * and sets both inlen and outlen to non-zero (see
5491                 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command()).
5492                 */
5493
5494                c->Request.type_attr_dir =
5495                        TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5496                /* This is technically wrong, and hpsa controllers should
5497                 * reject it with CMD_INVALID, which is the most correct
5498                 * response, but non-fibre backends appear to let it
5499                 * slide by, and give the same results as if this field
5500                 * were set correctly.  Either way is acceptable for
5501                 * our purposes here.
5502                 */
5503
5504                break;
5505
5506        default:
5507                dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5508                        cmd->sc_data_direction);
5509                BUG();
5510                break;
5511        }
5512
5513        if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5514                hpsa_cmd_resolve_and_free(h, c);
5515                return SCSI_MLQUEUE_HOST_BUSY;
5516        }
5517
5518        if (dev->in_reset) {
5519                hpsa_cmd_resolve_and_free(h, c);
5520                return SCSI_MLQUEUE_HOST_BUSY;
5521        }
5522
5523        c->device = dev;
5524
5525        enqueue_cmd_and_start_io(h, c);
5526        /* the cmd will come back via the intr handler in complete_scsi_command() */
5527        return 0;
5528}
5529
5530static void hpsa_cmd_init(struct ctlr_info *h, int index,
5531                                struct CommandList *c)
5532{
5533        dma_addr_t cmd_dma_handle, err_dma_handle;
5534
5535        /* Zero out all of commandlist except the last field, refcount */
5536        memset(c, 0, offsetof(struct CommandList, refcount));
5537        c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5538        cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5539        c->err_info = h->errinfo_pool + index;
5540        memset(c->err_info, 0, sizeof(*c->err_info));
5541        err_dma_handle = h->errinfo_pool_dhandle
5542            + index * sizeof(*c->err_info);
5543        c->cmdindex = index;
5544        c->busaddr = (u32) cmd_dma_handle;
5545        c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5546        c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5547        c->h = h;
5548        c->scsi_cmd = SCSI_CMD_IDLE;
5549}
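/*
 * Pool-layout sketch (as implied by hpsa_cmd_init() above): command
 * entry i lives at h->cmd_pool + i on the CPU side and at
 * h->cmd_pool_dhandle + i * sizeof(struct CommandList) on the DMA side,
 * with the matching error buffer at the same index in errinfo_pool.
 * The low 32 bits of the DMA address become c->busaddr, which is what
 * gets posted to the controller.
 */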
5550
5551static void hpsa_preinitialize_commands(struct ctlr_info *h)
5552{
5553        int i;
5554
5555        for (i = 0; i < h->nr_cmds; i++) {
5556                struct CommandList *c = h->cmd_pool + i;
5557
5558                hpsa_cmd_init(h, i, c);
5559                atomic_set(&c->refcount, 0);
5560        }
5561}
5562
5563static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5564                                struct CommandList *c)
5565{
5566        dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5567
5568        BUG_ON(c->cmdindex != index);
5569
5570        memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5571        memset(c->err_info, 0, sizeof(*c->err_info));
5572        c->busaddr = (u32) cmd_dma_handle;
5573}
5574
5575static int hpsa_ioaccel_submit(struct ctlr_info *h,
5576                struct CommandList *c, struct scsi_cmnd *cmd,
5577                bool retry)
5578{
5579        struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5580        int rc = IO_ACCEL_INELIGIBLE;
5581
5582        if (!dev)
5583                return SCSI_MLQUEUE_HOST_BUSY;
5584
5585        if (dev->in_reset)
5586                return SCSI_MLQUEUE_HOST_BUSY;
5587
5588        if (hpsa_simple_mode)
5589                return IO_ACCEL_INELIGIBLE;
5590
5591        cmd->host_scribble = (unsigned char *) c;
5592
5593        if (dev->offload_enabled) {
5594                hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
5595                c->cmd_type = CMD_SCSI;
5596                c->scsi_cmd = cmd;
5597                c->device = dev;
5598                if (retry) /* Resubmit but do not increment device->commands_outstanding. */
5599                        c->retry_pending = true;
5600                rc = hpsa_scsi_ioaccel_raid_map(h, c);
5601                if (rc < 0)     /* scsi_dma_map failed. */
5602                        rc = SCSI_MLQUEUE_HOST_BUSY;
5603        } else if (dev->hba_ioaccel_enabled) {
5604                hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
5605                c->cmd_type = CMD_SCSI;
5606                c->scsi_cmd = cmd;
5607                c->device = dev;
5608                if (retry) /* Resubmit but do not increment device->commands_outstanding. */
5609                        c->retry_pending = true;
5610                rc = hpsa_scsi_ioaccel_direct_map(h, c);
5611                if (rc < 0)     /* scsi_dma_map failed. */
5612                        rc = SCSI_MLQUEUE_HOST_BUSY;
5613        }
5614        return rc;
5615}
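/*
 * Return-value protocol for hpsa_ioaccel_submit(), as consumed by
 * hpsa_scsi_queue_command() and the resubmit worker below: 0 means the
 * command was started on an ioaccel path; IO_ACCEL_INELIGIBLE means the
 * caller should fall back to the normal CISS path; and
 * SCSI_MLQUEUE_HOST_BUSY signals a transient failure (DMA mapping,
 * device in reset) that should be retried.
 */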
5616
5617static void hpsa_command_resubmit_worker(struct work_struct *work)
5618{
5619        struct scsi_cmnd *cmd;
5620        struct hpsa_scsi_dev_t *dev;
5621        struct CommandList *c = container_of(work, struct CommandList, work);
5622
5623        cmd = c->scsi_cmd;
5624        dev = cmd->device->hostdata;
5625        if (!dev) {
5626                cmd->result = DID_NO_CONNECT << 16;
5627                return hpsa_cmd_free_and_done(c->h, c, cmd);
5628        }
5629
5630        if (dev->in_reset) {
5631                cmd->result = DID_RESET << 16;
5632                return hpsa_cmd_free_and_done(c->h, c, cmd);
5633        }
5634
5635        if (c->cmd_type == CMD_IOACCEL2) {
5636                struct ctlr_info *h = c->h;
5637                struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5638                int rc;
5639
5640                if (c2->error_data.serv_response ==
5641                                IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5642                        /* Resubmit with the retry_pending flag set. */
5643                        rc = hpsa_ioaccel_submit(h, c, cmd, true);
5644                        if (rc == 0)
5645                                return;
5646                        if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5647                                /*
5648                                 * If we get here, it means dma mapping failed.
5649                                 * Try again via scsi mid layer, which will
5650                                 * then get SCSI_MLQUEUE_HOST_BUSY.
5651                                 */
5652                                cmd->result = DID_IMM_RETRY << 16;
5653                                return hpsa_cmd_free_and_done(h, c, cmd);
5654                        }
5655                        /* else, fall thru and resubmit down CISS path */
5656                }
5657        }
5658        hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5659        /*
5660         * Here we have not come in through queue_command, so we
5661         * can set the retry_pending flag to true: this is a
5662         * driver-initiated retry attempt, not an SML-initiated
5663         * retry.
5664         * Note: hpsa_ciss_submit does not zero out the command fields like
5665         *       ioaccel submit does.
5666         */
5667        c->retry_pending = true;
5668        if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
5669                /*
5670                 * If we get here, it means dma mapping failed. Try
5671                 * again via scsi mid layer, which will then get
5672                 * SCSI_MLQUEUE_HOST_BUSY.
5673                 *
5674                 * hpsa_ciss_submit will have already freed c
5675                 * if it encountered a dma mapping failure.
5676                 */
5677                cmd->result = DID_IMM_RETRY << 16;
5678                scsi_done(cmd);
5679        }
5680}
5681
5682/* Running in struct Scsi_Host->host_lock less mode */
5683static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5684{
5685        struct ctlr_info *h;
5686        struct hpsa_scsi_dev_t *dev;
5687        struct CommandList *c;
5688        int rc = 0;
5689
5690        /* Get the ptr to our adapter structure out of cmd->host. */
5691        h = sdev_to_hba(cmd->device);
5692
5693        BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0);
5694
5695        dev = cmd->device->hostdata;
5696        if (!dev) {
5697                cmd->result = DID_NO_CONNECT << 16;
5698                scsi_done(cmd);
5699                return 0;
5700        }
5701
5702        if (dev->removed) {
5703                cmd->result = DID_NO_CONNECT << 16;
5704                scsi_done(cmd);
5705                return 0;
5706        }
5707
5708        if (unlikely(lockup_detected(h))) {
5709                cmd->result = DID_NO_CONNECT << 16;
5710                scsi_done(cmd);
5711                return 0;
5712        }
5713
5714        if (dev->in_reset)
5715                return SCSI_MLQUEUE_DEVICE_BUSY;
5716
5717        c = cmd_tagged_alloc(h, cmd);
5718        if (c == NULL)
5719                return SCSI_MLQUEUE_DEVICE_BUSY;
5720
5721        /*
5722         * This is necessary because the SML doesn't zero out this field during
5723         * error recovery.
5724         */
5725        cmd->result = 0;
5726
5727        /*
5728         * Call alternate submit routine for I/O accelerated commands.
5729         * Retries always go down the normal I/O path.
5730         * Note: If cmd->retries is non-zero, then this is a SML
5731         *       initiated retry and not a driver initiated retry.
5732         *       This command has been obtained from cmd_tagged_alloc
5733         *       and is therefore a brand-new command.
5734         */
5735        if (likely(cmd->retries == 0 &&
5736                        !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) &&
5737                        h->acciopath_status)) {
5738                /* Submit with the retry_pending flag unset. */
5739                rc = hpsa_ioaccel_submit(h, c, cmd, false);
5740                if (rc == 0)
5741                        return 0;
5742                if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5743                        hpsa_cmd_resolve_and_free(h, c);
5744                        return SCSI_MLQUEUE_HOST_BUSY;
5745                }
5746        }
5747        return hpsa_ciss_submit(h, c, cmd, dev);
5748}
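/*
 * Submission-order recap: fresh, non-passthrough commands try the
 * ioaccel fast path first whenever acciopath_status is enabled; SML
 * retries (cmd->retries != 0) and ineligible requests always go down
 * hpsa_ciss_submit() to the normal RAID stack.
 */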
5749
5750static void hpsa_scan_complete(struct ctlr_info *h)
5751{
5752        unsigned long flags;
5753
5754        spin_lock_irqsave(&h->scan_lock, flags);
5755        h->scan_finished = 1;
5756        wake_up(&h->scan_wait_queue);
5757        spin_unlock_irqrestore(&h->scan_lock, flags);
5758}
5759
5760static void hpsa_scan_start(struct Scsi_Host *sh)
5761{
5762        struct ctlr_info *h = shost_to_hba(sh);
5763        unsigned long flags;
5764
5765        /*
5766         * Don't let rescans be initiated on a controller known to be locked
5767         * up.  If the controller locks up *during* a rescan, that thread is
5768         * probably hosed, but at least we can prevent new rescan threads from
5769         * piling up on a locked up controller.
5770         */
5771        if (unlikely(lockup_detected(h)))
5772                return hpsa_scan_complete(h);
5773
5774        /*
5775         * If a scan is already waiting to run, no need to add another
5776         */
5777        spin_lock_irqsave(&h->scan_lock, flags);
5778        if (h->scan_waiting) {
5779                spin_unlock_irqrestore(&h->scan_lock, flags);
5780                return;
5781        }
5782
5783        spin_unlock_irqrestore(&h->scan_lock, flags);
5784
5785        /* wait until any scan already in progress is finished. */
5786        while (1) {
5787                spin_lock_irqsave(&h->scan_lock, flags);
5788                if (h->scan_finished)
5789                        break;
5790                h->scan_waiting = 1;
5791                spin_unlock_irqrestore(&h->scan_lock, flags);
5792                wait_event(h->scan_wait_queue, h->scan_finished);
5793                /* Note: We don't need to worry about a race between this
5794                 * thread and driver unload because the midlayer will
5795                 * have incremented the reference count, so unload won't
5796                 * happen if we're in here.
5797                 */
5798        }
5799        h->scan_finished = 0; /* mark scan as in progress */
5800        h->scan_waiting = 0;
5801        spin_unlock_irqrestore(&h->scan_lock, flags);
5802
5803        if (unlikely(lockup_detected(h)))
5804                return hpsa_scan_complete(h);
5805
5806        /*
5807         * Do the scan after a reset completion
5808         */
5809        spin_lock_irqsave(&h->reset_lock, flags);
5810        if (h->reset_in_progress) {
5811                h->drv_req_rescan = 1;
5812                spin_unlock_irqrestore(&h->reset_lock, flags);
5813                hpsa_scan_complete(h);
5814                return;
5815        }
5816        spin_unlock_irqrestore(&h->reset_lock, flags);
5817
5818        hpsa_update_scsi_devices(h);
5819
5820        hpsa_scan_complete(h);
5821}
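/*
 * Scan-state sketch: scan_finished == 1 means the controller is idle,
 * 0 means a scan is running; scan_waiting == 1 means one rescan is
 * already queued behind it, so additional hpsa_scan_start() calls
 * return immediately and rescans coalesce instead of piling up.
 */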
5822
5823static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5824{
5825        struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5826
5827        if (!logical_drive)
5828                return -ENODEV;
5829
5830        if (qdepth < 1)
5831                qdepth = 1;
5832        else if (qdepth > logical_drive->queue_depth)
5833                qdepth = logical_drive->queue_depth;
5834
5835        return scsi_change_queue_depth(sdev, qdepth);
5836}
5837
5838static int hpsa_scan_finished(struct Scsi_Host *sh,
5839        unsigned long elapsed_time)
5840{
5841        struct ctlr_info *h = shost_to_hba(sh);
5842        unsigned long flags;
5843        int finished;
5844
5845        spin_lock_irqsave(&h->scan_lock, flags);
5846        finished = h->scan_finished;
5847        spin_unlock_irqrestore(&h->scan_lock, flags);
5848        return finished;
5849}
5850
5851static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5852{
5853        struct Scsi_Host *sh;
5854
5855        sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5856        if (sh == NULL) {
5857                dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5858                return -ENOMEM;
5859        }
5860
5861        sh->io_port = 0;
5862        sh->n_io_port = 0;
5863        sh->this_id = -1;
5864        sh->max_channel = 3;
5865        sh->max_cmd_len = MAX_COMMAND_SIZE;
5866        sh->max_lun = HPSA_MAX_LUN;
5867        sh->max_id = HPSA_MAX_LUN;
5868        sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5869        sh->cmd_per_lun = sh->can_queue;
5870        sh->sg_tablesize = h->maxsgentries;
5871        sh->transportt = hpsa_sas_transport_template;
5872        sh->hostdata[0] = (unsigned long) h;
5873        sh->irq = pci_irq_vector(h->pdev, 0);
5874        sh->unique_id = sh->irq;
5875
5876        h->scsi_host = sh;
5877        return 0;
5878}
5879
5880static int hpsa_scsi_add_host(struct ctlr_info *h)
5881{
5882        int rv;
5883
5884        rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5885        if (rv) {
5886                dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5887                return rv;
5888        }
5889        scsi_scan_host(h->scsi_host);
5890        return 0;
5891}
5892
5893/*
5894 * The block layer has already gone to the trouble of picking out a unique,
5895 * small-integer tag for this request.  We use an offset from that value as
5896 * an index to select our command block.  (The offset allows us to reserve the
5897 * low-numbered entries for our own uses.)
5898 */
5899static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5900{
5901        int idx = scsi_cmd_to_rq(scmd)->tag;
5902
5903        if (idx < 0)
5904                return idx;
5905
5906        /* Offset to leave space for internal cmds. */
5907        return idx + HPSA_NRESERVED_CMDS;
5908}
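/*
 * Index example (the HPSA_NRESERVED_CMDS value here is illustrative
 * only): if 16 entries were reserved, block-layer tag 3 would select
 * command pool entry 19, leaving entries 0..15 to cmd_alloc() for
 * driver-internal commands.
 */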
5909
5910/*
5911 * Send a TEST_UNIT_READY command to the specified LUN using the specified
5912 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5913 */
5914static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5915                                struct CommandList *c, unsigned char lunaddr[],
5916                                int reply_queue)
5917{
5918        int rc;
5919
5920        /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5921        (void) fill_cmd(c, TEST_UNIT_READY, h,
5922                        NULL, 0, 0, lunaddr, TYPE_CMD);
5923        rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5924        if (rc)
5925                return rc;
5926        /* no unmap needed here because no data xfer. */
5927
5928        /* Check if the unit is already ready. */
5929        if (c->err_info->CommandStatus == CMD_SUCCESS)
5930                return 0;
5931
5932        /*
5933         * The first command sent after reset will receive "unit attention" to
5934         * indicate that the LUN has been reset...this is actually what we're
5935         * looking for (but, success is good too).
5936         */
5937        if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5938                c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5939                        (c->err_info->SenseInfo[2] == NO_SENSE ||
5940                         c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5941                return 0;
5942
5943        return 1;
5944}
5945
5946/*
5947 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5948 * returns zero when the unit is ready, and non-zero when giving up.
5949 */
5950static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5951                                struct CommandList *c,
5952                                unsigned char lunaddr[], int reply_queue)
5953{
5954        int rc;
5955        int count = 0;
5956        int waittime = 1; /* seconds */
5957
5958        /* Send test unit ready until device ready, or give up. */
5959        for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5960
5961                /*
5962                 * Wait for a bit.  do this first, because if we send
5963                 * the TUR right away, the reset will just abort it.
5964                 */
5965                msleep(1000 * waittime);
5966
5967                rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5968                if (!rc)
5969                        break;
5970
5971                /* Increase wait time with each try, up to a point. */
5972                if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5973                        waittime *= 2;
5974
5975                dev_warn(&h->pdev->dev,
5976                         "waiting %d secs for device to become ready.\n",
5977                         waittime);
5978        }
5979
5980        return rc;
5981}
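/*
 * Backoff sketch: the sleep taken before each TUR starts at 1 second
 * and doubles (1, 2, 4, 8, ... s) while it remains below
 * HPSA_MAX_WAIT_INTERVAL_SECS, so attempts land at roughly
 * t = 1, 3, 7, 15, ... seconds until HPSA_TUR_RETRY_LIMIT tries are
 * exhausted.
 */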
5982
5983static int wait_for_device_to_become_ready(struct ctlr_info *h,
5984                                           unsigned char lunaddr[],
5985                                           int reply_queue)
5986{
5987        int first_queue;
5988        int last_queue;
5989        int rq;
5990        int rc = 0;
5991        struct CommandList *c;
5992
5993        c = cmd_alloc(h);
5994
5995        /*
5996         * If no specific reply queue was requested, then send the TUR
5997         * repeatedly, requesting a reply on each reply queue; otherwise execute
5998         * the loop exactly once using only the specified queue.
5999         */
6000        if (reply_queue == DEFAULT_REPLY_QUEUE) {
6001                first_queue = 0;
6002                last_queue = h->nreply_queues - 1;
6003        } else {
6004                first_queue = reply_queue;
6005                last_queue = reply_queue;
6006        }
6007
6008        for (rq = first_queue; rq <= last_queue; rq++) {
6009                rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
6010                if (rc)
6011                        break;
6012        }
6013
6014        if (rc)
6015                dev_warn(&h->pdev->dev, "giving up on device.\n");
6016        else
6017                dev_warn(&h->pdev->dev, "device is ready.\n");
6018
6019        cmd_free(h, c);
6020        return rc;
6021}
6022
6023/* Need at least one of these error handlers to keep ../scsi/hosts.c from
6024 * complaining.  Doing a host- or bus-reset can't do anything good here.
6025 */
6026static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
6027{
6028        int rc = SUCCESS;
6029        int i;
6030        struct ctlr_info *h;
6031        struct hpsa_scsi_dev_t *dev = NULL;
6032        u8 reset_type;
6033        char msg[48];
6034        unsigned long flags;
6035
6036        /* find the controller to which the command to be aborted was sent */
6037        h = sdev_to_hba(scsicmd->device);
6038        if (h == NULL) /* paranoia */
6039                return FAILED;
6040
6041        spin_lock_irqsave(&h->reset_lock, flags);
6042        h->reset_in_progress = 1;
6043        spin_unlock_irqrestore(&h->reset_lock, flags);
6044
6045        if (lockup_detected(h)) {
6046                rc = FAILED;
6047                goto return_reset_status;
6048        }
6049
6050        dev = scsicmd->device->hostdata;
6051        if (!dev) {
6052                dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
6053                rc = FAILED;
6054                goto return_reset_status;
6055        }
6056
6057        if (dev->devtype == TYPE_ENCLOSURE) {
6058                rc = SUCCESS;
6059                goto return_reset_status;
6060        }
6061
6062        /* if controller locked up, we can guarantee command won't complete */
6063        if (lockup_detected(h)) {
6064                snprintf(msg, sizeof(msg),
6065                         "cmd %d RESET FAILED, lockup detected",
6066                         hpsa_get_cmd_index(scsicmd));
6067                hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6068                rc = FAILED;
6069                goto return_reset_status;
6070        }
6071
6072        /* this reset request might be the result of a lockup; check */
6073        if (detect_controller_lockup(h)) {
6074                snprintf(msg, sizeof(msg),
6075                         "cmd %d RESET FAILED, new lockup detected",
6076                         hpsa_get_cmd_index(scsicmd));
6077                hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6078                rc = FAILED;
6079                goto return_reset_status;
6080        }
6081
6082        /* Do not attempt on controller */
6083        if (is_hba_lunid(dev->scsi3addr)) {
6084                rc = SUCCESS;
6085                goto return_reset_status;
6086        }
6087
6088        if (is_logical_dev_addr_mode(dev->scsi3addr))
6089                reset_type = HPSA_DEVICE_RESET_MSG;
6090        else
6091                reset_type = HPSA_PHYS_TARGET_RESET;
6092
6093        sprintf(msg, "resetting %s",
6094                reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
6095        hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6096
6097        /*
6098         * wait to see if any commands will complete before sending reset
6099         */
6100        dev->in_reset = true; /* block any new cmds from OS for this device */
6101        for (i = 0; i < 10; i++) {
6102                if (atomic_read(&dev->commands_outstanding) > 0)
6103                        msleep(1000);
6104                else
6105                        break;
6106        }
6107
6108        /* send a reset to the SCSI LUN which the command was sent to */
6109        rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
6110        if (rc == 0)
6111                rc = SUCCESS;
6112        else
6113                rc = FAILED;
6114
6115        sprintf(msg, "reset %s %s",
6116                reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
6117                rc == SUCCESS ? "completed successfully" : "failed");
6118        hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6119
6120return_reset_status:
6121        spin_lock_irqsave(&h->reset_lock, flags);
6122        h->reset_in_progress = 0;
6123        if (dev)
6124                dev->in_reset = false;
6125        spin_unlock_irqrestore(&h->reset_lock, flags);
6126        return rc;
6127}
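/*
 * Reset sequencing recap: the handler sets dev->in_reset so
 * queue_command turns away new I/O with SCSI_MLQUEUE_DEVICE_BUSY,
 * waits up to ~10 seconds for outstanding commands to drain, then
 * issues either a logical (HPSA_DEVICE_RESET_MSG) or physical
 * (HPSA_PHYS_TARGET_RESET) reset on the default reply queue.
 */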
6128
6129/*
6130 * For operations with an associated SCSI command, a command block is allocated
6131 * at init and managed by cmd_tagged_alloc(), using the block request
6132 * tag as an index into a table of entries.  cmd_tagged_free() is the
6133 * complement, although cmd_free() may be called instead.
6134 * This function is only called for new requests from queue_command.
6135 */
6136static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6137                                            struct scsi_cmnd *scmd)
6138{
6139        int idx = hpsa_get_cmd_index(scmd);
6140        struct CommandList *c = h->cmd_pool + idx;
6141
6142        if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6143                dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6144                        idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6145                /* The index value comes from the block layer, so if it's out of
6146                 * bounds, it's probably not our bug.
6147                 */
6148                BUG();
6149        }
6150
6151        if (unlikely(!hpsa_is_cmd_idle(c))) {
6152                /*
6153                 * We expect that the SCSI layer will hand us a unique tag
6154                 * value.  Thus, there should never be a collision here between
6155                 * two requests...because if the selected command isn't idle
6156                 * then someone is going to be very disappointed.
6157                 */
6158                if (idx != h->last_collision_tag) { /* Print once per tag */
6159                        dev_warn(&h->pdev->dev,
6160                                "%s: tag collision (tag=%d)\n", __func__, idx);
6161                        if (scmd)
6162                                scsi_print_command(scmd);
6163                        h->last_collision_tag = idx;
6164                }
6165                return NULL;
6166        }
6167
6168        atomic_inc(&c->refcount);
6169        hpsa_cmd_partial_init(h, idx, c);
6170
6171        /*
6172         * This is a new command obtained from queue_command so
6173         * there have not been any driver initiated retry attempts.
6174         */
6175        c->retry_pending = false;
6176
6177        return c;
6178}
6179
6180static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6181{
6182        /*
6183         * Release our reference to the block.  We don't need to do anything
6184         * else to free it, because it is accessed by index.
6185         */
6186        (void)atomic_dec(&c->refcount);
6187}
6188
6189/*
6190 * For operations that cannot sleep, a command block is allocated at init,
6191 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
6192 * which ones are free or in use.  Lock must be held when calling this.
6193 * cmd_free() is the complement.
6194 * This function never gives up, and it never returns NULL.  If it hangs,
6195 * another thread must call cmd_free() to free some tags.
6196 */
6197
6198static struct CommandList *cmd_alloc(struct ctlr_info *h)
6199{
6200        struct CommandList *c;
6201        int refcount, i;
6202        int offset = 0;
6203
6204        /*
6205         * There is some *extremely* small but non-zero chance that
6206         * multiple threads could get in here, and one thread could
6207         * be scanning through the list of bits looking for a free
6208         * one, but the free ones are always behind him, and other
6209         * threads sneak in behind him and eat them before he can
6210         * get to them, so that while there is always a free one, a
6211         * very unlucky thread might be starved anyway, never able to
6212         * beat the other threads.  In reality, this happens so
6213         * infrequently as to be indistinguishable from never.
6214         *
6215         * Note that we start allocating commands before the SCSI host structure
6216         * is initialized.  Because the search starts at bit zero and at
6217         * least one command structure is always available, this all works;
6218         * however, it means that the structures with the low indexes have to be
6219         * reserved for driver-initiated requests, while requests from the block
6220         * layer will use the higher indexes.
6221         */
6222
6223        for (;;) {
6224                i = find_next_zero_bit(h->cmd_pool_bits,
6225                                        HPSA_NRESERVED_CMDS,
6226                                        offset);
6227                if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6228                        offset = 0;
6229                        continue;
6230                }
6231                c = h->cmd_pool + i;
6232                refcount = atomic_inc_return(&c->refcount);
6233                if (unlikely(refcount > 1)) {
6234                        cmd_free(h, c); /* already in use */
6235                        offset = (i + 1) % HPSA_NRESERVED_CMDS;
6236                        continue;
6237                }
6238                set_bit(i & (BITS_PER_LONG - 1),
6239                        h->cmd_pool_bits + (i / BITS_PER_LONG));
6240                break; /* it's ours now. */
6241        }
6242        hpsa_cmd_partial_init(h, i, c);
6243        c->device = NULL;
6244
6245        /*
6246         * cmd_alloc is for "internal" commands and they are never
6247         * retried.
6248         */
6249        c->retry_pending = false;
6250
6251        return c;
6252}
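/*
 * Ownership note for the loop above: the atomic refcount is the real
 * claim on a slot (a racing thread that bumps it past 1 backs off via
 * cmd_free()); the bitmap merely steers find_next_zero_bit() away from
 * entries that are probably busy.
 */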
6253
6254/*
6255 * This is the complementary operation to cmd_alloc().  Note, however, in some
6256 * corner cases it may also be used to free blocks allocated by
6257 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6258 * the clear-bit is harmless.
6259 */
6260static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6261{
6262        if (atomic_dec_and_test(&c->refcount)) {
6263                int i;
6264
6265                i = c - h->cmd_pool;
6266                clear_bit(i & (BITS_PER_LONG - 1),
6267                          h->cmd_pool_bits + (i / BITS_PER_LONG));
6268        }
6269}
6270
6271#ifdef CONFIG_COMPAT
6272
6273static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
6274        void __user *arg)
6275{
6276        struct ctlr_info *h = sdev_to_hba(dev);
6277        IOCTL32_Command_struct __user *arg32 = arg;
6278        IOCTL_Command_struct arg64;
6279        int err;
6280        u32 cp;
6281
6282        if (!arg)
6283                return -EINVAL;
6284
6285        memset(&arg64, 0, sizeof(arg64));
6286        if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
6287                return -EFAULT;
6288        if (get_user(cp, &arg32->buf))
6289                return -EFAULT;
6290        arg64.buf = compat_ptr(cp);
6291
6292        if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6293                return -EAGAIN;
6294        err = hpsa_passthru_ioctl(h, &arg64);
6295        atomic_inc(&h->passthru_cmds_avail);
6296        if (err)
6297                return err;
6298        if (copy_to_user(&arg32->error_info, &arg64.error_info,
6299                         sizeof(arg32->error_info)))
6300                return -EFAULT;
6301        return 0;
6302}
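/*
 * Conversion sketch for both compat thunks: the 32-bit and native
 * structures are identical up to the trailing buffer pointer, so
 * everything before 'buf' is copied verbatim and only the pointer
 * itself is widened with compat_ptr().
 */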
6303
6304static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6305        unsigned int cmd, void __user *arg)
6306{
6307        struct ctlr_info *h = sdev_to_hba(dev);
6308        BIG_IOCTL32_Command_struct __user *arg32 = arg;
6309        BIG_IOCTL_Command_struct arg64;
6310        int err;
6311        u32 cp;
6312
6313        if (!arg)
6314                return -EINVAL;
6315        memset(&arg64, 0, sizeof(arg64));
6316        if (copy_from_user(&arg64, arg32,
6317                           offsetof(BIG_IOCTL32_Command_struct, buf)))
6318                return -EFAULT;
6319        if (get_user(cp, &arg32->buf))
6320                return -EFAULT;
6321        arg64.buf = compat_ptr(cp);
6322
6323        if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6324                return -EAGAIN;
6325        err = hpsa_big_passthru_ioctl(h, &arg64);
6326        atomic_inc(&h->passthru_cmds_avail);
6327        if (err)
6328                return err;
6329        if (copy_to_user(&arg32->error_info, &arg64.error_info,
6330                         sizeof(arg32->error_info)))
6331                return -EFAULT;
6332        return 0;
6333}
6334
6335static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
6336                             void __user *arg)
6337{
6338        switch (cmd) {
6339        case CCISS_GETPCIINFO:
6340        case CCISS_GETINTINFO:
6341        case CCISS_SETINTINFO:
6342        case CCISS_GETNODENAME:
6343        case CCISS_SETNODENAME:
6344        case CCISS_GETHEARTBEAT:
6345        case CCISS_GETBUSTYPES:
6346        case CCISS_GETFIRMVER:
6347        case CCISS_GETDRIVVER:
6348        case CCISS_REVALIDVOLS:
6349        case CCISS_DEREGDISK:
6350        case CCISS_REGNEWDISK:
6351        case CCISS_REGNEWD:
6352        case CCISS_RESCANDISK:
6353        case CCISS_GETLUNINFO:
6354                return hpsa_ioctl(dev, cmd, arg);
6355
6356        case CCISS_PASSTHRU32:
6357                return hpsa_ioctl32_passthru(dev, cmd, arg);
6358        case CCISS_BIG_PASSTHRU32:
6359                return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6360
6361        default:
6362                return -ENOIOCTLCMD;
6363        }
6364}
6365#endif
6366
6367static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6368{
6369        struct hpsa_pci_info pciinfo;
6370
6371        if (!argp)
6372                return -EINVAL;
6373        pciinfo.domain = pci_domain_nr(h->pdev->bus);
6374        pciinfo.bus = h->pdev->bus->number;
6375        pciinfo.dev_fn = h->pdev->devfn;
6376        pciinfo.board_id = h->board_id;
6377        if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6378                return -EFAULT;
6379        return 0;
6380}
6381
6382static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6383{
6384        DriverVer_type DriverVer;
6385        unsigned char vmaj, vmin, vsubmin;
6386        int rc;
6387
6388        rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6389                &vmaj, &vmin, &vsubmin);
6390        if (rc != 3) {
6391                dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.",
6392                        HPSA_DRIVER_VERSION);
6393                vmaj = 0;
6394                vmin = 0;
6395                vsubmin = 0;
6396        }
6397        DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6398        if (!argp)
6399                return -EINVAL;
6400        if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6401                return -EFAULT;
6402        return 0;
6403}
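/*
 * Packing example: a driver version string of "3.4.20" parses to
 * vmaj = 3, vmin = 4, vsubmin = 20, yielding
 * DriverVer = (3 << 16) | (4 << 8) | 20 = 0x030414.
 */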
6404
6405static int hpsa_passthru_ioctl(struct ctlr_info *h,
6406                               IOCTL_Command_struct *iocommand)
6407{
6408        struct CommandList *c;
6409        char *buff = NULL;
6410        u64 temp64;
6411        int rc = 0;
6412
6413        if (!capable(CAP_SYS_RAWIO))
6414                return -EPERM;
6415        if ((iocommand->buf_size < 1) &&
6416            (iocommand->Request.Type.Direction != XFER_NONE)) {
6417                return -EINVAL;
6418        }
6419        if (iocommand->buf_size > 0) {
6420                buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
6421                if (buff == NULL)
6422                        return -ENOMEM;
6423                if (iocommand->Request.Type.Direction & XFER_WRITE) {
6424                        /* Copy the data into the buffer we created */
6425                        if (copy_from_user(buff, iocommand->buf,
6426                                iocommand->buf_size)) {
6427                                rc = -EFAULT;
6428                                goto out_kfree;
6429                        }
6430                } else {
6431                        memset(buff, 0, iocommand->buf_size);
6432                }
6433        }
6434        c = cmd_alloc(h);
6435
6436        /* Fill in the command type */
6437        c->cmd_type = CMD_IOCTL_PEND;
6438        c->scsi_cmd = SCSI_CMD_BUSY;
6439        /* Fill in Command Header */
6440        c->Header.ReplyQueue = 0; /* unused in simple mode */
6441        if (iocommand->buf_size > 0) {  /* buffer to fill */
6442                c->Header.SGList = 1;
6443                c->Header.SGTotal = cpu_to_le16(1);
6444        } else  { /* no buffers to fill */
6445                c->Header.SGList = 0;
6446                c->Header.SGTotal = cpu_to_le16(0);
6447        }
6448        memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN));
6449
6450        /* Fill in Request block */
6451        memcpy(&c->Request, &iocommand->Request,
6452                sizeof(c->Request));
6453
6454        /* Fill in the scatter gather information */
6455        if (iocommand->buf_size > 0) {
6456                temp64 = dma_map_single(&h->pdev->dev, buff,
6457                        iocommand->buf_size, DMA_BIDIRECTIONAL);
6458                if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6459                        c->SG[0].Addr = cpu_to_le64(0);
6460                        c->SG[0].Len = cpu_to_le32(0);
6461                        rc = -ENOMEM;
6462                        goto out;
6463                }
6464                c->SG[0].Addr = cpu_to_le64(temp64);
6465                c->SG[0].Len = cpu_to_le32(iocommand->buf_size);
6466                c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6467        }
6468        rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6469                                        NO_TIMEOUT);
6470        if (iocommand->buf_size > 0)
6471                hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
6472        check_ioctl_unit_attention(h, c);
6473        if (rc) {
6474                rc = -EIO;
6475                goto out;
6476        }
6477
6478        /* Copy the error information out */
6479        memcpy(&iocommand->error_info, c->err_info,
6480                sizeof(iocommand->error_info));
6481        if ((iocommand->Request.Type.Direction & XFER_READ) &&
6482                iocommand->buf_size > 0) {
6483                /* Copy the data out of the buffer we created */
6484                if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) {
6485                        rc = -EFAULT;
6486                        goto out;
6487                }
6488        }
6489out:
6490        cmd_free(h, c);
6491out_kfree:
6492        kfree(buff);
6493        return rc;
6494}
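/*
 * Userspace sketch (illustrative only; structure layout per
 * linux/cciss_ioctl.h, with fd an open descriptor for the device node
 * and ic.LUN_info filled in with the target's 8-byte address):
 *
 *	IOCTL_Command_struct ic = { 0 };
 *	ic.Request.CDBLen = 6;
 *	ic.Request.CDB[0] = 0x12;	// INQUIRY
 *	ic.Request.CDB[4] = 96;		// allocation length
 *	ic.Request.Type.Direction = XFER_READ;
 *	ic.buf_size = 96;
 *	ic.buf = buffer;
 *	ioctl(fd, CCISS_PASSTHRU, &ic);
 *
 * On return, status is in ic.error_info and any data in 'buffer'.
 */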
6495
6496static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
6497                                   BIG_IOCTL_Command_struct *ioc)
6498{
6499        struct CommandList *c;
6500        unsigned char **buff = NULL;
6501        int *buff_size = NULL;
6502        u64 temp64;
6503        BYTE sg_used = 0;
6504        int status = 0;
6505        u32 left;
6506        u32 sz;
6507        BYTE __user *data_ptr;
6508
6509        if (!capable(CAP_SYS_RAWIO))
6510                return -EPERM;
6511
6512        if ((ioc->buf_size < 1) &&
6513            (ioc->Request.Type.Direction != XFER_NONE))
6514                return -EINVAL;
6515        /* Check kmalloc limits  using all SGs */
6516        if (ioc->malloc_size > MAX_KMALLOC_SIZE)
6517                return -EINVAL;
6518        if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD)
6519                return -EINVAL;
6520        buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6521        if (!buff) {
6522                status = -ENOMEM;
6523                goto cleanup1;
6524        }
6525        buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6526        if (!buff_size) {
6527                status = -ENOMEM;
6528                goto cleanup1;
6529        }
6530        left = ioc->buf_size;
6531        data_ptr = ioc->buf;
6532        while (left) {
6533                sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6534                buff_size[sg_used] = sz;
6535                buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6536                if (buff[sg_used] == NULL) {
6537                        status = -ENOMEM;
6538                        goto cleanup1;
6539                }
6540                if (ioc->Request.Type.Direction & XFER_WRITE) {
6541                        if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6542                                status = -EFAULT;
6543                                goto cleanup1;
6544                        }
6545                } else
6546                        memset(buff[sg_used], 0, sz);
6547                left -= sz;
6548                data_ptr += sz;
6549                sg_used++;
6550        }
6551        c = cmd_alloc(h);
6552
6553        c->cmd_type = CMD_IOCTL_PEND;
6554        c->scsi_cmd = SCSI_CMD_BUSY;
6555        c->Header.ReplyQueue = 0;
6556        c->Header.SGList = (u8) sg_used;
6557        c->Header.SGTotal = cpu_to_le16(sg_used);
6558        memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6559        memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6560        if (ioc->buf_size > 0) {
6561                int i;
6562                for (i = 0; i < sg_used; i++) {
6563                        temp64 = dma_map_single(&h->pdev->dev, buff[i],
6564                                    buff_size[i], DMA_BIDIRECTIONAL);
6565                        if (dma_mapping_error(&h->pdev->dev,
6566                                                        (dma_addr_t) temp64)) {
6567                                c->SG[i].Addr = cpu_to_le64(0);
6568                                c->SG[i].Len = cpu_to_le32(0);
6569                                hpsa_pci_unmap(h->pdev, c, i,
6570                                        DMA_BIDIRECTIONAL);
6571                                status = -ENOMEM;
6572                                goto cleanup0;
6573                        }
6574                        c->SG[i].Addr = cpu_to_le64(temp64);
6575                        c->SG[i].Len = cpu_to_le32(buff_size[i]);
6576                        c->SG[i].Ext = cpu_to_le32(0);
6577                }
6578                c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6579        }
6580        status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6581                                                NO_TIMEOUT);
6582        if (sg_used)
6583                hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
6584        check_ioctl_unit_attention(h, c);
6585        if (status) {
6586                status = -EIO;
6587                goto cleanup0;
6588        }
6589
6590        /* Copy the error information out */
6591        memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6592        if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6593                int i;
6594
6595                /* Copy the data out of the buffer we created */
6596                BYTE __user *ptr = ioc->buf;
6597                for (i = 0; i < sg_used; i++) {
6598                        if (copy_to_user(ptr, buff[i], buff_size[i])) {
6599                                status = -EFAULT;
6600                                goto cleanup0;
6601                        }
6602                        ptr += buff_size[i];
6603                }
6604        }
6605        status = 0;
6606cleanup0:
6607        cmd_free(h, c);
6608cleanup1:
6609        if (buff) {
6610                int i;
6611
6612                for (i = 0; i < sg_used; i++)
6613                        kfree(buff[i]);
6614                kfree(buff);
6615        }
6616        kfree(buff_size);
6617        return status;
6618}
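/*
 * Chunking example (the malloc_size value is illustrative): with
 * ioc->malloc_size = 4096, a 10 KiB transfer is split into kmalloc'd
 * chunks of 4096 + 4096 + 2048 bytes, each mapped as one SG entry,
 * with HPSA_SG_LAST set on the final entry.
 */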
6619
6620static void check_ioctl_unit_attention(struct ctlr_info *h,
6621        struct CommandList *c)
6622{
6623        if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6624                        c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6625                (void) check_for_unit_attention(h, c);
6626}
6627
6628/*
6629 * ioctl
6630 */
6631static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
6632                      void __user *argp)
6633{
6634        struct ctlr_info *h = sdev_to_hba(dev);
6635        int rc;
6636
6637        switch (cmd) {
6638        case CCISS_DEREGDISK:
6639        case CCISS_REGNEWDISK:
6640        case CCISS_REGNEWD:
6641                hpsa_scan_start(h->scsi_host);
6642                return 0;
6643        case CCISS_GETPCIINFO:
6644                return hpsa_getpciinfo_ioctl(h, argp);
6645        case CCISS_GETDRIVVER:
6646                return hpsa_getdrivver_ioctl(h, argp);
6647        case CCISS_PASSTHRU: {
6648                IOCTL_Command_struct iocommand;
6649
6650                if (!argp)
6651                        return -EINVAL;
6652                if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6653                        return -EFAULT;
6654                if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6655                        return -EAGAIN;
6656                rc = hpsa_passthru_ioctl(h, &iocommand);
6657                atomic_inc(&h->passthru_cmds_avail);
6658                if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
6659                        rc = -EFAULT;
6660                return rc;
6661        }
6662        case CCISS_BIG_PASSTHRU: {
6663                BIG_IOCTL_Command_struct ioc;
6664                if (!argp)
6665                        return -EINVAL;
6666                if (copy_from_user(&ioc, argp, sizeof(ioc)))
6667                        return -EFAULT;
6668                if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6669                        return -EAGAIN;
6670                rc = hpsa_big_passthru_ioctl(h, &ioc);
6671                atomic_inc(&h->passthru_cmds_avail);
6672                if (!rc && copy_to_user(argp, &ioc, sizeof(ioc)))
6673                        rc = -EFAULT;
6674                return rc;
6675        }
6676        default:
6677                return -ENOTTY;
6678        }
6679}
6680
6681static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
6682{
6683        struct CommandList *c;
6684
6685        c = cmd_alloc(h);
6686
6687        /* fill_cmd can't fail here, no data buffer to map */
6688        (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6689                RAID_CTLR_LUNID, TYPE_MSG);
6690        c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6691        c->waiting = NULL;
6692        enqueue_cmd_and_start_io(h, c);
6693        /* Don't wait for completion, the reset won't complete.  Don't free
6694         * the command either.  This is the last command we will send before
6695         * re-initializing everything, so it doesn't matter and won't leak.
6696         */
6697        return;
6698}
6699
6700static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6701        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6702        int cmd_type)
6703{
6704        enum dma_data_direction dir = DMA_NONE;
6705
6706        c->cmd_type = CMD_IOCTL_PEND;
6707        c->scsi_cmd = SCSI_CMD_BUSY;
6708        c->Header.ReplyQueue = 0;
6709        if (buff != NULL && size > 0) {
6710                c->Header.SGList = 1;
6711                c->Header.SGTotal = cpu_to_le16(1);
6712        } else {
6713                c->Header.SGList = 0;
6714                c->Header.SGTotal = cpu_to_le16(0);
6715        }
6716        memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6717
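        /*
         * Each case below fills c->Request the same way:
         * TYPE_ATTR_DIR() (from hpsa_cmd.h) folds the command type,
         * SIMPLE queueing attribute and transfer direction into the
         * single type_attr_dir byte, and the CDB bytes follow the
         * relevant SCSI or BMIC command format.
         */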
6718        if (cmd_type == TYPE_CMD) {
6719                switch (cmd) {
6720                case HPSA_INQUIRY:
6721                        /* are we trying to read a vital product page */
6722                        if (page_code & VPD_PAGE) {
6723                                c->Request.CDB[1] = 0x01;
6724                                c->Request.CDB[2] = (page_code & 0xff);
6725                        }
6726                        c->Request.CDBLen = 6;
6727                        c->Request.type_attr_dir =
6728                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6729                        c->Request.Timeout = 0;
6730                        c->Request.CDB[0] = HPSA_INQUIRY;
6731                        c->Request.CDB[4] = size & 0xFF;
6732                        break;
6733                case RECEIVE_DIAGNOSTIC:
6734                        c->Request.CDBLen = 6;
6735                        c->Request.type_attr_dir =
6736                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6737                        c->Request.Timeout = 0;
6738                        c->Request.CDB[0] = cmd;
6739                        c->Request.CDB[1] = 1;
6740                        c->Request.CDB[2] = 1;
6741                        c->Request.CDB[3] = (size >> 8) & 0xFF;
6742                        c->Request.CDB[4] = size & 0xFF;
6743                        break;
6744                case HPSA_REPORT_LOG:
6745                case HPSA_REPORT_PHYS:
6746                        /* Talking to the controller, so it's a physical
6747                           command: mode = 00, target = 0.  Nothing to write.
6748                         */
6749                        c->Request.CDBLen = 12;
6750                        c->Request.type_attr_dir =
6751                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6752                        c->Request.Timeout = 0;
6753                        c->Request.CDB[0] = cmd;
6754                        c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6755                        c->Request.CDB[7] = (size >> 16) & 0xFF;
6756                        c->Request.CDB[8] = (size >> 8) & 0xFF;
6757                        c->Request.CDB[9] = size & 0xFF;
6758                        break;
6759                case BMIC_SENSE_DIAG_OPTIONS:
6760                        c->Request.CDBLen = 16;
6761                        c->Request.type_attr_dir =
6762                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6763                        c->Request.Timeout = 0;
6764                        /* Spec says this should be BMIC_WRITE */
6765                        c->Request.CDB[0] = BMIC_READ;
6766                        c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6767                        break;
6768                case BMIC_SET_DIAG_OPTIONS:
6769                        c->Request.CDBLen = 16;
6770                        c->Request.type_attr_dir =
6771                                        TYPE_ATTR_DIR(cmd_type,
6772                                                ATTR_SIMPLE, XFER_WRITE);
6773                        c->Request.Timeout = 0;
6774                        c->Request.CDB[0] = BMIC_WRITE;
6775                        c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6776                        break;
6777                case HPSA_CACHE_FLUSH:
6778                        c->Request.CDBLen = 12;
6779                        c->Request.type_attr_dir =
6780                                        TYPE_ATTR_DIR(cmd_type,
6781                                                ATTR_SIMPLE, XFER_WRITE);
6782                        c->Request.Timeout = 0;
6783                        c->Request.CDB[0] = BMIC_WRITE;
6784                        c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6785                        c->Request.CDB[7] = (size >> 8) & 0xFF;
6786                        c->Request.CDB[8] = size & 0xFF;
6787                        break;
6788                case TEST_UNIT_READY:
6789                        c->Request.CDBLen = 6;
6790                        c->Request.type_attr_dir =
6791                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6792                        c->Request.Timeout = 0;
6793                        break;
6794                case HPSA_GET_RAID_MAP:
6795                        c->Request.CDBLen = 12;
6796                        c->Request.type_attr_dir =
6797                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6798                        c->Request.Timeout = 0;
6799                        c->Request.CDB[0] = HPSA_CISS_READ;
6800                        c->Request.CDB[1] = cmd;
6801                        c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6802                        c->Request.CDB[7] = (size >> 16) & 0xFF;
6803                        c->Request.CDB[8] = (size >> 8) & 0xFF;
6804                        c->Request.CDB[9] = size & 0xFF;
6805                        break;
6806                case BMIC_SENSE_CONTROLLER_PARAMETERS:
6807                        c->Request.CDBLen = 10;
6808                        c->Request.type_attr_dir =
6809                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6810                        c->Request.Timeout = 0;
6811                        c->Request.CDB[0] = BMIC_READ;
6812                        c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6813                        c->Request.CDB[7] = (size >> 16) & 0xFF;
6814                        c->Request.CDB[8] = (size >> 8) & 0xFF;
6815                        break;
6816                case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6817                        c->Request.CDBLen = 10;
6818                        c->Request.type_attr_dir =
6819                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6820                        c->Request.Timeout = 0;
6821                        c->Request.CDB[0] = BMIC_READ;
6822                        c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6823                        c->Request.CDB[7] = (size >> 16) & 0xFF;
6824                        c->Request.CDB[8] = (size >> 8) & 0xFF;
6825                        break;
6826                case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6827                        c->Request.CDBLen = 10;
6828                        c->Request.type_attr_dir =
6829                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6830                        c->Request.Timeout = 0;
6831                        c->Request.CDB[0] = BMIC_READ;
6832                        c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6833                        c->Request.CDB[7] = (size >> 16) & 0xFF;
6834                        c->Request.CDB[8] = (size >> 8) & 0xFF;
6835                        break;
6836                case BMIC_SENSE_STORAGE_BOX_PARAMS:
6837                        c->Request.CDBLen = 10;
6838                        c->Request.type_attr_dir =
6839                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6840                        c->Request.Timeout = 0;
6841                        c->Request.CDB[0] = BMIC_READ;
6842                        c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6843                        c->Request.CDB[7] = (size >> 16) & 0xFF;
6844                        c->Request.CDB[8] = (size >> 8) & 0xFF;
6845                        break;
6846                case BMIC_IDENTIFY_CONTROLLER:
6847                        c->Request.CDBLen = 10;
6848                        c->Request.type_attr_dir =
6849                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6850                        c->Request.Timeout = 0;
6851                        c->Request.CDB[0] = BMIC_READ;
6852                        c->Request.CDB[1] = 0;
6853                        c->Request.CDB[2] = 0;
6854                        c->Request.CDB[3] = 0;
6855                        c->Request.CDB[4] = 0;
6856                        c->Request.CDB[5] = 0;
6857                        c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6858                        c->Request.CDB[7] = (size >> 16) & 0xFF;
6859                        c->Request.CDB[8] = (size >> 8) & 0xFF;
6860                        c->Request.CDB[9] = 0;
6861                        break;
6862                default:
6863                        dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6864                        BUG();
6865                }
6866        } else if (cmd_type == TYPE_MSG) {
6867                switch (cmd) {
6868
6869                case  HPSA_PHYS_TARGET_RESET:
6870                        c->Request.CDBLen = 16;
6871                        c->Request.type_attr_dir =
6872                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6873                        c->Request.Timeout = 0; /* Don't time out */
6874                        memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6875                        c->Request.CDB[0] = HPSA_RESET;
6876                        c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6877                        /* Physical target reset needs no control bytes 4-7*/
6878                        c->Request.CDB[4] = 0x00;
6879                        c->Request.CDB[5] = 0x00;
6880                        c->Request.CDB[6] = 0x00;
6881                        c->Request.CDB[7] = 0x00;
6882                        break;
6883                case  HPSA_DEVICE_RESET_MSG:
6884                        c->Request.CDBLen = 16;
6885                        c->Request.type_attr_dir =
6886                                TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6887                        c->Request.Timeout = 0; /* Don't time out */
6888                        memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6889                        c->Request.CDB[0] =  cmd;
6890                        c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6891                        /* If bytes 4-7 are zero, it means reset the */
6892                        /* LunID device */
6893                        c->Request.CDB[4] = 0x00;
6894                        c->Request.CDB[5] = 0x00;
6895                        c->Request.CDB[6] = 0x00;
6896                        c->Request.CDB[7] = 0x00;
6897                        break;
6898                default:
6899                        dev_warn(&h->pdev->dev, "unknown message type %d\n",
6900                                cmd);
6901                        BUG();
6902                }
6903        } else {
6904                dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6905                BUG();
6906        }
6907
6908        switch (GET_DIR(c->Request.type_attr_dir)) {
6909        case XFER_READ:
6910                dir = DMA_FROM_DEVICE;
6911                break;
6912        case XFER_WRITE:
6913                dir = DMA_TO_DEVICE;
6914                break;
6915        case XFER_NONE:
6916                dir = DMA_NONE;
6917                break;
6918        default:
6919                dir = DMA_BIDIRECTIONAL;
6920        }
6921        if (hpsa_map_one(h->pdev, c, buff, size, dir))
6922                return -1;
6923        return 0;
6924}
6925
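/*
 * Illustrative sketch (not used by the driver): type_attr_dir packs the
 * command type, queue attribute, and transfer direction into one byte;
 * GET_DIR() recovers the direction, which is how fill_cmd() above picks
 * the DMA mapping direction.
 */
static inline u8 example_pack_and_get_dir(void)
{
        u8 tad = TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);

        /* Yields XFER_READ, which fill_cmd() maps to DMA_FROM_DEVICE. */
        return GET_DIR(tad);
}
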
6926/*
6927 * Map (physical) PCI mem into (virtual) kernel space
6928 */
6929static void __iomem *remap_pci_mem(ulong base, ulong size)
6930{
6931        ulong page_base = ((ulong) base) & PAGE_MASK;
6932        ulong page_offs = ((ulong) base) - page_base;
6933        void __iomem *page_remapped = ioremap(page_base,
6934                page_offs + size);
6935
6936        return page_remapped ? (page_remapped + page_offs) : NULL;
6937}
6938
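/*
 * Worked example (illustrative) for remap_pci_mem() above: with 4 KiB
 * pages, base = 0x1000250 and size = 0x100 give page_base = 0x1000000
 * and page_offs = 0x250, so ioremap() covers 0x350 bytes and the caller
 * receives the mapping advanced by 0x250.
 */
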
6939static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6940{
6941        return h->access.command_completed(h, q);
6942}
6943
6944static inline bool interrupt_pending(struct ctlr_info *h)
6945{
6946        return h->access.intr_pending(h);
6947}
6948
6949static inline long interrupt_not_for_us(struct ctlr_info *h)
6950{
6951        return (h->access.intr_pending(h) == 0) ||
6952                (h->interrupts_enabled == 0);
6953}
6954
6955static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6956        u32 raw_tag)
6957{
6958        if (unlikely(tag_index >= h->nr_cmds)) {
6959                dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6960                return 1;
6961        }
6962        return 0;
6963}
6964
6965static inline void finish_cmd(struct CommandList *c)
6966{
6967        dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6968        if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6969                        || c->cmd_type == CMD_IOACCEL2))
6970                complete_scsi_command(c);
6971        else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6972                complete(c->waiting);
6973}
6974
6975/* process completion of an indexed ("direct lookup") command */
6976static inline void process_indexed_cmd(struct ctlr_info *h,
6977        u32 raw_tag)
6978{
6979        u32 tag_index;
6980        struct CommandList *c;
6981
6982        tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6983        if (!bad_tag(h, tag_index, raw_tag)) {
6984                c = h->cmd_pool + tag_index;
6985                finish_cmd(c);
6986        }
6987}
6988
6989/* Some controllers, like the P400, will give us one interrupt
6990 * after a soft reset, even if we turned interrupts off.
6991 * Only need to check for this in the hpsa_xxx_discard_completions
6992 * functions.
6993 */
6994static int ignore_bogus_interrupt(struct ctlr_info *h)
6995{
6996        if (likely(!reset_devices))
6997                return 0;
6998
6999        if (likely(h->interrupts_enabled))
7000                return 0;
7001
7002        dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
7003                "(known firmware bug). Ignoring.\n");
7004
7005        return 1;
7006}
7007
7008/*
7009 * Convert &h->q[x] (passed to interrupt handlers) back to h.
7010 * Relies on (h->q[x] == x) being true for x such that
7011 * 0 <= x < MAX_REPLY_QUEUES.
7012 */
7013static struct ctlr_info *queue_to_hba(u8 *queue)
7014{
7015        return container_of((queue - *queue), struct ctlr_info, q[0]);
7016}
7017
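/*
 * Sketch (illustrative only): because hpsa_request_irqs() initializes
 * h->q[x] = x, subtracting the stored index from the element's address
 * lands on &h->q[0], from which container_of() recovers the ctlr_info.
 */
static inline void example_queue_to_hba_roundtrip(struct ctlr_info *h, u8 x)
{
        u8 *queue = &h->q[x];   /* for x < MAX_REPLY_QUEUES, *queue == x */

        BUG_ON(queue_to_hba(queue) != h);
}
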
7018static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
7019{
7020        struct ctlr_info *h = queue_to_hba(queue);
7021        u8 q = *(u8 *) queue;
7022        u32 raw_tag;
7023
7024        if (ignore_bogus_interrupt(h))
7025                return IRQ_NONE;
7026
7027        if (interrupt_not_for_us(h))
7028                return IRQ_NONE;
7029        h->last_intr_timestamp = get_jiffies_64();
7030        while (interrupt_pending(h)) {
7031                raw_tag = get_next_completion(h, q);
7032                while (raw_tag != FIFO_EMPTY)
7033                        raw_tag = next_command(h, q);
7034        }
7035        return IRQ_HANDLED;
7036}
7037
7038static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
7039{
7040        struct ctlr_info *h = queue_to_hba(queue);
7041        u32 raw_tag;
7042        u8 q = *(u8 *) queue;
7043
7044        if (ignore_bogus_interrupt(h))
7045                return IRQ_NONE;
7046
7047        h->last_intr_timestamp = get_jiffies_64();
7048        raw_tag = get_next_completion(h, q);
7049        while (raw_tag != FIFO_EMPTY)
7050                raw_tag = next_command(h, q);
7051        return IRQ_HANDLED;
7052}
7053
7054static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
7055{
7056        struct ctlr_info *h = queue_to_hba((u8 *) queue);
7057        u32 raw_tag;
7058        u8 q = *(u8 *) queue;
7059
7060        if (interrupt_not_for_us(h))
7061                return IRQ_NONE;
7062        h->last_intr_timestamp = get_jiffies_64();
7063        while (interrupt_pending(h)) {
7064                raw_tag = get_next_completion(h, q);
7065                while (raw_tag != FIFO_EMPTY) {
7066                        process_indexed_cmd(h, raw_tag);
7067                        raw_tag = next_command(h, q);
7068                }
7069        }
7070        return IRQ_HANDLED;
7071}
7072
7073static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7074{
7075        struct ctlr_info *h = queue_to_hba(queue);
7076        u32 raw_tag;
7077        u8 q = *(u8 *) queue;
7078
7079        h->last_intr_timestamp = get_jiffies_64();
7080        raw_tag = get_next_completion(h, q);
7081        while (raw_tag != FIFO_EMPTY) {
7082                process_indexed_cmd(h, raw_tag);
7083                raw_tag = next_command(h, q);
7084        }
7085        return IRQ_HANDLED;
7086}
7087
7088/* Send a message CDB to the firmware. Careful, this only works
7089 * in simple mode, not performant mode due to the tag lookup.
7090 * We only ever use this immediately after a controller reset.
7091 */
7092static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7093                        unsigned char type)
7094{
7095        struct Command {
7096                struct CommandListHeader CommandHeader;
7097                struct RequestBlock Request;
7098                struct ErrDescriptor ErrorDescriptor;
7099        };
7100        struct Command *cmd;
7101        static const size_t cmd_sz = sizeof(*cmd) +
7102                                        sizeof(struct ErrorInfo);
7103        dma_addr_t paddr64;
7104        __le32 paddr32;
7105        u32 tag;
7106        void __iomem *vaddr;
7107        int i, err;
7108
7109        vaddr = pci_ioremap_bar(pdev, 0);
7110        if (vaddr == NULL)
7111                return -ENOMEM;
7112
7113        /* The Inbound Post Queue only accepts 32-bit physical addresses for the
7114         * CCISS commands, so they must be allocated from the lower 4GiB of
7115         * memory.
7116         */
7117        err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7118        if (err) {
7119                iounmap(vaddr);
7120                return err;
7121        }
7122
7123        cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
7124        if (cmd == NULL) {
7125                iounmap(vaddr);
7126                return -ENOMEM;
7127        }
7128
7129        /* This must fit, because of the 32-bit consistent DMA mask.  Also,
7130         * although there's no guarantee, we assume that the address is at
7131         * least 4-byte aligned (most likely, it's page-aligned).
7132         */
7133        paddr32 = cpu_to_le32(paddr64);
7134
7135        cmd->CommandHeader.ReplyQueue = 0;
7136        cmd->CommandHeader.SGList = 0;
7137        cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7138        cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7139        memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7140
7141        cmd->Request.CDBLen = 16;
7142        cmd->Request.type_attr_dir =
7143                        TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7144        cmd->Request.Timeout = 0; /* Don't time out */
7145        cmd->Request.CDB[0] = opcode;
7146        cmd->Request.CDB[1] = type;
7147        memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
7148        cmd->ErrorDescriptor.Addr =
7149                        cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7150        cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7151
7152        writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7153
7154        for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7155                tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7156                if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7157                        break;
7158                msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7159        }
7160
7161        iounmap(vaddr);
7162
7163        /* we leak the DMA buffer here ... no choice since the controller could
7164         *  still complete the command.
7165         */
7166        if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7167                dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7168                        opcode, type);
7169                return -ETIMEDOUT;
7170        }
7171
7172        dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
7173
7174        if (tag & HPSA_ERROR_BIT) {
7175                dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7176                        opcode, type);
7177                return -EIO;
7178        }
7179
7180        dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7181                opcode, type);
7182        return 0;
7183}
7184
7185#define hpsa_noop(p) hpsa_message(p, 3, 0)
7186
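/*
 * Usage sketch (illustrative): after a reset the driver pokes the board
 * with the no-op message until it answers, as hpsa_init_reset_devices()
 * does below.
 */
static inline bool example_controller_responding(struct pci_dev *pdev)
{
        return hpsa_noop(pdev) == 0;
}
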
7187static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7188        void __iomem *vaddr, u32 use_doorbell)
7189{
7191        if (use_doorbell) {
7192                /* For everything after the P600, the PCI power state method
7193                 * of resetting the controller doesn't work, so we have this
7194                 * other way using the doorbell register.
7195                 */
7196                dev_info(&pdev->dev, "using doorbell to reset controller\n");
7197                writel(use_doorbell, vaddr + SA5_DOORBELL);
7198
7199                /* PMC hardware guys tell us we need a 10 second delay after
7200                 * doorbell reset and before any attempt to talk to the board
7201                 * at all to ensure that this actually works and doesn't fall
7202                 * over in some weird corner cases.
7203                 */
7204                msleep(10000);
7205        } else { /* Try to do it the PCI power state way */
7206
7207                /* Quoting from the Open CISS Specification: "The Power
7208                 * Management Control/Status Register (CSR) controls the power
7209                 * state of the device.  The normal operating state is D0,
7210                 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
7211                 * the controller, place the interface device in D3 then to D0,
7212                 * this causes a secondary PCI reset which will reset the
7213                 * controller." */
7214
7215                int rc = 0;
7216
7217                dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7218
7219                /* enter the D3hot power management state */
7220                rc = pci_set_power_state(pdev, PCI_D3hot);
7221                if (rc)
7222                        return rc;
7223
7224                msleep(500);
7225
7226                /* enter the D0 power management state */
7227                rc = pci_set_power_state(pdev, PCI_D0);
7228                if (rc)
7229                        return rc;
7230
7231                /*
7232                 * The P600 requires a small delay when changing states.
7233                 * Otherwise we may think the board did not reset and we bail.
7234                 * This for kdump only and is particular to the P600.
7235                 */
7236                msleep(500);
7237        }
7238        return 0;
7239}
7240
7241static void init_driver_version(char *driver_version, int len)
7242{
7243        memset(driver_version, 0, len);
7244        strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7245}
7246
7247static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7248{
7249        char *driver_version;
7250        int i, size = sizeof(cfgtable->driver_version);
7251
7252        driver_version = kmalloc(size, GFP_KERNEL);
7253        if (!driver_version)
7254                return -ENOMEM;
7255
7256        init_driver_version(driver_version, size);
7257        for (i = 0; i < size; i++)
7258                writeb(driver_version[i], &cfgtable->driver_version[i]);
7259        kfree(driver_version);
7260        return 0;
7261}
7262
7263static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7264                                          unsigned char *driver_ver)
7265{
7266        int i;
7267
7268        for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7269                driver_ver[i] = readb(&cfgtable->driver_version[i]);
7270}
7271
7272static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7273{
7275        char *driver_ver, *old_driver_ver;
7276        int rc, size = sizeof(cfgtable->driver_version);
7277
7278        old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
7279        if (!old_driver_ver)
7280                return -ENOMEM;
7281        driver_ver = old_driver_ver + size;
7282
7283        /* After a reset, the 32 bytes of "driver version" in the cfgtable
7284         * should have been changed, otherwise we know the reset failed.
7285         */
7286        init_driver_version(old_driver_ver, size);
7287        read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7288        rc = !memcmp(driver_ver, old_driver_ver, size);
7289        kfree(old_driver_ver);
7290        return rc;
7291}
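
/*
 * Sketch (illustrative): the reset check is a write/compare round trip.
 * Stamp the running driver version into the config table, reset the
 * board, then compare; if the bytes survived, the reset did not happen.
 */
static inline int example_reset_check(struct CfgTable __iomem *cfgtable)
{
        int rc = write_driver_ver_to_cfgtable(cfgtable);

        if (rc)
                return rc;
        /* ... hard reset the controller here ... */
        return controller_reset_failed(cfgtable); /* nonzero: reset failed */
}
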
7292/* This does a hard reset of the controller using PCI power management
7293 * states or the doorbell register.
7294 */
7295static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7296{
7297        u64 cfg_offset;
7298        u32 cfg_base_addr;
7299        u64 cfg_base_addr_index;
7300        void __iomem *vaddr;
7301        unsigned long paddr;
7302        u32 misc_fw_support;
7303        int rc;
7304        struct CfgTable __iomem *cfgtable;
7305        u32 use_doorbell;
7306        u16 command_register;
7307
7308        /* For controllers as old as the P600, this is very nearly
7309         * the same thing as
7310         *
7311         * pci_save_state(pci_dev);
7312         * pci_set_power_state(pci_dev, PCI_D3hot);
7313         * pci_set_power_state(pci_dev, PCI_D0);
7314         * pci_restore_state(pci_dev);
7315         *
7316         * For controllers newer than the P600, the pci power state
7317         * method of resetting doesn't work so we have another way
7318         * using the doorbell register.
7319         */
7320
7321        if (!ctlr_is_resettable(board_id)) {
7322                dev_warn(&pdev->dev, "Controller not resettable\n");
7323                return -ENODEV;
7324        }
7325
7326        /* if controller is soft- but not hard resettable... */
7327        if (!ctlr_is_hard_resettable(board_id))
7328                return -ENOTSUPP; /* try soft reset later. */
7329
7330        /* Save the PCI command register */
7331        pci_read_config_word(pdev, 4, &command_register);
7332        pci_save_state(pdev);
7333
7334        /* find the first memory BAR, so we can find the cfg table */
7335        rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7336        if (rc)
7337                return rc;
7338        vaddr = remap_pci_mem(paddr, 0x250);
7339        if (!vaddr)
7340                return -ENOMEM;
7341
7342        /* find cfgtable in order to check if reset via doorbell is supported */
7343        rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7344                                        &cfg_base_addr_index, &cfg_offset);
7345        if (rc)
7346                goto unmap_vaddr;
7347        cfgtable = remap_pci_mem(pci_resource_start(pdev,
7348                       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7349        if (!cfgtable) {
7350                rc = -ENOMEM;
7351                goto unmap_vaddr;
7352        }
7353        rc = write_driver_ver_to_cfgtable(cfgtable);
7354        if (rc)
7355                goto unmap_cfgtable;
7356
7357        /* If reset via doorbell register is supported, use that.
7358         * There are two such methods.  Favor the newest method.
7359         */
7360        misc_fw_support = readl(&cfgtable->misc_fw_support);
7361        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7362        if (use_doorbell) {
7363                use_doorbell = DOORBELL_CTLR_RESET2;
7364        } else {
7365                use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7366                if (use_doorbell) {
7367                        dev_warn(&pdev->dev,
7368                                "Soft reset not supported. Firmware update is required.\n");
7369                        rc = -ENOTSUPP; /* try soft reset */
7370                        goto unmap_cfgtable;
7371                }
7372        }
7373
7374        rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7375        if (rc)
7376                goto unmap_cfgtable;
7377
7378        pci_restore_state(pdev);
7379        pci_write_config_word(pdev, 4, command_register);
7380
7381        /* Some devices (notably the HP Smart Array 5i Controller)
7382           need a little pause here */
7383        msleep(HPSA_POST_RESET_PAUSE_MSECS);
7384
7385        rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7386        if (rc) {
7387                dev_warn(&pdev->dev,
7388                        "Failed waiting for board to become ready after hard reset\n");
7389                goto unmap_cfgtable;
7390        }
7391
7392        rc = controller_reset_failed(cfgtable);
7393        if (rc < 0)
7394                goto unmap_cfgtable;
7395        if (rc) {
7396                dev_warn(&pdev->dev, "Unable to successfully reset "
7397                        "controller. Will try soft reset.\n");
7398                rc = -ENOTSUPP;
7399        } else {
7400                dev_info(&pdev->dev, "board ready after hard reset.\n");
7401        }
7402
7403unmap_cfgtable:
7404        iounmap(cfgtable);
7405
7406unmap_vaddr:
7407        iounmap(vaddr);
7408        return rc;
7409}
7410
7411/*
7412 *  We cannot read the structure directly, for portability we must use
7413 *   the io functions.
7414 *   This is for debug only.
7415 */
7416static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7417{
7418#ifdef HPSA_DEBUG
7419        int i;
7420        char temp_name[17];
7421
7422        dev_info(dev, "Controller Configuration information\n");
7423        dev_info(dev, "------------------------------------\n");
7424        for (i = 0; i < 4; i++)
7425                temp_name[i] = readb(&(tb->Signature[i]));
7426        temp_name[4] = '\0';
7427        dev_info(dev, "   Signature = %s\n", temp_name);
7428        dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
7429        dev_info(dev, "   Transport methods supported = 0x%x\n",
7430               readl(&(tb->TransportSupport)));
7431        dev_info(dev, "   Transport methods active = 0x%x\n",
7432               readl(&(tb->TransportActive)));
7433        dev_info(dev, "   Requested transport Method = 0x%x\n",
7434               readl(&(tb->HostWrite.TransportRequest)));
7435        dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
7436               readl(&(tb->HostWrite.CoalIntDelay)));
7437        dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
7438               readl(&(tb->HostWrite.CoalIntCount)));
7439        dev_info(dev, "   Max outstanding commands = %d\n",
7440               readl(&(tb->CmdsOutMax)));
7441        dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7442        for (i = 0; i < 16; i++)
7443                temp_name[i] = readb(&(tb->ServerName[i]));
7444        temp_name[16] = '\0';
7445        dev_info(dev, "   Server Name = %s\n", temp_name);
7446        dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
7447                readl(&(tb->HeartBeat)));
7448#endif                          /* HPSA_DEBUG */
7449}
7450
7451static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7452{
7453        int i, offset, mem_type, bar_type;
7454
7455        if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7456                return 0;
7457        offset = 0;
7458        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7459                bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7460                if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7461                        offset += 4;
7462                else {
7463                        mem_type = pci_resource_flags(pdev, i) &
7464                            PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7465                        switch (mem_type) {
7466                        case PCI_BASE_ADDRESS_MEM_TYPE_32:
7467                        case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7468                                offset += 4;    /* 32 bit */
7469                                break;
7470                        case PCI_BASE_ADDRESS_MEM_TYPE_64:
7471                                offset += 8;
7472                                break;
7473                        default:        /* reserved in PCI 2.2 */
7474                                dev_warn(&pdev->dev,
7475                                       "base address is invalid\n");
7476                                return -1;
7477                        }
7478                }
7479                if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7480                        return i + 1;
7481        }
7482        return -1;
7483}
7484
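/*
 * Worked example (illustrative): if BAR 0 is a 64-bit memory BAR it
 * consumes 8 bytes of config space, so a target of PCI_BASE_ADDRESS_2
 * (offset 0x18) resolves to resource index 1.
 */
static inline int example_find_bar2_index(struct pci_dev *pdev)
{
        return find_PCI_BAR_index(pdev, PCI_BASE_ADDRESS_2);
}
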
7485static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7486{
7487        pci_free_irq_vectors(h->pdev);
7488        h->msix_vectors = 0;
7489}
7490
7491static void hpsa_setup_reply_map(struct ctlr_info *h)
7492{
7493        const struct cpumask *mask;
7494        unsigned int queue, cpu;
7495
7496        for (queue = 0; queue < h->msix_vectors; queue++) {
7497                mask = pci_irq_get_affinity(h->pdev, queue);
7498                if (!mask)
7499                        goto fallback;
7500
7501                for_each_cpu(cpu, mask)
7502                        h->reply_map[cpu] = queue;
7503        }
7504        return;
7505
7506fallback:
7507        for_each_possible_cpu(cpu)
7508                h->reply_map[cpu] = 0;
7509}
7510
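/*
 * Usage sketch (illustrative): the submission path can pick the reply
 * queue affine to the submitting CPU via the map built above.
 */
static inline unsigned int example_reply_queue_for_cpu(struct ctlr_info *h)
{
        return h->reply_map[raw_smp_processor_id()];
}
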
7511/* If MSI/MSI-X is supported by the kernel we will try to enable it on
7512 * controllers that are capable. If not, we use legacy INTx mode.
7513 */
7514static int hpsa_interrupt_mode(struct ctlr_info *h)
7515{
7516        unsigned int flags = PCI_IRQ_LEGACY;
7517        int ret;
7518
7519        /* Some boards advertise MSI but don't really support it */
7520        switch (h->board_id) {
7521        case 0x40700E11:
7522        case 0x40800E11:
7523        case 0x40820E11:
7524        case 0x40830E11:
7525                break;
7526        default:
7527                ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7528                                PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7529                if (ret > 0) {
7530                        h->msix_vectors = ret;
7531                        return 0;
7532                }
7533
7534                flags |= PCI_IRQ_MSI;
7535                break;
7536        }
7537
7538        ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7539        if (ret < 0)
7540                return ret;
7541        return 0;
7542}
7543
7544static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7545                                bool *legacy_board)
7546{
7547        int i;
7548        u32 subsystem_vendor_id, subsystem_device_id;
7549
7550        subsystem_vendor_id = pdev->subsystem_vendor;
7551        subsystem_device_id = pdev->subsystem_device;
7552        *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7553                    subsystem_vendor_id;
7554
7555        if (legacy_board)
7556                *legacy_board = false;
7557        for (i = 0; i < ARRAY_SIZE(products); i++)
7558                if (*board_id == products[i].board_id) {
7559                        if (products[i].access != &SA5A_access &&
7560                            products[i].access != &SA5B_access)
7561                                return i;
7562                        dev_warn(&pdev->dev,
7563                                 "legacy board ID: 0x%08x\n",
7564                                 *board_id);
7565                        if (legacy_board)
7566                            *legacy_board = true;
7567                        return i;
7568                }
7569
7570        dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7571        if (legacy_board)
7572                *legacy_board = true;
7573        return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7574}
7575
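/*
 * Worked example (illustrative): subsystem vendor 0x103C with subsystem
 * device 0x3241 packs into board_id 0x3241103C, the form matched against
 * the products[] table above.
 */
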
7576static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7577                                    unsigned long *memory_bar)
7578{
7579        int i;
7580
7581        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7582                if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7583                        /* addressing mode bits already removed */
7584                        *memory_bar = pci_resource_start(pdev, i);
7585                        dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7586                                *memory_bar);
7587                        return 0;
7588                }
7589        dev_warn(&pdev->dev, "no memory BAR found\n");
7590        return -ENODEV;
7591}
7592
7593static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7594                                     int wait_for_ready)
7595{
7596        int i, iterations;
7597        u32 scratchpad;

7598        if (wait_for_ready)
7599                iterations = HPSA_BOARD_READY_ITERATIONS;
7600        else
7601                iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7602
7603        for (i = 0; i < iterations; i++) {
7604                scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7605                if (wait_for_ready) {
7606                        if (scratchpad == HPSA_FIRMWARE_READY)
7607                                return 0;
7608                } else {
7609                        if (scratchpad != HPSA_FIRMWARE_READY)
7610                                return 0;
7611                }
7612                msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7613        }
7614        dev_warn(&pdev->dev, "board not ready, timed out.\n");
7615        return -ENODEV;
7616}
7617
7618static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7619                               u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7620                               u64 *cfg_offset)
7621{
7622        *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7623        *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7624        *cfg_base_addr &= (u32) 0x0000ffff;
7625        *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7626        if (*cfg_base_addr_index == -1) {
7627                dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7628                return -ENODEV;
7629        }
7630        return 0;
7631}
7632
7633static void hpsa_free_cfgtables(struct ctlr_info *h)
7634{
7635        if (h->transtable) {
7636                iounmap(h->transtable);
7637                h->transtable = NULL;
7638        }
7639        if (h->cfgtable) {
7640                iounmap(h->cfgtable);
7641                h->cfgtable = NULL;
7642        }
7643}
7644
7645/* Find and map CISS config table and transfer table
7646 * several items must be unmapped (freed) later
7647 */
7648static int hpsa_find_cfgtables(struct ctlr_info *h)
7649{
7650        u64 cfg_offset;
7651        u32 cfg_base_addr;
7652        u64 cfg_base_addr_index;
7653        u32 trans_offset;
7654        int rc;
7655
7656        rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7657                &cfg_base_addr_index, &cfg_offset);
7658        if (rc)
7659                return rc;
7660        h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7661                       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7662        if (!h->cfgtable) {
7663                dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7664                return -ENOMEM;
7665        }
7666        rc = write_driver_ver_to_cfgtable(h->cfgtable);
7667        if (rc)
7668                return rc;
7669        /* Find performant mode table. */
7670        trans_offset = readl(&h->cfgtable->TransMethodOffset);
7671        h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7672                                cfg_base_addr_index)+cfg_offset+trans_offset,
7673                                sizeof(*h->transtable));
7674        if (!h->transtable) {
7675                dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7676                hpsa_free_cfgtables(h);
7677                return -ENOMEM;
7678        }
7679        return 0;
7680}
7681
7682static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7683{
7684#define MIN_MAX_COMMANDS 16
7685        BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7686
7687        h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7688
7689        /* Limit commands in memory limited kdump scenario. */
7690        if (reset_devices && h->max_commands > 32)
7691                h->max_commands = 32;
7692
7693        if (h->max_commands < MIN_MAX_COMMANDS) {
7694                dev_warn(&h->pdev->dev,
7695                        "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
7696                        h->max_commands,
7697                        MIN_MAX_COMMANDS);
7698                h->max_commands = MIN_MAX_COMMANDS;
7699        }
7700}
7701
7702/* If the controller reports that the total max sg entries is greater than 512,
7703 * then we know that chained SG blocks work.  (Original smart arrays did not
7704 * support chained SG blocks and would return zero for max sg entries.)
7705 */
7706static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7707{
7708        return h->maxsgentries > 512;
7709}
7710
7711/* Interrogate the hardware for some limits:
7712 * max commands, max SG elements without chaining, and with chaining,
7713 * SG chain block size, etc.
7714 */
7715static void hpsa_find_board_params(struct ctlr_info *h)
7716{
7717        hpsa_get_max_perf_mode_cmds(h);
7718        h->nr_cmds = h->max_commands;
7719        h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7720        h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7721        if (hpsa_supports_chained_sg_blocks(h)) {
7722                /* Limit in-command s/g elements to 32 to save DMA-able memory. */
7723                h->max_cmd_sg_entries = 32;
7724                h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7725                h->maxsgentries--; /* save one for chain pointer */
7726        } else {
7727                /*
7728                 * Original smart arrays supported at most 31 s/g entries
7729                 * embedded inline in the command (trying to use more
7730                 * would lock up the controller)
7731                 */
7732                h->max_cmd_sg_entries = 31;
7733                h->maxsgentries = 31; /* default to traditional values */
7734                h->chainsize = 0;
7735        }
7736
7737        /* Find out what task management functions are supported and cache */
7738        h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7739        if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7740                dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7741        if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7742                dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7743        if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7744                dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7745}
7746
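/*
 * Worked example (illustrative): a controller reporting 2048 SG entries
 * supports chaining, so hpsa_find_board_params() above keeps 32 entries
 * inline, sizes the chain block at 2048 - 32 = 2016 entries, and ends up
 * advertising 2047 entries after reserving one slot for the chain pointer.
 */
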
7747static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7748{
7749        if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7750                dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7751                return false;
7752        }
7753        return true;
7754}
7755
7756static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7757{
7758        u32 driver_support;
7759
7760        driver_support = readl(&(h->cfgtable->driver_support));
7761        /* Need to enable prefetch in the SCSI core for the 6400 on x86 */
7762#ifdef CONFIG_X86
7763        driver_support |= ENABLE_SCSI_PREFETCH;
7764#endif
7765        driver_support |= ENABLE_UNIT_ATTN;
7766        writel(driver_support, &(h->cfgtable->driver_support));
7767}
7768
7769/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
7770 * in a prefetch beyond physical memory.
7771 */
7772static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7773{
7774        u32 dma_prefetch;
7775
7776        if (h->board_id != 0x3225103C)
7777                return;
7778        dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7779        dma_prefetch |= 0x8000;
7780        writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7781}
7782
7783static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7784{
7785        int i;
7786        u32 doorbell_value;
7787        unsigned long flags;
7788        /* wait until the clear_event_notify bit 6 is cleared by controller. */
7789        for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7790                spin_lock_irqsave(&h->lock, flags);
7791                doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7792                spin_unlock_irqrestore(&h->lock, flags);
7793                if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7794                        goto done;
7795                /* delay and try again */
7796                msleep(CLEAR_EVENT_WAIT_INTERVAL);
7797        }
7798        return -ENODEV;
7799done:
7800        return 0;
7801}
7802
7803static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7804{
7805        int i;
7806        u32 doorbell_value;
7807        unsigned long flags;
7808
7809        /* Under certain very rare conditions, this can take a while.
7810         * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7811         * as we enter this code.)
7812         */
7813        for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7814                if (h->remove_in_progress)
7815                        goto done;
7816                spin_lock_irqsave(&h->lock, flags);
7817                doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7818                spin_unlock_irqrestore(&h->lock, flags);
7819                if (!(doorbell_value & CFGTBL_ChangeReq))
7820                        goto done;
7821                /* delay and try again */
7822                msleep(MODE_CHANGE_WAIT_INTERVAL);
7823        }
7824        return -ENODEV;
7825done:
7826        return 0;
7827}
7828
7829/* return -ENODEV or other reason on error, 0 on success */
7830static int hpsa_enter_simple_mode(struct ctlr_info *h)
7831{
7832        u32 trans_support;
7833
7834        trans_support = readl(&(h->cfgtable->TransportSupport));
7835        if (!(trans_support & SIMPLE_MODE))
7836                return -ENOTSUPP;
7837
7838        h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7839
7840        /* Update the field, and then ring the doorbell */
7841        writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7842        writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7843        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7844        if (hpsa_wait_for_mode_change_ack(h))
7845                goto error;
7846        print_cfg_table(&h->pdev->dev, h->cfgtable);
7847        if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7848                goto error;
7849        h->transMethod = CFGTBL_Trans_Simple;
7850        return 0;
7851error:
7852        dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7853        return -ENODEV;
7854}
7855
7856/* free items allocated or mapped by hpsa_pci_init */
7857static void hpsa_free_pci_init(struct ctlr_info *h)
7858{
7859        hpsa_free_cfgtables(h);                 /* pci_init 4 */
7860        iounmap(h->vaddr);                      /* pci_init 3 */
7861        h->vaddr = NULL;
7862        hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
7863        /*
7864         * call pci_disable_device before pci_release_regions per
7865         * Documentation/driver-api/pci/pci.rst
7866         */
7867        pci_disable_device(h->pdev);            /* pci_init 1 */
7868        pci_release_regions(h->pdev);           /* pci_init 2 */
7869}
7870
7871/* several items must be freed later */
7872static int hpsa_pci_init(struct ctlr_info *h)
7873{
7874        int prod_index, err;
7875        bool legacy_board;
7876
7877        prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7878        if (prod_index < 0)
7879                return prod_index;
7880        h->product_name = products[prod_index].product_name;
7881        h->access = *(products[prod_index].access);
7882        h->legacy_board = legacy_board;
7883        pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7884                               PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7885
7886        err = pci_enable_device(h->pdev);
7887        if (err) {
7888                dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7889                pci_disable_device(h->pdev);
7890                return err;
7891        }
7892
7893        err = pci_request_regions(h->pdev, HPSA);
7894        if (err) {
7895                dev_err(&h->pdev->dev,
7896                        "failed to obtain PCI resources\n");
7897                pci_disable_device(h->pdev);
7898                return err;
7899        }
7900
7901        pci_set_master(h->pdev);
7902
7903        err = hpsa_interrupt_mode(h);
7904        if (err)
7905                goto clean1;
7906
7907        /* setup mapping between CPU and reply queue */
7908        hpsa_setup_reply_map(h);
7909
7910        err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7911        if (err)
7912                goto clean2;    /* intmode+region, pci */
7913        h->vaddr = remap_pci_mem(h->paddr, 0x250);
7914        if (!h->vaddr) {
7915                dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7916                err = -ENOMEM;
7917                goto clean2;    /* intmode+region, pci */
7918        }
7919        err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7920        if (err)
7921                goto clean3;    /* vaddr, intmode+region, pci */
7922        err = hpsa_find_cfgtables(h);
7923        if (err)
7924                goto clean3;    /* vaddr, intmode+region, pci */
7925        hpsa_find_board_params(h);
7926
7927        if (!hpsa_CISS_signature_present(h)) {
7928                err = -ENODEV;
7929                goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7930        }
7931        hpsa_set_driver_support_bits(h);
7932        hpsa_p600_dma_prefetch_quirk(h);
7933        err = hpsa_enter_simple_mode(h);
7934        if (err)
7935                goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7936        return 0;
7937
7938clean4: /* cfgtables, vaddr, intmode+region, pci */
7939        hpsa_free_cfgtables(h);
7940clean3: /* vaddr, intmode+region, pci */
7941        iounmap(h->vaddr);
7942        h->vaddr = NULL;
7943clean2: /* intmode+region, pci */
7944        hpsa_disable_interrupt_mode(h);
7945clean1:
7946        /*
7947         * call pci_disable_device before pci_release_regions per
7948         * Documentation/driver-api/pci/pci.rst
7949         */
7950        pci_disable_device(h->pdev);
7951        pci_release_regions(h->pdev);
7952        return err;
7953}
7954
7955static void hpsa_hba_inquiry(struct ctlr_info *h)
7956{
7957        int rc;
7958
7959#define HBA_INQUIRY_BYTE_COUNT 64
7960        h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7961        if (!h->hba_inquiry_data)
7962                return;
7963        rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7964                h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7965        if (rc != 0) {
7966                kfree(h->hba_inquiry_data);
7967                h->hba_inquiry_data = NULL;
7968        }
7969}
7970
7971static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7972{
7973        int rc, i;
7974        void __iomem *vaddr;
7975
7976        if (!reset_devices)
7977                return 0;
7978
7979        /* A kdump kernel is loading and we don't know what state the
7980         * PCI interface is in. dev->enable_cnt is zero, so we call
7981         * enable+disable, wait a while, and then switch it on.
7982         */
7983        rc = pci_enable_device(pdev);
7984        if (rc) {
7985                dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7986                return -ENODEV;
7987        }
7988        pci_disable_device(pdev);
7989        msleep(260);                    /* a randomly chosen number */
7990        rc = pci_enable_device(pdev);
7991        if (rc) {
7992                dev_warn(&pdev->dev, "failed to enable device.\n");
7993                return -ENODEV;
7994        }
7995
7996        pci_set_master(pdev);
7997
7998        vaddr = pci_ioremap_bar(pdev, 0);
7999        if (vaddr == NULL) {
8000                rc = -ENOMEM;
8001                goto out_disable;
8002        }
8003        writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
8004        iounmap(vaddr);
8005
8006        /* Reset the controller with a PCI power-cycle or via doorbell */
8007        rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
8008
8009        /* -ENOTSUPP here means we cannot reset the controller
8010         * but it's already (and still) up and running in
8011         * "performant mode".  Or, it might be 640x, which can't reset
8012         * due to concerns about shared bbwc between 6402/6404 pair.
8013         */
8014        if (rc)
8015                goto out_disable;
8016
8017        /* Now try to get the controller to respond to a no-op */
8018        dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
8019        for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
8020                if (hpsa_noop(pdev) == 0)
8021                        break;
8022                else
8023                        dev_warn(&pdev->dev, "no-op failed%s\n",
8024                                        (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
8025        }
8026
8027out_disable:
8028
8029        pci_disable_device(pdev);
8030        return rc;
8031}
8032
8033static void hpsa_free_cmd_pool(struct ctlr_info *h)
8034{
8035        kfree(h->cmd_pool_bits);
8036        h->cmd_pool_bits = NULL;
8037        if (h->cmd_pool) {
8038                dma_free_coherent(&h->pdev->dev,
8039                                h->nr_cmds * sizeof(struct CommandList),
8040                                h->cmd_pool,
8041                                h->cmd_pool_dhandle);
8042                h->cmd_pool = NULL;
8043                h->cmd_pool_dhandle = 0;
8044        }
8045        if (h->errinfo_pool) {
8046                dma_free_coherent(&h->pdev->dev,
8047                                h->nr_cmds * sizeof(struct ErrorInfo),
8048                                h->errinfo_pool,
8049                                h->errinfo_pool_dhandle);
8050                h->errinfo_pool = NULL;
8051                h->errinfo_pool_dhandle = 0;
8052        }
8053}
8054
8055static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8056{
8057        h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
8058                                   sizeof(unsigned long),
8059                                   GFP_KERNEL);
8060        h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
8061                    h->nr_cmds * sizeof(*h->cmd_pool),
8062                    &h->cmd_pool_dhandle, GFP_KERNEL);
8063        h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
8064                    h->nr_cmds * sizeof(*h->errinfo_pool),
8065                    &h->errinfo_pool_dhandle, GFP_KERNEL);
8066        if ((h->cmd_pool_bits == NULL)
8067            || (h->cmd_pool == NULL)
8068            || (h->errinfo_pool == NULL)) {
8069                dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
8070                goto clean_up;
8071        }
8072        hpsa_preinitialize_commands(h);
8073        return 0;
8074clean_up:
8075        hpsa_free_cmd_pool(h);
8076        return -ENOMEM;
8077}
8078
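/*
 * Worked example (illustrative): with nr_cmds = 1040 and 64-bit longs,
 * cmd_pool_bits needs DIV_ROUND_UP(1040, 64) = 17 unsigned longs (1088
 * bits) to track every command slot.
 */
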
8079/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
8080static void hpsa_free_irqs(struct ctlr_info *h)
8081{
8082        int i;
8083        int irq_vector = 0;
8084
8085        if (hpsa_simple_mode)
8086                irq_vector = h->intr_mode;
8087
8088        if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8089                /* Single reply queue, only one irq to free */
8090                free_irq(pci_irq_vector(h->pdev, irq_vector),
8091                                &h->q[h->intr_mode]);
8092                h->q[h->intr_mode] = 0;
8093                return;
8094        }
8095
8096        for (i = 0; i < h->msix_vectors; i++) {
8097                free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8098                h->q[i] = 0;
8099        }
8100        for (; i < MAX_REPLY_QUEUES; i++)
8101                h->q[i] = 0;
8102}
8103
8104/* returns 0 on success; cleans up and returns -Enn on error */
8105static int hpsa_request_irqs(struct ctlr_info *h,
8106        irqreturn_t (*msixhandler)(int, void *),
8107        irqreturn_t (*intxhandler)(int, void *))
8108{
8109        int rc, i;
8110        int irq_vector = 0;
8111
8112        if (hpsa_simple_mode)
8113                irq_vector = h->intr_mode;
8114
8115        /*
8116         * initialize h->q[x] = x so that interrupt handlers know which
8117         * queue to process.
8118         */
8119        for (i = 0; i < MAX_REPLY_QUEUES; i++)
8120                h->q[i] = (u8) i;
8121
8122        if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8123                /* If performant mode and MSI-X, use multiple reply queues */
8124                for (i = 0; i < h->msix_vectors; i++) {
8125                        sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8126                        rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8127                                        0, h->intrname[i],
8128                                        &h->q[i]);
8129                        if (rc) {
8130                                int j;
8131
8132                                dev_err(&h->pdev->dev,
8133                                        "failed to get irq %d for %s\n",
8134                                       pci_irq_vector(h->pdev, i), h->devname);
8135                                for (j = 0; j < i; j++) {
8136                                        free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8137                                        h->q[j] = 0;
8138                                }
8139                                for (; j < MAX_REPLY_QUEUES; j++)
8140                                        h->q[j] = 0;
8141                                return rc;
8142                        }
8143                }
8144        } else {
8145                /* Use single reply pool */
8146                if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8147                        sprintf(h->intrname[0], "%s-msi%s", h->devname,
8148                                h->msix_vectors ? "x" : "");
8149                        rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8150                                msixhandler, 0,
8151                                h->intrname[0],
8152                                &h->q[h->intr_mode]);
8153                } else {
8154                        sprintf(h->intrname[h->intr_mode],
8155                                "%s-intx", h->devname);
8156                        rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8157                                intxhandler, IRQF_SHARED,
8158                                h->intrname[h->intr_mode],
8159                                &h->q[h->intr_mode]);
8160                }
8161        }
8162        if (rc) {
8163                dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8164                       pci_irq_vector(h->pdev, irq_vector), h->devname);
8165                hpsa_free_irqs(h);
8166                return -ENODEV;
8167        }
8168        return 0;
8169}
8170
8171static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8172{
8173        int rc;

8174        hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
8175
8176        dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8177        rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8178        if (rc) {
8179                dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8180                return rc;
8181        }
8182
8183        dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8184        rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8185        if (rc) {
8186                dev_warn(&h->pdev->dev, "Board failed to become ready "
8187                        "after soft reset.\n");
8188                return rc;
8189        }
8190
8191        return 0;
8192}
8193
8194static void hpsa_free_reply_queues(struct ctlr_info *h)
8195{
8196        int i;
8197
8198        for (i = 0; i < h->nreply_queues; i++) {
8199                if (!h->reply_queue[i].head)
8200                        continue;
8201                dma_free_coherent(&h->pdev->dev,
8202                                        h->reply_queue_size,
8203                                        h->reply_queue[i].head,
8204                                        h->reply_queue[i].busaddr);
8205                h->reply_queue[i].head = NULL;
8206                h->reply_queue[i].busaddr = 0;
8207        }
8208        h->reply_queue_size = 0;
8209}
8210
8211static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8212{
8213        hpsa_free_performant_mode(h);           /* init_one 7 */
8214        hpsa_free_sg_chain_blocks(h);           /* init_one 6 */
8215        hpsa_free_cmd_pool(h);                  /* init_one 5 */
8216        hpsa_free_irqs(h);                      /* init_one 4 */
8217        scsi_host_put(h->scsi_host);            /* init_one 3 */
8218        h->scsi_host = NULL;                    /* init_one 3 */
8219        hpsa_free_pci_init(h);                  /* init_one 2_5 */
8220        free_percpu(h->lockup_detected);        /* init_one 2 */
8221        h->lockup_detected = NULL;              /* init_one 2 */
8222        if (h->resubmit_wq) {
8223                destroy_workqueue(h->resubmit_wq);      /* init_one 1 */
8224                h->resubmit_wq = NULL;
8225        }
8226        if (h->rescan_ctlr_wq) {
8227                destroy_workqueue(h->rescan_ctlr_wq);
8228                h->rescan_ctlr_wq = NULL;
8229        }
8230        if (h->monitor_ctlr_wq) {
8231                destroy_workqueue(h->monitor_ctlr_wq);
8232                h->monitor_ctlr_wq = NULL;
8233        }
8234
8235        kfree(h);                               /* init_one 1 */
8236}
8237
8238/* Called when controller lockup detected. */
8239static void fail_all_outstanding_cmds(struct ctlr_info *h)
8240{
8241        int i, refcount;
8242        struct CommandList *c;
8243        int failcount = 0;
8244
8245        flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8246        for (i = 0; i < h->nr_cmds; i++) {
8247                c = h->cmd_pool + i;
8248                refcount = atomic_inc_return(&c->refcount);
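                /* A free command's refcount is 0, so seeing a value > 1
                 * after the increment means this command is allocated and
                 * still outstanding.
                 */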
8249                if (refcount > 1) {
8250                        c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8251                        finish_cmd(c);
8252                        atomic_dec(&h->commands_outstanding);
8253                        failcount++;
8254                }
8255                cmd_free(h, c);
8256        }
8257        dev_warn(&h->pdev->dev,
8258                "failed %d commands in fail_all\n", failcount);
8259}
8260
8261static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8262{
8263        int cpu;
8264
8265        for_each_online_cpu(cpu) {
8266                u32 *lockup_detected;
8267                lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8268                *lockup_detected = value;
8269        }
8270        wmb(); /* be sure the per-cpu variables are out to memory */
8271}
8272
8273static void controller_lockup_detected(struct ctlr_info *h)
8274{
8275        unsigned long flags;
8276        u32 lockup_detected;
8277
8278        h->access.set_intr_mask(h, HPSA_INTR_OFF);
8279        spin_lock_irqsave(&h->lock, flags);
8280        lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8281        if (!lockup_detected) {
8282                /* no heartbeat, but controller gave us a zero. */
8283                dev_warn(&h->pdev->dev,
8284                        "lockup detected after %d seconds but scratchpad register is zero\n",
8285                        h->heartbeat_sample_interval / HZ);
8286                lockup_detected = 0xffffffff;
8287        }
8288        set_lockup_detected_for_all_cpus(h, lockup_detected);
8289        spin_unlock_irqrestore(&h->lock, flags);
8290        dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8291                        lockup_detected, h->heartbeat_sample_interval / HZ);
8292        if (lockup_detected == 0xffff0000) {
8293                dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8294                writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8295        }
8296        pci_disable_device(h->pdev);
8297        fail_all_outstanding_cmds(h);
8298}
8299
8300static int detect_controller_lockup(struct ctlr_info *h)
8301{
8302        u64 now;
8303        u32 heartbeat;
8304        unsigned long flags;
8305
8306        now = get_jiffies_64();
8307        /* If we've received an interrupt recently, we're ok. */
8308        if (time_after64(h->last_intr_timestamp +
8309                                (h->heartbeat_sample_interval), now))
8310                return false;
8311
8312        /*
8313         * If we've already checked the heartbeat recently, we're ok.
8314         * This could happen if someone sends us a signal. We
8315         * otherwise don't care about signals in this thread.
8316         */
8317        if (time_after64(h->last_heartbeat_timestamp +
8318                                (h->heartbeat_sample_interval), now))
8319                return false;
8320
8321        /* If heartbeat has not changed since we last looked, we're not ok. */
8322        spin_lock_irqsave(&h->lock, flags);
8323        heartbeat = readl(&h->cfgtable->HeartBeat);
8324        spin_unlock_irqrestore(&h->lock, flags);
8325        if (h->last_heartbeat == heartbeat) {
8326                controller_lockup_detected(h);
8327                return true;
8328        }
8329
8330        /* We're ok. */
8331        h->last_heartbeat = heartbeat;
8332        h->last_heartbeat_timestamp = now;
8333        return false;
8334}
8335
8336/*
8337 * Set ioaccel status for all ioaccel volumes.
8338 *
8339 * Called from monitor controller worker (hpsa_event_monitor_worker)
8340 *
8341 * A Volume (or Volumes that comprise an Array set) may be undergoing a
8342 * transformation, so we will be turning off ioaccel for all volumes that
8343 * make up the Array.
8344 */
8345static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8346{
8347        int rc;
8348        int i;
8349        u8 ioaccel_status;
8350        unsigned char *buf;
8351        struct hpsa_scsi_dev_t *device;
8352
8353        if (!h)
8354                return;
8355
8356        buf = kmalloc(64, GFP_KERNEL);
8357        if (!buf)
8358                return;
8359
8360        /*
8361         * Run through current device list used during I/O requests.
8362         */
8363        for (i = 0; i < h->ndevices; i++) {
8364                int offload_to_be_enabled = 0;
8365                int offload_config = 0;
8366
8367                device = h->dev[i];
8368
8369                if (!device)
8370                        continue;
8371                if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8372                                                HPSA_VPD_LV_IOACCEL_STATUS))
8373                        continue;
8374
8375                memset(buf, 0, 64);
8376
8377                rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8378                                        VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8379                                        buf, 64);
8380                if (rc != 0)
8381                        continue;
8382
8383                ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8384
8385                /*
8386                 * Check if offload is still configured on
8387                 */
8388                offload_config =
8389                                !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8390                /*
8391                 * If offload is configured on, check to see if ioaccel
8392                 * needs to be enabled.
8393                 */
8394                if (offload_config)
8395                        offload_to_be_enabled =
8396                                !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8397
8398                /*
8399                 * If ioaccel is to be re-enabled, re-enable later during the
8400                 * scan operation so the driver can get a fresh raidmap
8401                 * before turning ioaccel back on.
8402                 */
8403                if (offload_to_be_enabled)
8404                        continue;
8405
8406                /*
8407                 * Immediately turn off ioaccel for any volume the
8408                 * controller tells us to. Some of the reasons could be:
8409                 *    transformation - change to the LVs of an Array.
8410                 *    degraded volume - component failure
8411                 */
8412                hpsa_turn_off_ioaccel_for_device(device);
8413        }
8414
8415        kfree(buf);
8416}
8417
8418static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8419{
8420        char *event_type;
8421
8422        if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8423                return;
8424
8425        /* Ask the controller to clear the events we're handling. */
8426        if ((h->transMethod & (CFGTBL_Trans_io_accel1
8427                        | CFGTBL_Trans_io_accel2)) &&
8428                (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8429                 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8430
8431                if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8432                        event_type = "state change";
8433                if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8434                        event_type = "configuration change";
8435                /* Stop sending new RAID offload reqs via the IO accelerator */
8436                scsi_block_requests(h->scsi_host);
8437                hpsa_set_ioaccel_status(h);
8438                hpsa_drain_accel_commands(h);
8439                /* Set 'accelerator path config change' bit */
8440                dev_warn(&h->pdev->dev,
8441                        "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8442                        h->events, event_type);
8443                writel(h->events, &(h->cfgtable->clear_event_notify));
8444                /* Set the "clear event notify field update" bit 6 */
8445                writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8446                /* Wait until ctlr clears 'clear event notify field', bit 6 */
8447                hpsa_wait_for_clear_event_notify_ack(h);
8448                scsi_unblock_requests(h->scsi_host);
8449        } else {
8450                /* Acknowledge controller notification events. */
8451                writel(h->events, &(h->cfgtable->clear_event_notify));
8452                writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8453                hpsa_wait_for_clear_event_notify_ack(h);
8454        }
8456}
8457
8458/* Check a register on the controller to see if there are configuration
8459 * changes (added/changed/removed logical drives, etc.) which mean that
8460 * we should rescan the controller for devices.
8461 * Also check flag for driver-initiated rescan.
8462 */
8463static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8464{
8465        if (h->drv_req_rescan) {
8466                h->drv_req_rescan = 0;
8467                return 1;
8468        }
8469
8470        if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8471                return 0;
8472
8473        h->events = readl(&(h->cfgtable->event_notify));
8474        return h->events & RESCAN_REQUIRED_EVENT_BITS;
8475}
8476
8477/*
8478 * Check if any of the offline devices have become ready
8479 */
8480static int hpsa_offline_devices_ready(struct ctlr_info *h)
8481{
8482        unsigned long flags;
8483        struct offline_device_entry *d;
8484        struct list_head *this, *tmp;
8485
8486        spin_lock_irqsave(&h->offline_device_lock, flags);
8487        list_for_each_safe(this, tmp, &h->offline_device_list) {
8488                d = list_entry(this, struct offline_device_entry,
8489                                offline_list);
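                /* Drop the lock around hpsa_volume_offline(): it sends
                 * commands to the controller and may sleep.
                 */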
8490                spin_unlock_irqrestore(&h->offline_device_lock, flags);
8491                if (!hpsa_volume_offline(h, d->scsi3addr)) {
8492                        spin_lock_irqsave(&h->offline_device_lock, flags);
8493                        list_del(&d->offline_list);
8494                        spin_unlock_irqrestore(&h->offline_device_lock, flags);
8495                        return 1;
8496                }
8497                spin_lock_irqsave(&h->offline_device_lock, flags);
8498        }
8499        spin_unlock_irqrestore(&h->offline_device_lock, flags);
8500        return 0;
8501}
8502
8503static int hpsa_luns_changed(struct ctlr_info *h)
8504{
8505        int rc = 1; /* assume there are changes */
8506        struct ReportLUNdata *logdev = NULL;
8507
8508        /* if we can't find out if lun data has changed,
8509         * assume that it has.
8510         */
8511
8512        if (!h->lastlogicals)
8513                return rc;
8514
8515        logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8516        if (!logdev)
8517                return rc;
8518
8519        if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8520                dev_warn(&h->pdev->dev,
8521                        "report luns failed, can't track lun changes.\n");
8522                goto out;
8523        }
8524        if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8525                dev_info(&h->pdev->dev,
8526                        "Lun changes detected.\n");
8527                memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8528                goto out;
8529        }
8530        rc = 0; /* no changes detected. */
8531out:
8532        kfree(logdev);
8533        return rc;
8534}
8535
8536static void hpsa_perform_rescan(struct ctlr_info *h)
8537{
8538        struct Scsi_Host *sh = NULL;
8539        unsigned long flags;
8540
8541        /*
8542         * Do the scan after the reset
8543         */
8544        spin_lock_irqsave(&h->reset_lock, flags);
8545        if (h->reset_in_progress) {
8546                h->drv_req_rescan = 1;
8547                spin_unlock_irqrestore(&h->reset_lock, flags);
8548                return;
8549        }
8550        spin_unlock_irqrestore(&h->reset_lock, flags);
8551
8552        sh = scsi_host_get(h->scsi_host);
8553        if (sh != NULL) {
8554                hpsa_scan_start(sh);
8555                scsi_host_put(sh);
8556                h->drv_req_rescan = 0;
8557        }
8558}
8559
8560/*
8561 * watch for controller events
8562 */
8563static void hpsa_event_monitor_worker(struct work_struct *work)
8564{
8565        struct ctlr_info *h = container_of(to_delayed_work(work),
8566                                        struct ctlr_info, event_monitor_work);
8567        unsigned long flags;
8568
8569        spin_lock_irqsave(&h->lock, flags);
8570        if (h->remove_in_progress) {
8571                spin_unlock_irqrestore(&h->lock, flags);
8572                return;
8573        }
8574        spin_unlock_irqrestore(&h->lock, flags);
8575
8576        if (hpsa_ctlr_needs_rescan(h)) {
8577                hpsa_ack_ctlr_events(h);
8578                hpsa_perform_rescan(h);
8579        }
8580
8581        spin_lock_irqsave(&h->lock, flags);
8582        if (!h->remove_in_progress)
8583                queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
8584                                HPSA_EVENT_MONITOR_INTERVAL);
8585        spin_unlock_irqrestore(&h->lock, flags);
8586}
8587
8588static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8589{
8590        unsigned long flags;
8591        struct ctlr_info *h = container_of(to_delayed_work(work),
8592                                        struct ctlr_info, rescan_ctlr_work);
8593
8594        spin_lock_irqsave(&h->lock, flags);
8595        if (h->remove_in_progress) {
8596                spin_unlock_irqrestore(&h->lock, flags);
8597                return;
8598        }
8599        spin_unlock_irqrestore(&h->lock, flags);
8600
8601        if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8602                hpsa_perform_rescan(h);
8603        } else if (h->discovery_polling) {
8604                if (hpsa_luns_changed(h)) {
8605                        dev_info(&h->pdev->dev,
8606                                "driver discovery polling rescan.\n");
8607                        hpsa_perform_rescan(h);
8608                }
8609        }
8610        spin_lock_irqsave(&h->lock, flags);
8611        if (!h->remove_in_progress)
8612                queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8613                                h->heartbeat_sample_interval);
8614        spin_unlock_irqrestore(&h->lock, flags);
8615}
8616
8617static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8618{
8619        unsigned long flags;
8620        struct ctlr_info *h = container_of(to_delayed_work(work),
8621                                        struct ctlr_info, monitor_ctlr_work);
8622
8623        detect_controller_lockup(h);
8624        if (lockup_detected(h))
8625                return;
8626
8627        spin_lock_irqsave(&h->lock, flags);
8628        if (!h->remove_in_progress)
8629                queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
8630                                h->heartbeat_sample_interval);
8631        spin_unlock_irqrestore(&h->lock, flags);
8632}
8633
8634static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8635                                                char *name)
8636{
8637        struct workqueue_struct *wq = NULL;
8638
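        /* An ordered workqueue executes at most one work item at a time,
         * so work queued here never runs concurrently with itself.
         */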
8639        wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8640        if (!wq)
8641                dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8642
8643        return wq;
8644}
8645
8646static void hpsa_free_ctlr_info(struct ctlr_info *h)
8647{
8648        kfree(h->reply_map);
8649        kfree(h);
8650}
8651
8652static struct ctlr_info *hpsa_alloc_ctlr_info(void)
8653{
8654        struct ctlr_info *h;
8655
8656        h = kzalloc(sizeof(*h), GFP_KERNEL);
8657        if (!h)
8658                return NULL;
8659
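        /* reply_map[] is indexed by CPU and selects the reply queue used
         * for commands submitted from that CPU, hence one entry per
         * possible CPU.
         */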
8660        h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
8661        if (!h->reply_map) {
8662                kfree(h);
8663                return NULL;
8664        }
8665        return h;
8666}
8667
8668static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8669{
8670        int rc;
8671        struct ctlr_info *h;
8672        int try_soft_reset = 0;
8673        unsigned long flags;
8674        u32 board_id;
8675
8676        if (number_of_controllers == 0)
8677                printk(KERN_INFO DRIVER_NAME "\n");
8678
8679        rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8680        if (rc < 0) {
8681                dev_warn(&pdev->dev, "Board ID not found\n");
8682                return rc;
8683        }
8684
8685        rc = hpsa_init_reset_devices(pdev, board_id);
8686        if (rc) {
8687                if (rc != -ENOTSUPP)
8688                        return rc;
8689                /* If the reset fails in a particular way (it has no way to do
8690                 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8691                 * a soft reset once we get the controller configured up to the
8692                 * point that it can accept a command.
8693                 */
8694                try_soft_reset = 1;
8695                rc = 0;
8696        }
8697
8698reinit_after_soft_reset:
8699
8700        /* Command structures must be aligned on a 32-byte boundary because
8701         * the 5 lower bits of the address are used by the hardware and by
8702         * the driver.  See comments in hpsa.h for more info.
8703         */
8704        BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8705        h = hpsa_alloc_ctlr_info();
8706        if (!h) {
8707                dev_err(&pdev->dev, "Failed to allocate controller head\n");
8708                return -ENOMEM;
8709        }
8710
8711        h->pdev = pdev;
8712
8713        h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8714        INIT_LIST_HEAD(&h->offline_device_list);
8715        spin_lock_init(&h->lock);
8716        spin_lock_init(&h->offline_device_lock);
8717        spin_lock_init(&h->scan_lock);
8718        spin_lock_init(&h->reset_lock);
8719        atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8720
8721        /* Allocate and clear per-cpu variable lockup_detected */
8722        h->lockup_detected = alloc_percpu(u32);
8723        if (!h->lockup_detected) {
8724                dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8725                rc = -ENOMEM;
8726                goto clean1;    /* aer/h */
8727        }
8728        set_lockup_detected_for_all_cpus(h, 0);
8729
8730        rc = hpsa_pci_init(h);
8731        if (rc)
8732                goto clean2;    /* lu, aer/h */
8733
8734        /* relies on h-> settings made by hpsa_pci_init, including
8735         * the interrupt mode (h->intr_mode) */
8736        rc = hpsa_scsi_host_alloc(h);
8737        if (rc)
8738                goto clean2_5;  /* pci, lu, aer/h */
8739
8740        sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8741        h->ctlr = number_of_controllers;
8742        number_of_controllers++;
8743
8744        /* set the DMA mask: prefer 64-bit DMA, fall back to 32-bit */
8745        rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
8746        if (rc != 0) {
8747                rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8748                if (rc != 0) {
8749                        dev_err(&pdev->dev, "no suitable DMA available\n");
8750                        goto clean3;    /* shost, pci, lu, aer/h */
8751                }
8752        }
8753
8754        /* make sure the board interrupts are off */
8755        h->access.set_intr_mask(h, HPSA_INTR_OFF);
8756
8757        rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8758        if (rc)
8759                goto clean3;    /* shost, pci, lu, aer/h */
8760        rc = hpsa_alloc_cmd_pool(h);
8761        if (rc)
8762                goto clean4;    /* irq, shost, pci, lu, aer/h */
8763        rc = hpsa_alloc_sg_chain_blocks(h);
8764        if (rc)
8765                goto clean5;    /* cmd, irq, shost, pci, lu, aer/h */
8766        init_waitqueue_head(&h->scan_wait_queue);
8767        init_waitqueue_head(&h->event_sync_wait_queue);
8768        mutex_init(&h->reset_mutex);
8769        h->scan_finished = 1; /* no scan currently in progress */
8770        h->scan_waiting = 0;
8771
8772        pci_set_drvdata(pdev, h);
8773        h->ndevices = 0;
8774
8775        spin_lock_init(&h->devlock);
8776        rc = hpsa_put_ctlr_into_performant_mode(h);
8777        if (rc)
8778                goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8779
8780        /* create the per-controller workqueues: rescan, resubmit, monitor */
8781        h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8782        if (!h->rescan_ctlr_wq) {
8783                rc = -ENOMEM;
8784                goto clean7;
8785        }
8786
8787        h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8788        if (!h->resubmit_wq) {
8789                rc = -ENOMEM;
8790                goto clean7;    /* aer/h */
8791        }
8792
8793        h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
8794        if (!h->monitor_ctlr_wq) {
8795                rc = -ENOMEM;
8796                goto clean7;
8797        }
8798
8799        /*
8800         * At this point, the controller is ready to take commands.
8801         * Now, if reset_devices and the hard reset didn't work, try
8802         * the soft reset and see if that works.
8803         */
8804        if (try_soft_reset) {
8805
8806                /* This is kind of gross.  We may or may not get a completion
8807                 * from the soft reset command, and if we do, then the value
8808                 * from the fifo may or may not be valid.  So, we wait 10 secs
8809                 * after the reset throwing away any completions we get during
8810                 * that time.  Unregister the interrupt handler and register
8811                 * fake ones to scoop up any residual completions.
8812                 */
8813                spin_lock_irqsave(&h->lock, flags);
8814                h->access.set_intr_mask(h, HPSA_INTR_OFF);
8815                spin_unlock_irqrestore(&h->lock, flags);
8816                hpsa_free_irqs(h);
8817                rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8818                                        hpsa_intx_discard_completions);
8819                if (rc) {
8820                        dev_warn(&h->pdev->dev,
8821                                "Failed to request_irq after soft reset.\n");
8822                        /*
8823                         * cannot goto clean7 or free_irqs will be called
8824                         * again. Instead, do its work
8825                         */
8826                        hpsa_free_performant_mode(h);   /* clean7 */
8827                        hpsa_free_sg_chain_blocks(h);   /* clean6 */
8828                        hpsa_free_cmd_pool(h);          /* clean5 */
8829                        /*
8830                         * skip hpsa_free_irqs(h) clean4 since that
8831                         * was just called before request_irqs failed
8832                         */
8833                        goto clean3;
8834                }
8835
8836                rc = hpsa_kdump_soft_reset(h);
8837                if (rc)
8838                        /* Neither hard nor soft reset worked, we're hosed. */
8839                        goto clean7;
8840
8841                dev_info(&h->pdev->dev, "Board READY.\n");
8842                dev_info(&h->pdev->dev,
8843                        "Waiting for stale completions to drain.\n");
8844                h->access.set_intr_mask(h, HPSA_INTR_ON);
8845                msleep(10000);
8846                h->access.set_intr_mask(h, HPSA_INTR_OFF);
8847
8848                rc = controller_reset_failed(h->cfgtable);
8849                if (rc)
8850                        dev_info(&h->pdev->dev,
8851                                "Soft reset appears to have failed.\n");
8852
8853                /* since the controller's reset, we have to go back and re-init
8854                 * everything.  Easiest to just forget what we've done and do it
8855                 * all over again.
8856                 */
8857                hpsa_undo_allocations_after_kdump_soft_reset(h);
8858                try_soft_reset = 0;
8859                if (rc)
8860                        /* don't goto clean, we already unallocated */
8861                        return -ENODEV;
8862
8863                goto reinit_after_soft_reset;
8864        }
8865
8866        /* Enable Accelerated IO path at driver layer */
8867        h->acciopath_status = 1;
8868        /* Disable discovery polling. */
8869        h->discovery_polling = 0;
8870
8872        /* Turn the interrupts on so we can service requests */
8873        h->access.set_intr_mask(h, HPSA_INTR_ON);
8874
8875        hpsa_hba_inquiry(h);
8876
8877        h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8878        if (!h->lastlogicals)
8879                dev_info(&h->pdev->dev,
8880                        "Can't track changes to report lun data\n");
8881
8882        /* hook into SCSI subsystem */
8883        rc = hpsa_scsi_add_host(h);
8884        if (rc)
8885                goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
8886
8887        /* Monitor the controller for firmware lockups */
8888        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8889        INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8890        schedule_delayed_work(&h->monitor_ctlr_work,
8891                                h->heartbeat_sample_interval);
8892        INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8893        queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8894                                h->heartbeat_sample_interval);
8895        INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8896        schedule_delayed_work(&h->event_monitor_work,
8897                                HPSA_EVENT_MONITOR_INTERVAL);
8898        return 0;
8899
8900clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
8901        kfree(h->lastlogicals);
8902clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8903        hpsa_free_performant_mode(h);
8904        h->access.set_intr_mask(h, HPSA_INTR_OFF);
8905clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
8906        hpsa_free_sg_chain_blocks(h);
8907clean5: /* cmd, irq, shost, pci, lu, aer/h */
8908        hpsa_free_cmd_pool(h);
8909clean4: /* irq, shost, pci, lu, aer/h */
8910        hpsa_free_irqs(h);
8911clean3: /* shost, pci, lu, aer/h */
8912        scsi_host_put(h->scsi_host);
8913        h->scsi_host = NULL;
8914clean2_5: /* pci, lu, aer/h */
8915        hpsa_free_pci_init(h);
8916clean2: /* lu, aer/h */
8917        if (h->lockup_detected) {
8918                free_percpu(h->lockup_detected);
8919                h->lockup_detected = NULL;
8920        }
8921clean1: /* wq/aer/h */
8922        if (h->resubmit_wq) {
8923                destroy_workqueue(h->resubmit_wq);
8924                h->resubmit_wq = NULL;
8925        }
8926        if (h->rescan_ctlr_wq) {
8927                destroy_workqueue(h->rescan_ctlr_wq);
8928                h->rescan_ctlr_wq = NULL;
8929        }
8930        if (h->monitor_ctlr_wq) {
8931                destroy_workqueue(h->monitor_ctlr_wq);
8932                h->monitor_ctlr_wq = NULL;
8933        }
8934        kfree(h);
8935        return rc;
8936}
8937
8938static void hpsa_flush_cache(struct ctlr_info *h)
8939{
8940        char *flush_buf;
8941        struct CommandList *c;
8942        int rc;
8943
8944        if (unlikely(lockup_detected(h)))
8945                return;
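        /* The cache-flush command transfers a 4-byte data buffer (see the
         * fill_cmd() call below); it just needs to be zeroed.
         */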
8946        flush_buf = kzalloc(4, GFP_KERNEL);
8947        if (!flush_buf)
8948                return;
8949
8950        c = cmd_alloc(h);
8951
8952        if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8953                RAID_CTLR_LUNID, TYPE_CMD))
8954                goto out;
8955        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8956                        DEFAULT_TIMEOUT);
8957        if (rc == 0 && c->err_info->CommandStatus == 0)
8958                goto done;
8959out:
8960        dev_warn(&h->pdev->dev,
8961                "error flushing cache on controller\n");
8962done:
8963        cmd_free(h, c);
8965        kfree(flush_buf);
8966}
8967
8968/* Make controller gather fresh report lun data each time we
8969 * send down a report luns request
8970 */
8971static void hpsa_disable_rld_caching(struct ctlr_info *h)
8972{
8973        u32 *options;
8974        struct CommandList *c;
8975        int rc;
8976
8977        /* Don't bother trying to set diag options if locked up */
8978        if (unlikely(lockup_detected(h)))
8979                return;
8980
8981        options = kzalloc(sizeof(*options), GFP_KERNEL);
8982        if (!options)
8983                return;
8984
8985        c = cmd_alloc(h);
8986
8987        /* first, get the current diag options settings */
8988        if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8989                RAID_CTLR_LUNID, TYPE_CMD))
8990                goto errout;
8991
8992        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8993                        NO_TIMEOUT);
8994        if ((rc != 0) || (c->err_info->CommandStatus != 0))
8995                goto errout;
8996
8997        /* Now, set the bit for disabling the RLD caching */
8998        *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8999
9000        if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
9001                RAID_CTLR_LUNID, TYPE_CMD))
9002                goto errout;
9003
9004        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
9005                        NO_TIMEOUT);
9006        if ((rc != 0)  || (c->err_info->CommandStatus != 0))
9007                goto errout;
9008
9009        /* Now verify that it got set: */
9010        if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
9011                RAID_CTLR_LUNID, TYPE_CMD))
9012                goto errout;
9013
9014        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
9015                        NO_TIMEOUT);
9016        if ((rc != 0)  || (c->err_info->CommandStatus != 0))
9017                goto errout;
9018
9019        if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
9020                goto out;
9021
9022errout:
9023        dev_err(&h->pdev->dev,
9024                        "Error: failed to disable report lun data caching.\n");
9025out:
9026        cmd_free(h, c);
9027        kfree(options);
9028}
9029
9030static void __hpsa_shutdown(struct pci_dev *pdev)
9031{
9032        struct ctlr_info *h;
9033
9034        h = pci_get_drvdata(pdev);
9035        /* Flush the cache first so that all data in the battery-backed
9036         * cache is written out to disk, then turn the board interrupts
9037         * off and release the controller's IRQs.
9038         */
9039        hpsa_flush_cache(h);
9040        h->access.set_intr_mask(h, HPSA_INTR_OFF);
9041        hpsa_free_irqs(h);                      /* init_one 4 */
9042        hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
9043}
9044
9045static void hpsa_shutdown(struct pci_dev *pdev)
9046{
9047        __hpsa_shutdown(pdev);
9048        pci_disable_device(pdev);
9049}
9050
9051static void hpsa_free_device_info(struct ctlr_info *h)
9052{
9053        int i;
9054
9055        for (i = 0; i < h->ndevices; i++) {
9056                kfree(h->dev[i]);
9057                h->dev[i] = NULL;
9058        }
9059}
9060
9061static void hpsa_remove_one(struct pci_dev *pdev)
9062{
9063        struct ctlr_info *h;
9064        unsigned long flags;
9065
9066        if (pci_get_drvdata(pdev) == NULL) {
9067                dev_err(&pdev->dev, "unable to remove device\n");
9068                return;
9069        }
9070        h = pci_get_drvdata(pdev);
9071
9072        /* Get rid of any controller monitoring work items */
9073        spin_lock_irqsave(&h->lock, flags);
9074        h->remove_in_progress = 1;
9075        spin_unlock_irqrestore(&h->lock, flags);
9076        cancel_delayed_work_sync(&h->monitor_ctlr_work);
9077        cancel_delayed_work_sync(&h->rescan_ctlr_work);
9078        cancel_delayed_work_sync(&h->event_monitor_work);
9079        destroy_workqueue(h->rescan_ctlr_wq);
9080        destroy_workqueue(h->resubmit_wq);
9081        destroy_workqueue(h->monitor_ctlr_wq);
9082
9083        hpsa_delete_sas_host(h);
9084
9085        /*
9086         * Call before disabling interrupts.
9087         * scsi_remove_host can trigger I/O operations especially
9088         * when multipath is enabled. There can be SYNCHRONIZE CACHE
9089         * operations which cannot complete and will hang the system.
9090         */
9091        if (h->scsi_host)
9092                scsi_remove_host(h->scsi_host);         /* init_one 8 */
9093        /* includes hpsa_free_irqs - init_one 4 */
9094        /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9095        __hpsa_shutdown(pdev);
9096
9097        hpsa_free_device_info(h);               /* scan */
9098
9099        kfree(h->hba_inquiry_data);                     /* init_one 10 */
9100        h->hba_inquiry_data = NULL;                     /* init_one 10 */
9101        hpsa_free_ioaccel2_sg_chain_blocks(h);
9102        hpsa_free_performant_mode(h);                   /* init_one 7 */
9103        hpsa_free_sg_chain_blocks(h);                   /* init_one 6 */
9104        hpsa_free_cmd_pool(h);                          /* init_one 5 */
9105        kfree(h->lastlogicals);
9106
9107        /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
9108
9109        scsi_host_put(h->scsi_host);                    /* init_one 3 */
9110        h->scsi_host = NULL;                            /* init_one 3 */
9111
9112        /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9113        hpsa_free_pci_init(h);                          /* init_one 2.5 */
9114
9115        free_percpu(h->lockup_detected);                /* init_one 2 */
9116        h->lockup_detected = NULL;                      /* init_one 2 */
9118
9119        hpsa_free_ctlr_info(h);                         /* init_one 1 */
9120}
9121
9122static int __maybe_unused hpsa_suspend(
9123        __attribute__((unused)) struct device *dev)
9124{
9125        return -ENOSYS;
9126}
9127
9128static int __maybe_unused hpsa_resume(
9129        __attribute__((unused)) struct device *dev)
9130{
9131        return -ENOSYS;
9132}
9133
9134static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume);
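/* SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops from the two stubs above;
 * both return -ENOSYS, so system suspend/resume is reported as unsupported
 * rather than silently ignored.
 */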
9135
9136static struct pci_driver hpsa_pci_driver = {
9137        .name = HPSA,
9138        .probe = hpsa_init_one,
9139        .remove = hpsa_remove_one,
9140        .id_table = hpsa_pci_device_id,
9141        .shutdown = hpsa_shutdown,
9142        .driver.pm = &hpsa_pm_ops,
9143};
9144
9145/* Fill in bucket_map[], given nsgs (the max number of
9146 * scatter gather elements supported) and bucket[],
9147 * which is an array of num_buckets integers.  The bucket[]
9148 * array contains num_buckets different DMA transfer sizes (in
9149 * 16-byte increments) which the controller uses to fetch
9150 * commands.  This function fills in bucket_map[], which
9151 * maps a given number of scatter gather elements to one of
9152 * those DMA transfer sizes.  The point of it is to allow the
9153 * controller to only do as much DMA as needed to fetch the
9154 * command, with the DMA transfer size encoded in the lower
9155 * bits of the command address.
9156 */
9157static void  calc_bucket_map(int bucket[], int num_buckets,
9158        int nsgs, int min_blocks, u32 *bucket_map)
9159{
9160        int i, j, b, size;
9161
9162        /* Note, bucket_map must have nsgs+1 entries. */
9163        for (i = 0; i <= nsgs; i++) {
9164                /* Compute size of a command with i SG entries */
9165                size = i + min_blocks;
9166                b = num_buckets; /* Assume the biggest bucket */
9167                /* Find the bucket that is just big enough */
9168                for (j = 0; j < num_buckets; j++) {
9169                        if (bucket[j] >= size) {
9170                                b = j;
9171                                break;
9172                        }
9173                }
9174                /* for a command with i SG entries, use bucket b. */
9175                bucket_map[i] = b;
9176        }
9177}
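/*
 * Worked example, using the performant-mode bft[] and min_blocks = 4 from
 * hpsa_enter_performant_mode() below: a command with i = 3 SG entries needs
 * size = 3 + 4 = 7 blocks.  The smallest bucket >= 7 is bucket[2] = 8, so
 * bucket_map[3] = 2 and the controller fetches 8 * 16 = 128 bytes for any
 * 3-entry command.
 */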
9178
9179/*
9180 * return -ENODEV on err, 0 on success (or no action)
9181 * allocates numerous items that must be freed later
9182 */
9183static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9184{
9185        int i;
9186        unsigned long register_value;
9187        unsigned long transMethod = CFGTBL_Trans_Performant |
9188                        (trans_support & CFGTBL_Trans_use_short_tags) |
9189                                CFGTBL_Trans_enable_directed_msix |
9190                        (trans_support & (CFGTBL_Trans_io_accel1 |
9191                                CFGTBL_Trans_io_accel2));
9192        struct access_method access = SA5_performant_access;
9193
9194        /* This is a bit complicated.  There are 8 registers on
9195         * the controller, which we write to in order to tell it 8 different
9196         * sizes of commands which there may be.  It's a way of
9197         * reducing the DMA done to fetch each command.  Encoded into
9198         * each command's tag are 3 bits which communicate to the controller
9199         * which of the eight sizes that command fits within.  The size of
9200         * each command depends on how many scatter gather entries there are.
9201         * Each SG entry requires 16 bytes.  The eight registers are programmed
9202         * with the number of 16-byte blocks a command of that size requires.
9203         * The smallest command possible requires 5 such 16-byte blocks;
9204         * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
9205         * blocks.  Note, this only extends to the SG entries contained
9206         * within the command block, and does not extend to chained blocks
9207         * of SG elements.   bft[] contains the eight values we write to
9208         * the registers.  They are not evenly distributed, but have more
9209         * sizes for small commands, and fewer sizes for larger commands.
9210         */
9211        int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9212#define MIN_IOACCEL2_BFT_ENTRY 5
9213#define HPSA_IOACCEL2_HEADER_SZ 4
9214        int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9215                        13, 14, 15, 16, 17, 18, 19,
9216                        HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9217        BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9218        BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9219        BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9220                                 16 * MIN_IOACCEL2_BFT_ENTRY);
9221        BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9222        BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9223        /*  5 = 1 s/g entry or 4k
9224         *  6 = 2 s/g entry or 8k
9225         *  8 = 4 s/g entry or 16k
9226         * 10 = 6 s/g entry or 24k
9227         */
9228
9229        /* If the controller supports either ioaccel method then
9230         * we can also use the RAID stack submit path that does not
9231         * perform the superfluous readl() after each command submission.
9232         */
9233        if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9234                access = SA5_performant_access_no_read;
9235
9236        /* Controller spec: zero out this buffer. */
9237        for (i = 0; i < h->nreply_queues; i++)
9238                memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9239
9240        bft[7] = SG_ENTRIES_IN_CMD + 4;
9241        calc_bucket_map(bft, ARRAY_SIZE(bft),
9242                                SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9243        for (i = 0; i < 8; i++)
9244                writel(bft[i], &h->transtable->BlockFetch[i]);
9245
9246        /* size of controller ring buffer */
9247        writel(h->max_commands, &h->transtable->RepQSize);
9248        writel(h->nreply_queues, &h->transtable->RepQCount);
9249        writel(0, &h->transtable->RepQCtrAddrLow32);
9250        writel(0, &h->transtable->RepQCtrAddrHigh32);
9251
9252        for (i = 0; i < h->nreply_queues; i++) {
9253                writel(0, &h->transtable->RepQAddr[i].upper);
9254                writel(h->reply_queue[i].busaddr,
9255                        &h->transtable->RepQAddr[i].lower);
9256        }
9257
9258        writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9259        writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9260        /*
9261         * enable outbound interrupt coalescing in accelerator mode
9262         */
9263        if (trans_support & CFGTBL_Trans_io_accel1) {
9264                access = SA5_ioaccel_mode1_access;
9265                writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9266                writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9267        } else if (trans_support & CFGTBL_Trans_io_accel2)
9268                access = SA5_ioaccel_mode2_access;
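        /* Ring the doorbell to request the transport change programmed
         * above, then wait for the controller to acknowledge it.
         */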
9270        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9271        if (hpsa_wait_for_mode_change_ack(h)) {
9272                dev_err(&h->pdev->dev,
9273                        "performant mode problem - doorbell timeout\n");
9274                return -ENODEV;
9275        }
9276        register_value = readl(&(h->cfgtable->TransportActive));
9277        if (!(register_value & CFGTBL_Trans_Performant)) {
9278                dev_err(&h->pdev->dev,
9279                        "performant mode problem - transport not active\n");
9280                return -ENODEV;
9281        }
9282        /* Change the access methods to the performant access methods */
9283        h->access = access;
9284        h->transMethod = transMethod;
9285
9286        if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9287                (trans_support & CFGTBL_Trans_io_accel2)))
9288                return 0;
9289
9290        if (trans_support & CFGTBL_Trans_io_accel1) {
9291                /* Set up I/O accelerator mode */
9292                for (i = 0; i < h->nreply_queues; i++) {
9293                        writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9294                        h->reply_queue[i].current_entry =
9295                                readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9296                }
9297                bft[7] = h->ioaccel_maxsg + 8;
9298                calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9299                                h->ioaccel1_blockFetchTable);
9300
9301                /* initialize all reply queue entries to unused */
9302                for (i = 0; i < h->nreply_queues; i++)
9303                        memset(h->reply_queue[i].head,
9304                                (u8) IOACCEL_MODE1_REPLY_UNUSED,
9305                                h->reply_queue_size);
9306
9307                /* set all the constant fields in the accelerator command
9308                 * frames once at init time to save CPU cycles later.
9309                 */
9310                for (i = 0; i < h->nr_cmds; i++) {
9311                        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9312
9313                        cp->function = IOACCEL1_FUNCTION_SCSIIO;
9314                        cp->err_info = (u32) (h->errinfo_pool_dhandle +
9315                                        (i * sizeof(struct ErrorInfo)));
9316                        cp->err_info_len = sizeof(struct ErrorInfo);
9317                        cp->sgl_offset = IOACCEL1_SGLOFFSET;
9318                        cp->host_context_flags =
9319                                cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9320                        cp->timeout_sec = 0;
9321                        cp->ReplyQueue = 0;
9322                        cp->tag =
9323                                cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9324                        cp->host_addr =
9325                                cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9326                                        (i * sizeof(struct io_accel1_cmd)));
9327                }
9328        } else if (trans_support & CFGTBL_Trans_io_accel2) {
9329                u64 cfg_offset, cfg_base_addr_index;
9330                u32 bft2_offset, cfg_base_addr;
9331
9332                hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9333                                    &cfg_base_addr_index, &cfg_offset);
9334                BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9335                bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9336                calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9337                                4, h->ioaccel2_blockFetchTable);
9338                bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9339                BUILD_BUG_ON(offsetof(struct CfgTable,
9340                                io_accel_request_size_offset) != 0xb8);
9341                h->ioaccel2_bft2_regs =
9342                        remap_pci_mem(pci_resource_start(h->pdev,
9343                                        cfg_base_addr_index) +
9344                                        cfg_offset + bft2_offset,
9345                                        ARRAY_SIZE(bft2) *
9346                                        sizeof(*h->ioaccel2_bft2_regs));
9347                for (i = 0; i < ARRAY_SIZE(bft2); i++)
9348                        writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9349        }
9350        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9351        if (hpsa_wait_for_mode_change_ack(h)) {
9352                dev_err(&h->pdev->dev,
9353                        "performant mode problem - enabling ioaccel mode\n");
9354                return -ENODEV;
9355        }
9356        return 0;
9357}
9358
9359/* Free ioaccel1 mode command blocks and block fetch table */
9360static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9361{
9362        if (h->ioaccel_cmd_pool) {
9363                dma_free_coherent(&h->pdev->dev,
9364                                  h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9365                                  h->ioaccel_cmd_pool,
9366                                  h->ioaccel_cmd_pool_dhandle);
9367                h->ioaccel_cmd_pool = NULL;
9368                h->ioaccel_cmd_pool_dhandle = 0;
9369        }
9370        kfree(h->ioaccel1_blockFetchTable);
9371        h->ioaccel1_blockFetchTable = NULL;
9372}
9373
9374/* Allocate ioaccel1 mode command blocks and block fetch table */
9375static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9376{
9377        h->ioaccel_maxsg =
9378                readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9379        if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9380                h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9381
9382        /* Command structures must be aligned on a 128-byte boundary
9383         * because the 7 lower bits of the address are used by the
9384         * hardware.
9385         */
9386        BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9387                        IOACCEL1_COMMANDLIST_ALIGNMENT);
9388        h->ioaccel_cmd_pool =
9389                dma_alloc_coherent(&h->pdev->dev,
9390                        h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9391                        &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
9392
9393        h->ioaccel1_blockFetchTable =
9394                kmalloc(((h->ioaccel_maxsg + 1) *
9395                                sizeof(u32)), GFP_KERNEL);
9396
9397        if ((h->ioaccel_cmd_pool == NULL) ||
9398                (h->ioaccel1_blockFetchTable == NULL))
9399                goto clean_up;
9400
9401        memset(h->ioaccel_cmd_pool, 0,
9402                h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9403        return 0;
9404
9405clean_up:
9406        hpsa_free_ioaccel1_cmd_and_bft(h);
9407        return -ENOMEM;
9408}
9409
9410/* Free ioaccel2 mode command blocks and block fetch table */
9411static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9412{
9413        hpsa_free_ioaccel2_sg_chain_blocks(h);
9414
9415        if (h->ioaccel2_cmd_pool) {
9416                dma_free_coherent(&h->pdev->dev,
9417                                  h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9418                                  h->ioaccel2_cmd_pool,
9419                                  h->ioaccel2_cmd_pool_dhandle);
9420                h->ioaccel2_cmd_pool = NULL;
9421                h->ioaccel2_cmd_pool_dhandle = 0;
9422        }
9423        kfree(h->ioaccel2_blockFetchTable);
9424        h->ioaccel2_blockFetchTable = NULL;
9425}
9426
9427/* Allocate ioaccel2 mode command blocks and block fetch table */
9428static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9429{
9430        int rc;
9431
9432        /* Allocate ioaccel2 mode command blocks and block fetch table */
9433
9434        h->ioaccel_maxsg =
9435                readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9436        if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9437                h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9438
9439        BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9440                        IOACCEL2_COMMANDLIST_ALIGNMENT);
9441        h->ioaccel2_cmd_pool =
9442                dma_alloc_coherent(&h->pdev->dev,
9443                        h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9444                        &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
9445
9446        h->ioaccel2_blockFetchTable =
9447                kmalloc(((h->ioaccel_maxsg + 1) *
9448                                sizeof(u32)), GFP_KERNEL);
9449
9450        if ((h->ioaccel2_cmd_pool == NULL) ||
9451                (h->ioaccel2_blockFetchTable == NULL)) {
9452                rc = -ENOMEM;
9453                goto clean_up;
9454        }
9455
9456        rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9457        if (rc)
9458                goto clean_up;
9459
9460        memset(h->ioaccel2_cmd_pool, 0,
9461                h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9462        return 0;
9463
9464clean_up:
9465        hpsa_free_ioaccel2_cmd_and_bft(h);
9466        return rc;
9467}
9468
9469/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
9470static void hpsa_free_performant_mode(struct ctlr_info *h)
9471{
9472        kfree(h->blockFetchTable);
9473        h->blockFetchTable = NULL;
9474        hpsa_free_reply_queues(h);
9475        hpsa_free_ioaccel1_cmd_and_bft(h);
9476        hpsa_free_ioaccel2_cmd_and_bft(h);
9477}
9478
9479/* return -ENODEV on error, 0 on success (or no action)
9480 * allocates numerous items that must be freed later
9481 */
9482static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9483{
9484        u32 trans_support;
9485        unsigned long transMethod = CFGTBL_Trans_Performant |
9486                                        CFGTBL_Trans_use_short_tags;
9487        int i, rc;
9488
9489        if (hpsa_simple_mode)
9490                return 0;
9491
9492        trans_support = readl(&(h->cfgtable->TransportSupport));
9493        if (!(trans_support & PERFORMANT_MODE))
9494                return 0;
9495
9496        /* Check for I/O accelerator mode support */
9497        if (trans_support & CFGTBL_Trans_io_accel1) {
9498                transMethod |= CFGTBL_Trans_io_accel1 |
9499                                CFGTBL_Trans_enable_directed_msix;
9500                rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9501                if (rc)
9502                        return rc;
9503        } else if (trans_support & CFGTBL_Trans_io_accel2) {
9504                transMethod |= CFGTBL_Trans_io_accel2 |
9505                                CFGTBL_Trans_enable_directed_msix;
9506                rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9507                if (rc)
9508                        return rc;
9509        }
9510
9511        h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9512        hpsa_get_max_perf_mode_cmds(h);
9513        /* Performant mode ring buffer and supporting data structures */
9514        h->reply_queue_size = h->max_commands * sizeof(u64);
9515
9516        for (i = 0; i < h->nreply_queues; i++) {
9517                h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
9518                                                h->reply_queue_size,
9519                                                &h->reply_queue[i].busaddr,
9520                                                GFP_KERNEL);
9521                if (!h->reply_queue[i].head) {
9522                        rc = -ENOMEM;
9523                        goto clean1;    /* rq, ioaccel */
9524                }
9525                h->reply_queue[i].size = h->max_commands;
9526                h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
9527                h->reply_queue[i].current_entry = 0;
9528        }
9529
9530        /* Need a block fetch table for performant mode */
9531        h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9532                                sizeof(u32)), GFP_KERNEL);
9533        if (!h->blockFetchTable) {
9534                rc = -ENOMEM;
9535                goto clean1;    /* rq, ioaccel */
9536        }
9537
9538        rc = hpsa_enter_performant_mode(h, trans_support);
9539        if (rc)
9540                goto clean2;    /* bft, rq, ioaccel */
9541        return 0;
9542
9543clean2: /* bft, rq, ioaccel */
9544        kfree(h->blockFetchTable);
9545        h->blockFetchTable = NULL;
9546clean1: /* rq, ioaccel */
9547        hpsa_free_reply_queues(h);
9548        hpsa_free_ioaccel1_cmd_and_bft(h);
9549        hpsa_free_ioaccel2_cmd_and_bft(h);
9550        return rc;
9551}
9552
9553static int is_accelerated_cmd(struct CommandList *c)
9554{
9555        return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9556}
9557
9558static void hpsa_drain_accel_commands(struct ctlr_info *h)
9559{
9560        struct CommandList *c = NULL;
9561        int i, accel_cmds_out;
9562        int refcount;
9563
9564        do { /* wait for all outstanding ioaccel commands to drain out */
9565                accel_cmds_out = 0;
9566                for (i = 0; i < h->nr_cmds; i++) {
9567                        c = h->cmd_pool + i;
9568                        refcount = atomic_inc_return(&c->refcount);
9569                        if (refcount > 1) /* Command is allocated */
9570                                accel_cmds_out += is_accelerated_cmd(c);
9571                        cmd_free(h, c);
9572                }
9573                if (accel_cmds_out <= 0)
9574                        break;
9575                msleep(100);
9576        } while (1);
9577}
9578
9579static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9580                                struct hpsa_sas_port *hpsa_sas_port)
9581{
9582        struct hpsa_sas_phy *hpsa_sas_phy;
9583        struct sas_phy *phy;
9584
9585        hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9586        if (!hpsa_sas_phy)
9587                return NULL;
9588
9589        phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9590                hpsa_sas_port->next_phy_index);
9591        if (!phy) {
9592                kfree(hpsa_sas_phy);
9593                return NULL;
9594        }
9595
9596        hpsa_sas_port->next_phy_index++;
9597        hpsa_sas_phy->phy = phy;
9598        hpsa_sas_phy->parent_port = hpsa_sas_port;
9599
9600        return hpsa_sas_phy;
9601}
9602
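/*
 * Undo hpsa_alloc_sas_phy()/hpsa_sas_port_add_phy(): detach the phy from
 * its port, unlink it from the port's phy list if it was ever added,
 * then free both the transport object and the wrapper.
 */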
9603static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9604{
9605        struct sas_phy *phy = hpsa_sas_phy->phy;
9606
9607        sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9608        if (hpsa_sas_phy->added_to_port)
9609                list_del(&hpsa_sas_phy->phy_list_entry);
9610        sas_phy_delete(phy);
9611        kfree(hpsa_sas_phy);
9612}
9613
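/*
 * Fill in the transport-class identify data, register the phy with the
 * SAS transport layer, and attach it to its parent port.  No real
 * link-rate information is queried here, so every rate is reported as
 * SAS_LINK_RATE_UNKNOWN.
 */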
9614static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9615{
9616        int rc;
9617        struct hpsa_sas_port *hpsa_sas_port;
9618        struct sas_phy *phy;
9619        struct sas_identify *identify;
9620
9621        hpsa_sas_port = hpsa_sas_phy->parent_port;
9622        phy = hpsa_sas_phy->phy;
9623
9624        identify = &phy->identify;
9625        memset(identify, 0, sizeof(*identify));
9626        identify->sas_address = hpsa_sas_port->sas_address;
9627        identify->device_type = SAS_END_DEVICE;
9628        identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9629        identify->target_port_protocols = SAS_PROTOCOL_STP;
9630        phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9631        phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9632        phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9633        phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9634        phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9635
9636        rc = sas_phy_add(hpsa_sas_phy->phy);
9637        if (rc)
9638                return rc;
9639
9640        sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9641        list_add_tail(&hpsa_sas_phy->phy_list_entry,
9642                        &hpsa_sas_port->phy_list_head);
9643        hpsa_sas_phy->added_to_port = true;
9644
9645        return 0;
9646}
9647
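/*
 * Copy the port's SAS address and (STP) protocols into the remote phy's
 * identify data, then register the rphy with the transport class.
 */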
9648static int
9649hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9650                        struct sas_rphy *rphy)
9651{
9652        struct sas_identify *identify;
9653
9654        identify = &rphy->identify;
9655        identify->sas_address = hpsa_sas_port->sas_address;
9656        identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9657        identify->target_port_protocols = SAS_PROTOCOL_STP;
9658
9659        return sas_rphy_add(rphy);
9660}
9661
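/*
 * Allocate and register a transport-class port for the given SAS
 * address, wrapped in our own bookkeeping structure and linked onto the
 * owning node's port list.  Returns NULL on any failure, with any
 * partially constructed state unwound.
 */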
9662static struct hpsa_sas_port *
9663hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9664                        u64 sas_address)
9665{
9666        int rc;
9667        struct hpsa_sas_port *hpsa_sas_port;
9668        struct sas_port *port;
9669
9670        hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9671        if (!hpsa_sas_port)
9672                return NULL;
9673
9674        INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9675        hpsa_sas_port->parent_node = hpsa_sas_node;
9676
9677        port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9678        if (!port)
9679                goto free_hpsa_port;
9680
9681        rc = sas_port_add(port);
9682        if (rc)
9683                goto free_sas_port;
9684
9685        hpsa_sas_port->port = port;
9686        hpsa_sas_port->sas_address = sas_address;
9687        list_add_tail(&hpsa_sas_port->port_list_entry,
9688                        &hpsa_sas_node->port_list_head);
9689
9690        return hpsa_sas_port;
9691
9692free_sas_port:
9693        sas_port_free(port);
9694free_hpsa_port:
9695        kfree(hpsa_sas_port);
9696
9697        return NULL;
9698}
9699
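/*
 * Tear down a port: free any phys still attached, delete the transport
 * port (which also disposes of an attached rphy), then unlink and free
 * the wrapper.
 */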
9700static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9701{
9702        struct hpsa_sas_phy *hpsa_sas_phy;
9703        struct hpsa_sas_phy *next;
9704
9705        list_for_each_entry_safe(hpsa_sas_phy, next,
9706                        &hpsa_sas_port->phy_list_head, phy_list_entry)
9707                hpsa_free_sas_phy(hpsa_sas_phy);
9708
9709        sas_port_delete(hpsa_sas_port->port);
9710        list_del(&hpsa_sas_port->port_list_entry);
9711        kfree(hpsa_sas_port);
9712}
9713
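/*
 * A node is just an anchor for a list of ports under parent_dev; the
 * controller itself is represented by one such node (h->sas_host).
 */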
9714static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9715{
9716        struct hpsa_sas_node *hpsa_sas_node;
9717
9718        hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9719        if (hpsa_sas_node) {
9720                hpsa_sas_node->parent_dev = parent_dev;
9721                INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9722        }
9723
9724        return hpsa_sas_node;
9725}
9726
9727static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9728{
9729        struct hpsa_sas_port *hpsa_sas_port;
9730        struct hpsa_sas_port *next;
9731
9732        if (!hpsa_sas_node)
9733                return;
9734
9735        list_for_each_entry_safe(hpsa_sas_port, next,
9736                        &hpsa_sas_node->port_list_head, port_list_entry)
9737                hpsa_free_sas_port(hpsa_sas_port);
9738
9739        kfree(hpsa_sas_node);
9740}
9741
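/*
 * Map a transport-class rphy back to the hpsa device that owns it by
 * scanning the controller's device table; returns NULL if none matches.
 */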
9742static struct hpsa_scsi_dev_t *
9743hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9744                                struct sas_rphy *rphy)
9745{
9746        int i;
9747        struct hpsa_scsi_dev_t *device;
9748
9749        for (i = 0; i < h->ndevices; i++) {
9750                device = h->dev[i];
9751                if (!device->sas_port)
9752                        continue;
9753                if (device->sas_port->rphy == rphy)
9754                        return device;
9755        }
9756
9757        return NULL;
9758}
9759
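/*
 * Present the controller itself to the SAS transport class: one node
 * hanging off the Scsi_Host, holding a single port/phy pair that
 * carries the controller's own SAS address.
 */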
9760static int hpsa_add_sas_host(struct ctlr_info *h)
9761{
9762        int rc;
9763        struct device *parent_dev;
9764        struct hpsa_sas_node *hpsa_sas_node;
9765        struct hpsa_sas_port *hpsa_sas_port;
9766        struct hpsa_sas_phy *hpsa_sas_phy;
9767
9768        parent_dev = &h->scsi_host->shost_dev;
9769
9770        hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9771        if (!hpsa_sas_node)
9772                return -ENOMEM;
9773
9774        hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9775        if (!hpsa_sas_port) {
9776                rc = -ENODEV;
9777                goto free_sas_node;
9778        }
9779
9780        hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9781        if (!hpsa_sas_phy) {
9782                rc = -ENODEV;
9783                goto free_sas_port;
9784        }
9785
9786        rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9787        if (rc)
9788                goto free_sas_phy;
9789
9790        h->sas_host = hpsa_sas_node;
9791
9792        return 0;
9793
9794free_sas_phy:
9795        hpsa_free_sas_phy(hpsa_sas_phy);
9796free_sas_port:
9797        hpsa_free_sas_port(hpsa_sas_port);
9798free_sas_node:
9799        hpsa_free_sas_node(hpsa_sas_node);
9800
9801        return rc;
9802}
9803
9804static void hpsa_delete_sas_host(struct ctlr_info *h)
9805{
9806        hpsa_free_sas_node(h->sas_host);
9807}
9808
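/*
 * Expose one physical device: give it its own port under the host's
 * node and hang an end-device rphy with the device's SAS address off
 * that port.  The error path frees the port and clears
 * device->sas_port.
 */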
9809static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9810                                struct hpsa_scsi_dev_t *device)
9811{
9812        int rc;
9813        struct hpsa_sas_port *hpsa_sas_port;
9814        struct sas_rphy *rphy;
9815
9816        hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9817        if (!hpsa_sas_port)
9818                return -ENOMEM;
9819
9820        rphy = sas_end_device_alloc(hpsa_sas_port->port);
9821        if (!rphy) {
9822                rc = -ENODEV;
9823                goto free_sas_port;
9824        }
9825
9826        hpsa_sas_port->rphy = rphy;
9827        device->sas_port = hpsa_sas_port;
9828
9829        rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9830        if (rc)
9831                goto free_sas_port;
9832
9833        return 0;
9834
9835free_sas_port:
9836        hpsa_free_sas_port(hpsa_sas_port);
9837        device->sas_port = NULL;
9838
9839        return rc;
9840}
9841
9842static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9843{
9844        if (device->sas_port) {
9845                hpsa_free_sas_port(device->sas_port);
9846                device->sas_port = NULL;
9847        }
9848}
9849
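/*
 * The SAS transport class requires the callbacks below, but there is
 * little the driver can usefully report for most of them, so they are
 * minimal stubs.  Only get_enclosure_identifier returns real data (the
 * enclosure identifier cached in the device table as sd->eli).
 */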
9850static int
9851hpsa_sas_get_linkerrors(struct sas_phy *phy)
9852{
9853        return 0;
9854}
9855
9856static int
9857hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9858{
9859        struct Scsi_Host *shost = phy_to_shost(rphy);
9860        struct ctlr_info *h;
9861        struct hpsa_scsi_dev_t *sd;
9862
9863        if (!shost)
9864                return -ENXIO;
9865
9866        h = shost_to_hba(shost);
9867
9868        if (!h)
9869                return -ENXIO;
9870
9871        sd = hpsa_find_device_by_sas_rphy(h, rphy);
9872        if (!sd)
9873                return -ENXIO;
9874
9875        *identifier = sd->eli;
9876
9877        return 0;
9878}
9879
9880static int
9881hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9882{
9883        return -ENXIO;
9884}
9885
9886static int
9887hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9888{
9889        return 0;
9890}
9891
9892static int
9893hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9894{
9895        return 0;
9896}
9897
9898static int
9899hpsa_sas_phy_setup(struct sas_phy *phy)
9900{
9901        return 0;
9902}
9903
9904static void
9905hpsa_sas_phy_release(struct sas_phy *phy)
9906{
9907}
9908
9909static int
9910hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9911{
9912        return -EINVAL;
9913}
9914
9915static struct sas_function_template hpsa_sas_transport_functions = {
9916        .get_linkerrors = hpsa_sas_get_linkerrors,
9917        .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9918        .get_bay_identifier = hpsa_sas_get_bay_identifier,
9919        .phy_reset = hpsa_sas_phy_reset,
9920        .phy_enable = hpsa_sas_phy_enable,
9921        .phy_setup = hpsa_sas_phy_setup,
9922        .phy_release = hpsa_sas_phy_release,
9923        .set_phy_speed = hpsa_sas_phy_speed,
9924};
9925
9926/*
9927 *  This is it.  Register the PCI driver information for the cards we control;
9928 *  the OS will call our registered routines when it finds one of our cards.
9929 */
9930static int __init hpsa_init(void)
9931{
9932        int rc;
9933
9934        hpsa_sas_transport_template =
9935                sas_attach_transport(&hpsa_sas_transport_functions);
9936        if (!hpsa_sas_transport_template)
9937                return -ENODEV;
9938
9939        rc = pci_register_driver(&hpsa_pci_driver);
9940
9941        if (rc)
9942                sas_release_transport(hpsa_sas_transport_template);
9943
9944        return rc;
9945}
9946
9947static void __exit hpsa_cleanup(void)
9948{
9949        pci_unregister_driver(&hpsa_pci_driver);
9950        sas_release_transport(hpsa_sas_transport_template);
9951}
9952
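/*
 * Compile-time layout checks for the structures shared with firmware.
 * Each VERIFY_OFFSET() below expands to something like:
 *
 *	BUILD_BUG_ON(offsetof(struct raid_map_data, structure_size) != 0);
 *
 * so the build (not the boot) fails if a member ever drifts from the
 * offset the firmware expects.  The function is never called; the
 * unused attribute merely keeps the compiler quiet about that.
 */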
9953static void __attribute__((unused)) verify_offsets(void)
9954{
9955#define VERIFY_OFFSET(member, offset) \
9956        BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9957
9958        VERIFY_OFFSET(structure_size, 0);
9959        VERIFY_OFFSET(volume_blk_size, 4);
9960        VERIFY_OFFSET(volume_blk_cnt, 8);
9961        VERIFY_OFFSET(phys_blk_shift, 16);
9962        VERIFY_OFFSET(parity_rotation_shift, 17);
9963        VERIFY_OFFSET(strip_size, 18);
9964        VERIFY_OFFSET(disk_starting_blk, 20);
9965        VERIFY_OFFSET(disk_blk_cnt, 28);
9966        VERIFY_OFFSET(data_disks_per_row, 36);
9967        VERIFY_OFFSET(metadata_disks_per_row, 38);
9968        VERIFY_OFFSET(row_cnt, 40);
9969        VERIFY_OFFSET(layout_map_count, 42);
9970        VERIFY_OFFSET(flags, 44);
9971        VERIFY_OFFSET(dekindex, 46);
9972        /* VERIFY_OFFSET(reserved, 48); */
9973        VERIFY_OFFSET(data, 64);
9974
9975#undef VERIFY_OFFSET
9976
9977#define VERIFY_OFFSET(member, offset) \
9978        BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9979
9980        VERIFY_OFFSET(IU_type, 0);
9981        VERIFY_OFFSET(direction, 1);
9982        VERIFY_OFFSET(reply_queue, 2);
9983        /* VERIFY_OFFSET(reserved1, 3);  */
9984        VERIFY_OFFSET(scsi_nexus, 4);
9985        VERIFY_OFFSET(Tag, 8);
9986        VERIFY_OFFSET(cdb, 16);
9987        VERIFY_OFFSET(cciss_lun, 32);
9988        VERIFY_OFFSET(data_len, 40);
9989        VERIFY_OFFSET(cmd_priority_task_attr, 44);
9990        VERIFY_OFFSET(sg_count, 45);
9991        /* VERIFY_OFFSET(reserved3, ...); */
9992        VERIFY_OFFSET(err_ptr, 48);
9993        VERIFY_OFFSET(err_len, 56);
9994        /* VERIFY_OFFSET(reserved4, ...); */
9995        VERIFY_OFFSET(sg, 64);
9996
9997#undef VERIFY_OFFSET
9998
9999#define VERIFY_OFFSET(member, offset) \
10000        BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
10001
10002        VERIFY_OFFSET(dev_handle, 0x00);
10003        VERIFY_OFFSET(reserved1, 0x02);
10004        VERIFY_OFFSET(function, 0x03);
10005        VERIFY_OFFSET(reserved2, 0x04);
10006        VERIFY_OFFSET(err_info, 0x0C);
10007        VERIFY_OFFSET(reserved3, 0x10);
10008        VERIFY_OFFSET(err_info_len, 0x12);
10009        VERIFY_OFFSET(reserved4, 0x13);
10010        VERIFY_OFFSET(sgl_offset, 0x14);
10011        VERIFY_OFFSET(reserved5, 0x15);
10012        VERIFY_OFFSET(transfer_len, 0x1C);
10013        VERIFY_OFFSET(reserved6, 0x20);
10014        VERIFY_OFFSET(io_flags, 0x24);
10015        VERIFY_OFFSET(reserved7, 0x26);
10016        VERIFY_OFFSET(LUN, 0x34);
10017        VERIFY_OFFSET(control, 0x3C);
10018        VERIFY_OFFSET(CDB, 0x40);
10019        VERIFY_OFFSET(reserved8, 0x50);
10020        VERIFY_OFFSET(host_context_flags, 0x60);
10021        VERIFY_OFFSET(timeout_sec, 0x62);
10022        VERIFY_OFFSET(ReplyQueue, 0x64);
10023        VERIFY_OFFSET(reserved9, 0x65);
10024        VERIFY_OFFSET(tag, 0x68);
10025        VERIFY_OFFSET(host_addr, 0x70);
10026        VERIFY_OFFSET(CISS_LUN, 0x78);
10027        VERIFY_OFFSET(SG, 0x78 + 8);
10028#undef VERIFY_OFFSET
10029}
10030
10031module_init(hpsa_init);
10032module_exit(hpsa_cleanup);
10033