linux/drivers/scsi/ipr.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
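
/*
 * Illustrative lookup (a sketch of what ipr_get_chip_info() does later in
 * this file): the probe path walks ipr_chip[] comparing PCI IDs to bind an
 * adapter to its register layout, e.g.:
 *
 *	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *		if (ipr_chip[i].vendor == dev_id->vendor &&
 *		    ipr_chip[i].device == dev_id->device)
 *			return &ipr_chip[i];
 */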

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
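
/*
 * Example (illustrative only): these are ordinary module parameters, so a
 * debug-enabled load with a reduced MSI-X count would look like:
 *
 *	modprobe ipr debug=1 number_of_msix=2
 *
 * Parameters declared with S_IRUGO | S_IWUSR (fastfail, debug, fast_reboot)
 * are also visible and writable at runtime under
 * /sys/module/ipr/parameters/.
 */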

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
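
/*
 * Note on the compare vectors above (hedged sketch of the matching rule;
 * see ipr_find_ses_entry() later in this file): an 'X' in
 * compare_product_id_byte[] means "this byte of the product ID must match",
 * while any other character (e.g. '*') marks a byte that is ignored, which
 * lets one entry cover several related backplane models.
 */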

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

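	/*
	 * Lockless ring buffer: the atomic increment is masked down to the
	 * size of the trace array (IPR_TRACE_INDEX_MASK), so concurrent
	 * callers each claim a distinct slot and the oldest entries are
	 * overwritten once the index wraps.
	 */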
	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR the
 * appropriate size bits into the IOARRIN value.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
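
/*
 * Worked example for the size encoding above (illustrative): each
 * struct ipr_ioadl64_desc is 16 bytes, so a command using more than
 * eight scatter/gather entries (8 * 16 = 128 bytes) no longer fits the
 * default 256 byte IOARCB, and bit 0x4 requests the 512 byte format.
 * Bit 0x1 is always set to indicate the 256 byte base size.
 */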

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
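
/*
 * Usage sketch (illustrative; ipr_send_blocking_cmd() below is the real
 * caller of this pattern): a driver-initiated op is queued with its
 * completion and timeout handlers, e.g.:
 *
 *	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, ipr_timeout,
 *		   IPR_INTERNAL_TIMEOUT);
 *
 * where ipr_timeout is the timeout handler defined later in this file and
 * IPR_INTERNAL_TIMEOUT comes from ipr.h.
 */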

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

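	/*
	 * The caller holds host_lock; drop it across the sleep so the
	 * interrupt path can acquire it to complete this command.
	 */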
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
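
/*
 * Example (illustrative): with hrrq_num == 4, successive calls return
 * 1, 2, 3, 1, 2, 3, ... Queue 0 (IPR_INIT_HRRQ) is kept for
 * initialization and internal ops, so normal I/O is round-robined
 * across the remaining queues.
 */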
1094
1095/**
1096 * ipr_send_hcam - Send an HCAM to the adapter.
1097 * @ioa_cfg:    ioa config struct
1098 * @type:               HCAM type
1099 * @hostrcb:    hostrcb struct
1100 *
1101 * This function will send a Host Controlled Async command to the adapter.
1102 * If HCAMs are currently not allowed to be issued to the adapter, it will
1103 * place the hostrcb on the free queue.
1104 *
1105 * Return value:
1106 *      none
1107 **/
1108static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1109                          struct ipr_hostrcb *hostrcb)
1110{
1111        struct ipr_cmnd *ipr_cmd;
1112        struct ipr_ioarcb *ioarcb;
1113
1114        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1115                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1116                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1117                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1118
1119                ipr_cmd->u.hostrcb = hostrcb;
1120                ioarcb = &ipr_cmd->ioarcb;
1121
1122                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1123                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1124                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1125                ioarcb->cmd_pkt.cdb[1] = type;
1126                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1127                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1128
1129                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1130                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1131
1132                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1133                        ipr_cmd->done = ipr_process_ccn;
1134                else
1135                        ipr_cmd->done = ipr_process_error;
1136
1137                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1138
1139                ipr_send_command(ipr_cmd);
1140        } else {
1141                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1142        }
1143}
1144
1145/**
1146 * ipr_update_ata_class - Update the ata class in the resource entry
1147 * @res:        resource entry struct
1148 * @proto:      cfgte device bus protocol value
1149 *
1150 * Return value:
1151 *      none
1152 **/
1153static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1154{
1155        switch (proto) {
1156        case IPR_PROTO_SATA:
1157        case IPR_PROTO_SAS_STP:
1158                res->ata_class = ATA_DEV_ATA;
1159                break;
1160        case IPR_PROTO_SATA_ATAPI:
1161        case IPR_PROTO_SAS_STP_ATAPI:
1162                res->ata_class = ATA_DEV_ATAPI;
1163                break;
1164        default:
1165                res->ata_class = ATA_DEV_UNKNOWN;
1166                break;
1167        };
1168}
1169
1170/**
1171 * ipr_init_res_entry - Initialize a resource entry struct.
1172 * @res:        resource entry struct
1173 * @cfgtew:     config table entry wrapper struct
1174 *
1175 * Return value:
1176 *      none
1177 **/
1178static void ipr_init_res_entry(struct ipr_resource_entry *res,
1179                               struct ipr_config_table_entry_wrapper *cfgtew)
1180{
1181        int found = 0;
1182        unsigned int proto;
1183        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1184        struct ipr_resource_entry *gscsi_res = NULL;
1185
1186        res->needs_sync_complete = 0;
1187        res->in_erp = 0;
1188        res->add_to_ml = 0;
1189        res->del_from_ml = 0;
1190        res->resetting_device = 0;
1191        res->reset_occurred = 0;
1192        res->sdev = NULL;
1193        res->sata_port = NULL;
1194
1195        if (ioa_cfg->sis64) {
1196                proto = cfgtew->u.cfgte64->proto;
1197                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1198                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1199                res->qmodel = IPR_QUEUEING_MODEL64(res);
1200                res->type = cfgtew->u.cfgte64->res_type;
1201
1202                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1203                        sizeof(res->res_path));
1204
1205                res->bus = 0;
1206                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1207                        sizeof(res->dev_lun.scsi_lun));
1208                res->lun = scsilun_to_int(&res->dev_lun);
1209
1210                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1211                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1212                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1213                                        found = 1;
1214                                        res->target = gscsi_res->target;
1215                                        break;
1216                                }
1217                        }
1218                        if (!found) {
1219                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
1220                                                                  ioa_cfg->max_devs_supported);
1221                                set_bit(res->target, ioa_cfg->target_ids);
1222                        }
1223                } else if (res->type == IPR_RES_TYPE_IOAFP) {
1224                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
1225                        res->target = 0;
1226                } else if (res->type == IPR_RES_TYPE_ARRAY) {
1227                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
1228                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
1229                                                          ioa_cfg->max_devs_supported);
1230                        set_bit(res->target, ioa_cfg->array_ids);
1231                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1232                        res->bus = IPR_VSET_VIRTUAL_BUS;
1233                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1234                                                          ioa_cfg->max_devs_supported);
1235                        set_bit(res->target, ioa_cfg->vset_ids);
1236                } else {
1237                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
1238                                                          ioa_cfg->max_devs_supported);
1239                        set_bit(res->target, ioa_cfg->target_ids);
1240                }
1241        } else {
1242                proto = cfgtew->u.cfgte->proto;
1243                res->qmodel = IPR_QUEUEING_MODEL(res);
1244                res->flags = cfgtew->u.cfgte->flags;
1245                if (res->flags & IPR_IS_IOA_RESOURCE)
1246                        res->type = IPR_RES_TYPE_IOAFP;
1247                else
1248                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1249
1250                res->bus = cfgtew->u.cfgte->res_addr.bus;
1251                res->target = cfgtew->u.cfgte->res_addr.target;
1252                res->lun = cfgtew->u.cfgte->res_addr.lun;
1253                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1254        }
1255
1256        ipr_update_ata_class(res, proto);
1257}
1258
1259/**
1260 * ipr_is_same_device - Determine if two devices are the same.
1261 * @res:        resource entry struct
1262 * @cfgtew:     config table entry wrapper struct
1263 *
1264 * Return value:
1265 *      1 if the devices are the same / 0 otherwise
1266 **/
1267static int ipr_is_same_device(struct ipr_resource_entry *res,
1268                              struct ipr_config_table_entry_wrapper *cfgtew)
1269{
1270        if (res->ioa_cfg->sis64) {
1271                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1272                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
1273                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1274                                        sizeof(cfgtew->u.cfgte64->lun))) {
1275                        return 1;
1276                }
1277        } else {
1278                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1279                    res->target == cfgtew->u.cfgte->res_addr.target &&
1280                    res->lun == cfgtew->u.cfgte->res_addr.lun)
1281                        return 1;
1282        }
1283
1284        return 0;
1285}
1286
1287/**
1288 * __ipr_format_res_path - Format the resource path for printing.
1289 * @res_path:   resource path
1290 * @buf:        buffer
1291 * @len:        length of buffer provided
1292 *
1293 * Return value:
1294 *      pointer to buffer
1295 **/
1296static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1297{
1298        int i;
1299        char *p = buffer;
1300
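        /*
         * res_path is a 0xff-terminated byte array; each element emits
         * three characters ("-XX"), so the i * 3 bound keeps the loop
         * inside the caller's buffer even without a terminator.
         */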
1301        *p = '\0';
1302        p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1303        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1304                p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1305
1306        return buffer;
1307}
1308
1309/**
1310 * ipr_format_res_path - Format the resource path for printing.
1311 * @ioa_cfg:    ioa config struct
1312 * @res_path:   resource path
1313 * @buffer:     buffer to hold the formatted path
1314 * @len:        length of buffer provided
1315 *
1316 * Return value:
1317 *      pointer to buffer
1318 **/
1319static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1320                                 u8 *res_path, char *buffer, int len)
1321{
1322        char *p = buffer;
1323
1324        *p = '\0';
1325        p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1326        __ipr_format_res_path(res_path, p, len - (p - buffer));
1327        return buffer;
1328}
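
/*
 * Illustrative example (not part of the driver): with host_no 2 and a
 * res_path of { 0x00, 0x0a, 0xff, ... }, __ipr_format_res_path() yields
 * "00-0A" and ipr_format_res_path() yields "2/00-0A".
 */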
1329
1330/**
1331 * ipr_update_res_entry - Update the resource entry.
1332 * @res:        resource entry struct
1333 * @cfgtew:     config table entry wrapper struct
1334 *
1335 * Return value:
1336 *      none
1337 **/
1338static void ipr_update_res_entry(struct ipr_resource_entry *res,
1339                                 struct ipr_config_table_entry_wrapper *cfgtew)
1340{
1341        char buffer[IPR_MAX_RES_PATH_LENGTH];
1342        unsigned int proto;
1343        int new_path = 0;
1344
1345        if (res->ioa_cfg->sis64) {
1346                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1347                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1348                res->type = cfgtew->u.cfgte64->res_type;
1349
1350                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1351                        sizeof(struct ipr_std_inq_data));
1352
1353                res->qmodel = IPR_QUEUEING_MODEL64(res);
1354                proto = cfgtew->u.cfgte64->proto;
1355                res->res_handle = cfgtew->u.cfgte64->res_handle;
1356                res->dev_id = cfgtew->u.cfgte64->dev_id;
1357
1358                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1359                        sizeof(res->dev_lun.scsi_lun));
1360
1361                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
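                /*
                 * Detect topology changes: remember a new resource path
                 * and, if a scsi_device is attached, log it below.
                 */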
1362                                        sizeof(res->res_path))) {
1363                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1364                                sizeof(res->res_path));
1365                        new_path = 1;
1366                }
1367
1368                if (res->sdev && new_path)
1369                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1370                                    ipr_format_res_path(res->ioa_cfg,
1371                                        res->res_path, buffer, sizeof(buffer)));
1372        } else {
1373                res->flags = cfgtew->u.cfgte->flags;
1374                if (res->flags & IPR_IS_IOA_RESOURCE)
1375                        res->type = IPR_RES_TYPE_IOAFP;
1376                else
1377                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1378
1379                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1380                        sizeof(struct ipr_std_inq_data));
1381
1382                res->qmodel = IPR_QUEUEING_MODEL(res);
1383                proto = cfgtew->u.cfgte->proto;
1384                res->res_handle = cfgtew->u.cfgte->res_handle;
1385        }
1386
1387        ipr_update_ata_class(res, proto);
1388}
1389
1390/**
1391 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1392 *                        for the resource.
1393 * @res:        resource entry struct
1395 *
1396 * Return value:
1397 *      none
1398 **/
1399static void ipr_clear_res_target(struct ipr_resource_entry *res)
1400{
1401        struct ipr_resource_entry *gscsi_res = NULL;
1402        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1403
1404        if (!ioa_cfg->sis64)
1405                return;
1406
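        /*
         * Virtual bus target IDs were allocated from per-class bitmaps in
         * ipr_init_res_entry(); release the matching bit here. A generic
         * SCSI target may be shared by several LUNs with the same dev_id,
         * so it is only released once no other resource references it.
         */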
1407        if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1408                clear_bit(res->target, ioa_cfg->array_ids);
1409        else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1410                clear_bit(res->target, ioa_cfg->vset_ids);
1411        else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1412                list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1413                        if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1414                                return;
1415                clear_bit(res->target, ioa_cfg->target_ids);
1417        } else if (res->bus == 0)
1418                clear_bit(res->target, ioa_cfg->target_ids);
1419}
1420
1421/**
1422 * ipr_handle_config_change - Handle a config change from the adapter
1423 * @ioa_cfg:    ioa config struct
1424 * @hostrcb:    hostrcb
1425 *
1426 * Return value:
1427 *      none
1428 **/
1429static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1430                                     struct ipr_hostrcb *hostrcb)
1431{
1432        struct ipr_resource_entry *res = NULL;
1433        struct ipr_config_table_entry_wrapper cfgtew;
1434        __be32 cc_res_handle;
1435
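        /* Assume a new-device notification until the handle is found below */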
1436        u32 is_ndn = 1;
1437
1438        if (ioa_cfg->sis64) {
1439                cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1440                cc_res_handle = cfgtew.u.cfgte64->res_handle;
1441        } else {
1442                cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1443                cc_res_handle = cfgtew.u.cfgte->res_handle;
1444        }
1445
1446        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1447                if (res->res_handle == cc_res_handle) {
1448                        is_ndn = 0;
1449                        break;
1450                }
1451        }
1452
1453        if (is_ndn) {
1454                if (list_empty(&ioa_cfg->free_res_q)) {
1455                        ipr_send_hcam(ioa_cfg,
1456                                      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1457                                      hostrcb);
1458                        return;
1459                }
1460
1461                res = list_entry(ioa_cfg->free_res_q.next,
1462                                 struct ipr_resource_entry, queue);
1463
1464                list_del(&res->queue);
1465                ipr_init_res_entry(res, &cfgtew);
1466                list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1467        }
1468
1469        ipr_update_res_entry(res, &cfgtew);
1470
1471        if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1472                if (res->sdev) {
1473                        res->del_from_ml = 1;
1474                        res->res_handle = IPR_INVALID_RES_HANDLE;
1475                        schedule_work(&ioa_cfg->work_q);
1476                } else {
1477                        ipr_clear_res_target(res);
1478                        list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1479                }
1480        } else if (!res->sdev || res->del_from_ml) {
1481                res->add_to_ml = 1;
1482                schedule_work(&ioa_cfg->work_q);
1483        }
1484
1485        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1486}
1487
1488/**
1489 * ipr_process_ccn - Op done function for a CCN.
1490 * @ipr_cmd:    ipr command struct
1491 *
1492 * This function is the op done function for a configuration change
1493 * notification host controlled async message (HCAM) from the adapter.
1494 *
1495 * Return value:
1496 *      none
1497 **/
1498static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1499{
1500        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1501        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1502        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1503
1504        list_del_init(&hostrcb->queue);
1505        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1506
1507        if (ioasc) {
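                /*
                 * IOA_WAS_RESET and ABORTED_CMD_TERM_BY_HOST are expected
                 * when outstanding HCAMs are pulled back during an adapter
                 * reset, so they are not logged as errors.
                 */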
1508                if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1509                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1510                        dev_err(&ioa_cfg->pdev->dev,
1511                                "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1512
1513                ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1514        } else {
1515                ipr_handle_config_change(ioa_cfg, hostrcb);
1516        }
1517}
1518
1519/**
1520 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1521 * @i:          index into buffer
1522 * @buf:                string to modify
1523 *
1524 * This function will strip all trailing whitespace, pad the end
1525 * of the string with a single space, and NULL terminate the string.
1526 *
1527 * Return value:
1528 *      new length of string
1529 **/
1530static int strip_and_pad_whitespace(int i, char *buf)
1531{
1532        while (i && buf[i] == ' ')
1533                i--;
1534        buf[i+1] = ' ';
1535        buf[i+2] = '\0';
1536        return i + 2;
1537}
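
/*
 * Worked example (illustrative): with buf = "IBM   " and i = 5, the loop
 * backs up to the 'M' at index 2, writes a pad space at buf[3] and a
 * terminator at buf[4], and returns 4, leaving "IBM ".
 */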
1538
1539/**
1540 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1541 * @prefix:             string to print at start of printk
1542 * @hostrcb:    hostrcb pointer
1543 * @vpd:                vendor/product id/sn struct
1544 *
1545 * Return value:
1546 *      none
1547 **/
1548static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1549                                struct ipr_vpd *vpd)
1550{
1551        char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1552        int i = 0;
1553
1554        memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1555        i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1556
1557        memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1558        i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1559
1560        memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1561        buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1562
1563        ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1564}
1565
1566/**
1567 * ipr_log_vpd - Log the passed VPD to the error log.
1568 * @vpd:                vendor/product id/sn struct
1569 *
1570 * Return value:
1571 *      none
1572 **/
1573static void ipr_log_vpd(struct ipr_vpd *vpd)
1574{
1575        char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1576                    + IPR_SERIAL_NUM_LEN];
1577
1578        memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1579        memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1580               IPR_PROD_ID_LEN);
1581        buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1582        ipr_err("Vendor/Product ID: %s\n", buffer);
1583
1584        memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1585        buffer[IPR_SERIAL_NUM_LEN] = '\0';
1586        ipr_err("    Serial Number: %s\n", buffer);
1587}
1588
1589/**
1590 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1591 * @prefix:             string to print at start of printk
1592 * @hostrcb:    hostrcb pointer
1593 * @vpd:                vendor/product id/sn/wwn struct
1594 *
1595 * Return value:
1596 *      none
1597 **/
1598static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1599                                    struct ipr_ext_vpd *vpd)
1600{
1601        ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1602        ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1603                     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1604}
1605
1606/**
1607 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1608 * @vpd:                vendor/product id/sn/wwn struct
1609 *
1610 * Return value:
1611 *      none
1612 **/
1613static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1614{
1615        ipr_log_vpd(&vpd->vpd);
1616        ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1617                be32_to_cpu(vpd->wwid[1]));
1618}
1619
1620/**
1621 * ipr_log_enhanced_cache_error - Log a cache error.
1622 * @ioa_cfg:    ioa config struct
1623 * @hostrcb:    hostrcb struct
1624 *
1625 * Return value:
1626 *      none
1627 **/
1628static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1629                                         struct ipr_hostrcb *hostrcb)
1630{
1631        struct ipr_hostrcb_type_12_error *error;
1632
1633        if (ioa_cfg->sis64)
1634                error = &hostrcb->hcam.u.error64.u.type_12_error;
1635        else
1636                error = &hostrcb->hcam.u.error.u.type_12_error;
1637
1638        ipr_err("-----Current Configuration-----\n");
1639        ipr_err("Cache Directory Card Information:\n");
1640        ipr_log_ext_vpd(&error->ioa_vpd);
1641        ipr_err("Adapter Card Information:\n");
1642        ipr_log_ext_vpd(&error->cfc_vpd);
1643
1644        ipr_err("-----Expected Configuration-----\n");
1645        ipr_err("Cache Directory Card Information:\n");
1646        ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1647        ipr_err("Adapter Card Information:\n");
1648        ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1649
1650        ipr_err("Additional IOA Data: %08X %08X %08X\n",
1651                     be32_to_cpu(error->ioa_data[0]),
1652                     be32_to_cpu(error->ioa_data[1]),
1653                     be32_to_cpu(error->ioa_data[2]));
1654}
1655
1656/**
1657 * ipr_log_cache_error - Log a cache error.
1658 * @ioa_cfg:    ioa config struct
1659 * @hostrcb:    hostrcb struct
1660 *
1661 * Return value:
1662 *      none
1663 **/
1664static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1665                                struct ipr_hostrcb *hostrcb)
1666{
1667        struct ipr_hostrcb_type_02_error *error =
1668                &hostrcb->hcam.u.error.u.type_02_error;
1669
1670        ipr_err("-----Current Configuration-----\n");
1671        ipr_err("Cache Directory Card Information:\n");
1672        ipr_log_vpd(&error->ioa_vpd);
1673        ipr_err("Adapter Card Information:\n");
1674        ipr_log_vpd(&error->cfc_vpd);
1675
1676        ipr_err("-----Expected Configuration-----\n");
1677        ipr_err("Cache Directory Card Information:\n");
1678        ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1679        ipr_err("Adapter Card Information:\n");
1680        ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1681
1682        ipr_err("Additional IOA Data: %08X %08X %08X\n",
1683                     be32_to_cpu(error->ioa_data[0]),
1684                     be32_to_cpu(error->ioa_data[1]),
1685                     be32_to_cpu(error->ioa_data[2]));
1686}
1687
1688/**
1689 * ipr_log_enhanced_config_error - Log a configuration error.
1690 * @ioa_cfg:    ioa config struct
1691 * @hostrcb:    hostrcb struct
1692 *
1693 * Return value:
1694 *      none
1695 **/
1696static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1697                                          struct ipr_hostrcb *hostrcb)
1698{
1699        int errors_logged, i;
1700        struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1701        struct ipr_hostrcb_type_13_error *error;
1702
1703        error = &hostrcb->hcam.u.error.u.type_13_error;
1704        errors_logged = be32_to_cpu(error->errors_logged);
1705
1706        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1707                be32_to_cpu(error->errors_detected), errors_logged);
1708
1709        dev_entry = error->dev;
1710
1711        for (i = 0; i < errors_logged; i++, dev_entry++) {
1712                ipr_err_separator;
1713
1714                ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1715                ipr_log_ext_vpd(&dev_entry->vpd);
1716
1717                ipr_err("-----New Device Information-----\n");
1718                ipr_log_ext_vpd(&dev_entry->new_vpd);
1719
1720                ipr_err("Cache Directory Card Information:\n");
1721                ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1722
1723                ipr_err("Adapter Card Information:\n");
1724                ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1725        }
1726}
1727
1728/**
1729 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1730 * @ioa_cfg:    ioa config struct
1731 * @hostrcb:    hostrcb struct
1732 *
1733 * Return value:
1734 *      none
1735 **/
1736static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1737                                       struct ipr_hostrcb *hostrcb)
1738{
1739        int errors_logged, i;
1740        struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1741        struct ipr_hostrcb_type_23_error *error;
1742        char buffer[IPR_MAX_RES_PATH_LENGTH];
1743
1744        error = &hostrcb->hcam.u.error64.u.type_23_error;
1745        errors_logged = be32_to_cpu(error->errors_logged);
1746
1747        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1748                be32_to_cpu(error->errors_detected), errors_logged);
1749
1750        dev_entry = error->dev;
1751
1752        for (i = 0; i < errors_logged; i++, dev_entry++) {
1753                ipr_err_separator;
1754
1755                ipr_err("Device %d : %s", i + 1,
1756                        __ipr_format_res_path(dev_entry->res_path,
1757                                              buffer, sizeof(buffer)));
1758                ipr_log_ext_vpd(&dev_entry->vpd);
1759
1760                ipr_err("-----New Device Information-----\n");
1761                ipr_log_ext_vpd(&dev_entry->new_vpd);
1762
1763                ipr_err("Cache Directory Card Information:\n");
1764                ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1765
1766                ipr_err("Adapter Card Information:\n");
1767                ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1768        }
1769}
1770
1771/**
1772 * ipr_log_config_error - Log a configuration error.
1773 * @ioa_cfg:    ioa config struct
1774 * @hostrcb:    hostrcb struct
1775 *
1776 * Return value:
1777 *      none
1778 **/
1779static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1780                                 struct ipr_hostrcb *hostrcb)
1781{
1782        int errors_logged, i;
1783        struct ipr_hostrcb_device_data_entry *dev_entry;
1784        struct ipr_hostrcb_type_03_error *error;
1785
1786        error = &hostrcb->hcam.u.error.u.type_03_error;
1787        errors_logged = be32_to_cpu(error->errors_logged);
1788
1789        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1790                be32_to_cpu(error->errors_detected), errors_logged);
1791
1792        dev_entry = error->dev;
1793
1794        for (i = 0; i < errors_logged; i++, dev_entry++) {
1795                ipr_err_separator;
1796
1797                ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1798                ipr_log_vpd(&dev_entry->vpd);
1799
1800                ipr_err("-----New Device Information-----\n");
1801                ipr_log_vpd(&dev_entry->new_vpd);
1802
1803                ipr_err("Cache Directory Card Information:\n");
1804                ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1805
1806                ipr_err("Adapter Card Information:\n");
1807                ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1808
1809                ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1810                        be32_to_cpu(dev_entry->ioa_data[0]),
1811                        be32_to_cpu(dev_entry->ioa_data[1]),
1812                        be32_to_cpu(dev_entry->ioa_data[2]),
1813                        be32_to_cpu(dev_entry->ioa_data[3]),
1814                        be32_to_cpu(dev_entry->ioa_data[4]));
1815        }
1816}
1817
1818/**
1819 * ipr_log_enhanced_array_error - Log an array configuration error.
1820 * @ioa_cfg:    ioa config struct
1821 * @hostrcb:    hostrcb struct
1822 *
1823 * Return value:
1824 *      none
1825 **/
1826static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1827                                         struct ipr_hostrcb *hostrcb)
1828{
1829        int i, num_entries;
1830        struct ipr_hostrcb_type_14_error *error;
1831        struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1832        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1833
1834        error = &hostrcb->hcam.u.error.u.type_14_error;
1835
1836        ipr_err_separator;
1837
1838        ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1839                error->protection_level,
1840                ioa_cfg->host->host_no,
1841                error->last_func_vset_res_addr.bus,
1842                error->last_func_vset_res_addr.target,
1843                error->last_func_vset_res_addr.lun);
1844
1845        ipr_err_separator;
1846
1847        array_entry = error->array_member;
1848        num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1849                            ARRAY_SIZE(error->array_member));
1850
1851        for (i = 0; i < num_entries; i++, array_entry++) {
1852                if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1853                        continue;
1854
1855                if (be32_to_cpu(error->exposed_mode_adn) == i)
1856                        ipr_err("Exposed Array Member %d:\n", i);
1857                else
1858                        ipr_err("Array Member %d:\n", i);
1859
1860                ipr_log_ext_vpd(&array_entry->vpd);
1861                ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1862                ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1863                                 "Expected Location");
1864
1865                ipr_err_separator;
1866        }
1867}
1868
1869/**
1870 * ipr_log_array_error - Log an array configuration error.
1871 * @ioa_cfg:    ioa config struct
1872 * @hostrcb:    hostrcb struct
1873 *
1874 * Return value:
1875 *      none
1876 **/
1877static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1878                                struct ipr_hostrcb *hostrcb)
1879{
1880        int i;
1881        struct ipr_hostrcb_type_04_error *error;
1882        struct ipr_hostrcb_array_data_entry *array_entry;
1883        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1884
1885        error = &hostrcb->hcam.u.error.u.type_04_error;
1886
1887        ipr_err_separator;
1888
1889        ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1890                error->protection_level,
1891                ioa_cfg->host->host_no,
1892                error->last_func_vset_res_addr.bus,
1893                error->last_func_vset_res_addr.target,
1894                error->last_func_vset_res_addr.lun);
1895
1896        ipr_err_separator;
1897
1898        array_entry = error->array_member;
1899
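        /*
         * The 18 possible entries span two arrays: indices 0-9 come from
         * array_member[] and 10-17 from array_member2[], hence the pointer
         * switch after index 9 at the bottom of the loop.
         */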
1900        for (i = 0; i < 18; i++) {
1901                if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1902                        continue;
1903
1904                if (be32_to_cpu(error->exposed_mode_adn) == i)
1905                        ipr_err("Exposed Array Member %d:\n", i);
1906                else
1907                        ipr_err("Array Member %d:\n", i);
1908
1909                ipr_log_vpd(&array_entry->vpd);
1910
1911                ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1912                ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1913                                 "Expected Location");
1914
1915                ipr_err_separator;
1916
1917                if (i == 9)
1918                        array_entry = error->array_member2;
1919                else
1920                        array_entry++;
1921        }
1922}
1923
1924/**
1925 * ipr_log_hex_data - Log additional hex IOA error data.
1926 * @ioa_cfg:    ioa config struct
1927 * @data:               IOA error data
1928 * @len:                data length
1929 *
1930 * Return value:
1931 *      none
1932 **/
1933static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1934{
1935        int i;
1936
1937        if (len == 0)
1938                return;
1939
1940        if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1941                len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1942
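        /*
         * len is in bytes and data[] holds big-endian 32-bit words, so
         * len / 4 is the word count; i advances four words per line and
         * i * 4 is the byte offset printed at the start of each line.
         */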
1943        for (i = 0; i < len / 4; i += 4) {
1944                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1945                        be32_to_cpu(data[i]),
1946                        be32_to_cpu(data[i+1]),
1947                        be32_to_cpu(data[i+2]),
1948                        be32_to_cpu(data[i+3]));
1949        }
1950}
1951
1952/**
1953 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1954 * @ioa_cfg:    ioa config struct
1955 * @hostrcb:    hostrcb struct
1956 *
1957 * Return value:
1958 *      none
1959 **/
1960static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1961                                            struct ipr_hostrcb *hostrcb)
1962{
1963        struct ipr_hostrcb_type_17_error *error;
1964
1965        if (ioa_cfg->sis64)
1966                error = &hostrcb->hcam.u.error64.u.type_17_error;
1967        else
1968                error = &hostrcb->hcam.u.error.u.type_17_error;
1969
1970        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1971        strim(error->failure_reason);
1972
1973        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1974                     be32_to_cpu(hostrcb->hcam.u.error.prc));
1975        ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1976        ipr_log_hex_data(ioa_cfg, error->data,
1977                         be32_to_cpu(hostrcb->hcam.length) -
1978                         (offsetof(struct ipr_hostrcb_error, u) +
1979                          offsetof(struct ipr_hostrcb_type_17_error, data)));
1980}
1981
1982/**
1983 * ipr_log_dual_ioa_error - Log a dual adapter error.
1984 * @ioa_cfg:    ioa config struct
1985 * @hostrcb:    hostrcb struct
1986 *
1987 * Return value:
1988 *      none
1989 **/
1990static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1991                                   struct ipr_hostrcb *hostrcb)
1992{
1993        struct ipr_hostrcb_type_07_error *error;
1994
1995        error = &hostrcb->hcam.u.error.u.type_07_error;
1996        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1997        strim(error->failure_reason);
1998
1999        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2000                     be32_to_cpu(hostrcb->hcam.u.error.prc));
2001        ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2002        ipr_log_hex_data(ioa_cfg, error->data,
2003                         be32_to_cpu(hostrcb->hcam.length) -
2004                         (offsetof(struct ipr_hostrcb_error, u) +
2005                          offsetof(struct ipr_hostrcb_type_07_error, data)));
2006}
2007
2008static const struct {
2009        u8 active;
2010        char *desc;
2011} path_active_desc[] = {
2012        { IPR_PATH_NO_INFO, "Path" },
2013        { IPR_PATH_ACTIVE, "Active path" },
2014        { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2015};
2016
2017static const struct {
2018        u8 state;
2019        char *desc;
2020} path_state_desc[] = {
2021        { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2022        { IPR_PATH_HEALTHY, "is healthy" },
2023        { IPR_PATH_DEGRADED, "is degraded" },
2024        { IPR_PATH_FAILED, "is failed" }
2025};
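
/*
 * fabric->path_state packs an activity component and a state component;
 * the loggers below split it with IPR_PATH_ACTIVE_MASK and
 * IPR_PATH_STATE_MASK and translate each part via these tables.
 */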
2026
2027/**
2028 * ipr_log_fabric_path - Log a fabric path error
2029 * @hostrcb:    hostrcb struct
2030 * @fabric:             fabric descriptor
2031 *
2032 * Return value:
2033 *      none
2034 **/
2035static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2036                                struct ipr_hostrcb_fabric_desc *fabric)
2037{
2038        int i, j;
2039        u8 path_state = fabric->path_state;
2040        u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2041        u8 state = path_state & IPR_PATH_STATE_MASK;
2042
2043        for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2044                if (path_active_desc[i].active != active)
2045                        continue;
2046
2047                for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2048                        if (path_state_desc[j].state != state)
2049                                continue;
2050
2051                        if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2052                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2053                                             path_active_desc[i].desc, path_state_desc[j].desc,
2054                                             fabric->ioa_port);
2055                        } else if (fabric->cascaded_expander == 0xff) {
2056                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2057                                             path_active_desc[i].desc, path_state_desc[j].desc,
2058                                             fabric->ioa_port, fabric->phy);
2059                        } else if (fabric->phy == 0xff) {
2060                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2061                                             path_active_desc[i].desc, path_state_desc[j].desc,
2062                                             fabric->ioa_port, fabric->cascaded_expander);
2063                        } else {
2064                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2065                                             path_active_desc[i].desc, path_state_desc[j].desc,
2066                                             fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2067                        }
2068                        return;
2069                }
2070        }
2071
2072        ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2073                fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2074}
2075
2076/**
2077 * ipr_log64_fabric_path - Log a fabric path error
2078 * @hostrcb:    hostrcb struct
2079 * @fabric:             fabric descriptor
2080 *
2081 * Return value:
2082 *      none
2083 **/
2084static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2085                                  struct ipr_hostrcb64_fabric_desc *fabric)
2086{
2087        int i, j;
2088        u8 path_state = fabric->path_state;
2089        u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2090        u8 state = path_state & IPR_PATH_STATE_MASK;
2091        char buffer[IPR_MAX_RES_PATH_LENGTH];
2092
2093        for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2094                if (path_active_desc[i].active != active)
2095                        continue;
2096
2097                for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2098                        if (path_state_desc[j].state != state)
2099                                continue;
2100
2101                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2102                                     path_active_desc[i].desc, path_state_desc[j].desc,
2103                                     ipr_format_res_path(hostrcb->ioa_cfg,
2104                                                fabric->res_path,
2105                                                buffer, sizeof(buffer)));
2106                        return;
2107                }
2108        }
2109
2110        ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2111                ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2112                                    buffer, sizeof(buffer)));
2113}
2114
2115static const struct {
2116        u8 type;
2117        char *desc;
2118} path_type_desc[] = {
2119        { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2120        { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2121        { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2122        { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2123};
2124
2125static const struct {
2126        u8 status;
2127        char *desc;
2128} path_status_desc[] = {
2129        { IPR_PATH_CFG_NO_PROB, "Functional" },
2130        { IPR_PATH_CFG_DEGRADED, "Degraded" },
2131        { IPR_PATH_CFG_FAILED, "Failed" },
2132        { IPR_PATH_CFG_SUSPECT, "Suspect" },
2133        { IPR_PATH_NOT_DETECTED, "Missing" },
2134        { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2135};
2136
2137static const char *link_rate[] = {
2138        "unknown",
2139        "disabled",
2140        "phy reset problem",
2141        "spinup hold",
2142        "port selector",
2143        "unknown",
2144        "unknown",
2145        "unknown",
2146        "1.5Gbps",
2147        "3.0Gbps",
2148        "unknown",
2149        "unknown",
2150        "unknown",
2151        "unknown",
2152        "unknown",
2153        "unknown"
2154};
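
/*
 * Indexed by the negotiated link rate field (cfg->link_rate &
 * IPR_PHY_LINK_RATE_MASK); the sixteen entries cover every possible
 * field value, with 0x8 and 0x9 mapping to the 1.5Gbps and 3.0Gbps
 * SAS rates.
 */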
2155
2156/**
2157 * ipr_log_path_elem - Log a fabric path element.
2158 * @hostrcb:    hostrcb struct
2159 * @cfg:                fabric path element struct
2160 *
2161 * Return value:
2162 *      none
2163 **/
2164static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2165                              struct ipr_hostrcb_config_element *cfg)
2166{
2167        int i, j;
2168        u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2169        u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2170
2171        if (type == IPR_PATH_CFG_NOT_EXIST)
2172                return;
2173
2174        for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2175                if (path_type_desc[i].type != type)
2176                        continue;
2177
2178                for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2179                        if (path_status_desc[j].status != status)
2180                                continue;
2181
2182                        if (type == IPR_PATH_CFG_IOA_PORT) {
2183                                ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2184                                             path_status_desc[j].desc, path_type_desc[i].desc,
2185                                             cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2186                                             be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2187                        } else {
2188                                if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2189                                        ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2190                                                     path_status_desc[j].desc, path_type_desc[i].desc,
2191                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2192                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2193                                } else if (cfg->cascaded_expander == 0xff) {
2194                                        ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2195                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2196                                                     path_type_desc[i].desc, cfg->phy,
2197                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2198                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2199                                } else if (cfg->phy == 0xff) {
2200                                        ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2201                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2202                                                     path_type_desc[i].desc, cfg->cascaded_expander,
2203                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2204                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2205                                } else {
2206                                        ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2207                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2208                                                     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2209                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2210                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2211                                }
2212                        }
2213                        return;
2214                }
2215        }
2216
2217        ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2218                     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2219                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2220                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2221}
2222
2223/**
2224 * ipr_log64_path_elem - Log a fabric path element.
2225 * @hostrcb:    hostrcb struct
2226 * @cfg:                fabric path element struct
2227 *
2228 * Return value:
2229 *      none
2230 **/
2231static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2232                                struct ipr_hostrcb64_config_element *cfg)
2233{
2234        int i, j;
2235        u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2236        u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2237        u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2238        char buffer[IPR_MAX_RES_PATH_LENGTH];
2239
2240        if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2241                return;
2242
2243        for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2244                if (path_type_desc[i].type != type)
2245                        continue;
2246
2247                for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2248                        if (path_status_desc[j].status != status)
2249                                continue;
2250
2251                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2252                                     path_status_desc[j].desc, path_type_desc[i].desc,
2253                                     ipr_format_res_path(hostrcb->ioa_cfg,
2254                                        cfg->res_path, buffer, sizeof(buffer)),
2255                                        link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2256                                        be32_to_cpu(cfg->wwid[0]),
2257                                        be32_to_cpu(cfg->wwid[1]));
2258                        return;
2259                }
2260        }
2261        ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2262                     "WWN=%08X%08X\n", cfg->type_status,
2263                     ipr_format_res_path(hostrcb->ioa_cfg,
2264                        cfg->res_path, buffer, sizeof(buffer)),
2265                        link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2266                        be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2267}
2268
2269/**
2270 * ipr_log_fabric_error - Log a fabric error.
2271 * @ioa_cfg:    ioa config struct
2272 * @hostrcb:    hostrcb struct
2273 *
2274 * Return value:
2275 *      none
2276 **/
2277static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2278                                 struct ipr_hostrcb *hostrcb)
2279{
2280        struct ipr_hostrcb_type_20_error *error;
2281        struct ipr_hostrcb_fabric_desc *fabric;
2282        struct ipr_hostrcb_config_element *cfg;
2283        int i, add_len;
2284
2285        error = &hostrcb->hcam.u.error.u.type_20_error;
2286        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2287        ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2288
2289        add_len = be32_to_cpu(hostrcb->hcam.length) -
2290                (offsetof(struct ipr_hostrcb_error, u) +
2291                 offsetof(struct ipr_hostrcb_type_20_error, desc));
2292
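        /*
         * Walk the variable-length fabric descriptors; each one is followed
         * by its path elements and advances by fabric->length, and anything
         * left over after the last descriptor is dumped as raw hex.
         */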
2293        for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2294                ipr_log_fabric_path(hostrcb, fabric);
2295                for_each_fabric_cfg(fabric, cfg)
2296                        ipr_log_path_elem(hostrcb, cfg);
2297
2298                add_len -= be16_to_cpu(fabric->length);
2299                fabric = (struct ipr_hostrcb_fabric_desc *)
2300                        ((unsigned long)fabric + be16_to_cpu(fabric->length));
2301        }
2302
2303        ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2304}
2305
2306/**
2307 * ipr_log_sis64_array_error - Log a sis64 array error.
2308 * @ioa_cfg:    ioa config struct
2309 * @hostrcb:    hostrcb struct
2310 *
2311 * Return value:
2312 *      none
2313 **/
2314static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2315                                      struct ipr_hostrcb *hostrcb)
2316{
2317        int i, num_entries;
2318        struct ipr_hostrcb_type_24_error *error;
2319        struct ipr_hostrcb64_array_data_entry *array_entry;
2320        char buffer[IPR_MAX_RES_PATH_LENGTH];
2321        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2322
2323        error = &hostrcb->hcam.u.error64.u.type_24_error;
2324
2325        ipr_err_separator;
2326
2327        ipr_err("RAID %s Array Configuration: %s\n",
2328                error->protection_level,
2329                ipr_format_res_path(ioa_cfg, error->last_res_path,
2330                        buffer, sizeof(buffer)));
2331
2332        ipr_err_separator;
2333
2334        array_entry = error->array_member;
2335        num_entries = min_t(u32, error->num_entries,
2336                            ARRAY_SIZE(error->array_member));
2337
2338        for (i = 0; i < num_entries; i++, array_entry++) {
2339
2340                if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2341                        continue;
2342
2343                if (error->exposed_mode_adn == i)
2344                        ipr_err("Exposed Array Member %d:\n", i);
2345                else
2346                        ipr_err("Array Member %d:\n", i);
2347
2348                ipr_err("Array Member %d:\n", i);
2349                ipr_log_ext_vpd(&array_entry->vpd);
2350                ipr_err("Current Location: %s\n",
2351                         ipr_format_res_path(ioa_cfg, array_entry->res_path,
2352                                buffer, sizeof(buffer)));
2353                ipr_err("Expected Location: %s\n",
2354                         ipr_format_res_path(ioa_cfg,
2355                                array_entry->expected_res_path,
2356                                buffer, sizeof(buffer)));
2357
2358                ipr_err_separator;
2359        }
2360}
2361
2362/**
2363 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2364 * @ioa_cfg:    ioa config struct
2365 * @hostrcb:    hostrcb struct
2366 *
2367 * Return value:
2368 *      none
2369 **/
2370static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2371                                       struct ipr_hostrcb *hostrcb)
2372{
2373        struct ipr_hostrcb_type_30_error *error;
2374        struct ipr_hostrcb64_fabric_desc *fabric;
2375        struct ipr_hostrcb64_config_element *cfg;
2376        int i, add_len;
2377
2378        error = &hostrcb->hcam.u.error64.u.type_30_error;
2379
2380        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2381        ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2382
2383        add_len = be32_to_cpu(hostrcb->hcam.length) -
2384                (offsetof(struct ipr_hostrcb64_error, u) +
2385                 offsetof(struct ipr_hostrcb_type_30_error, desc));
2386
2387        for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2388                ipr_log64_fabric_path(hostrcb, fabric);
2389                for_each_fabric_cfg(fabric, cfg)
2390                        ipr_log64_path_elem(hostrcb, cfg);
2391
2392                add_len -= be16_to_cpu(fabric->length);
2393                fabric = (struct ipr_hostrcb64_fabric_desc *)
2394                        ((unsigned long)fabric + be16_to_cpu(fabric->length));
2395        }
2396
2397        ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2398}
2399
2400/**
2401 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2402 * @ioa_cfg:    ioa config struct
2403 * @hostrcb:    hostrcb struct
2404 *
2405 * Return value:
2406 *      none
2407 **/
2408static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2409                                       struct ipr_hostrcb *hostrcb)
2410{
2411        struct ipr_hostrcb_type_41_error *error;
2412
2413        error = &hostrcb->hcam.u.error64.u.type_41_error;
2414
2415        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2416        ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2417        ipr_log_hex_data(ioa_cfg, error->data,
2418                         be32_to_cpu(hostrcb->hcam.length) -
2419                         (offsetof(struct ipr_hostrcb64_error, u) +
2420                          offsetof(struct ipr_hostrcb_type_41_error, data)));
2421}

2422/**
2423 * ipr_log_generic_error - Log an adapter error.
2424 * @ioa_cfg:    ioa config struct
2425 * @hostrcb:    hostrcb struct
2426 *
2427 * Return value:
2428 *      none
2429 **/
2430static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2431                                  struct ipr_hostrcb *hostrcb)
2432{
2433        ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2434                         be32_to_cpu(hostrcb->hcam.length));
2435}
2436
2437/**
2438 * ipr_log_sis64_device_error - Log a sis64 device error.
2439 * @ioa_cfg:    ioa config struct
2440 * @hostrcb:    hostrcb struct
2441 *
2442 * Return value:
2443 *      none
2444 **/
2445static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2446                                         struct ipr_hostrcb *hostrcb)
2447{
2448        struct ipr_hostrcb_type_21_error *error;
2449        char buffer[IPR_MAX_RES_PATH_LENGTH];
2450
2451        error = &hostrcb->hcam.u.error64.u.type_21_error;
2452
2453        ipr_err("-----Failing Device Information-----\n");
2454        ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2455                be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2456                be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2457        ipr_err("Device Resource Path: %s\n",
2458                __ipr_format_res_path(error->res_path,
2459                                      buffer, sizeof(buffer)));
2460        error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2461        error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2462        ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2463        ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2464        ipr_err("SCSI Sense Data:\n");
2465        ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2466        ipr_err("SCSI Command Descriptor Block: \n");
2467        ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2468
2469        ipr_err("Additional IOA Data:\n");
2470        ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2471}
2472
2473/**
2474 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2475 * @ioasc:      IOASC
2476 *
2477 * This function will return the index into the ipr_error_table
2478 * for the specified IOASC. If the IOASC is not in the table,
2479 * 0 will be returned, which points to the entry used for unknown errors.
2480 *
2481 * Return value:
2482 *      index into the ipr_error_table
2483 **/
2484static u32 ipr_get_error(u32 ioasc)
2485{
2486        int i;
2487
2488        for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2489                if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2490                        return i;
2491
2492        return 0;
2493}
2494
2495/**
2496 * ipr_handle_log_data - Log an adapter error.
2497 * @ioa_cfg:    ioa config struct
2498 * @hostrcb:    hostrcb struct
2499 *
2500 * This function logs an adapter error to the system.
2501 *
2502 * Return value:
2503 *      none
2504 **/
2505static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2506                                struct ipr_hostrcb *hostrcb)
2507{
2508        u32 ioasc;
2509        int error_index;
2510        struct ipr_hostrcb_type_21_error *error;
2511
2512        if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2513                return;
2514
2515        if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2516                dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2517
2518        if (ioa_cfg->sis64)
2519                ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2520        else
2521                ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2522
2523        if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2524            ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2525                /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2526                scsi_report_bus_reset(ioa_cfg->host,
2527                                      hostrcb->hcam.u.error.fd_res_addr.bus);
2528        }
2529
2530        error_index = ipr_get_error(ioasc);
2531
2532        if (!ipr_error_table[error_index].log_hcam)
2533                return;
2534
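        /*
         * For HW_CMD_FAILED with a type 21 overlay, extract byte 2 of the
         * embedded sense data (the fixed-format sense key byte) and
         * suppress ILLEGAL_REQUEST entries at the default log level.
         */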
2535        if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2536            hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2537                error = &hostrcb->hcam.u.error64.u.type_21_error;
2538
2539                if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2540                    ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2541                        return;
2542        }
2543
2544        ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2545
2546        /* Set indication we have logged an error */
2547        ioa_cfg->errors_logged++;
2548
2549        if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2550                return;
2551        if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2552                hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2553
2554        switch (hostrcb->hcam.overlay_id) {
2555        case IPR_HOST_RCB_OVERLAY_ID_2:
2556                ipr_log_cache_error(ioa_cfg, hostrcb);
2557                break;
2558        case IPR_HOST_RCB_OVERLAY_ID_3:
2559                ipr_log_config_error(ioa_cfg, hostrcb);
2560                break;
2561        case IPR_HOST_RCB_OVERLAY_ID_4:
2562        case IPR_HOST_RCB_OVERLAY_ID_6:
2563                ipr_log_array_error(ioa_cfg, hostrcb);
2564                break;
2565        case IPR_HOST_RCB_OVERLAY_ID_7:
2566                ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2567                break;
2568        case IPR_HOST_RCB_OVERLAY_ID_12:
2569                ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2570                break;
2571        case IPR_HOST_RCB_OVERLAY_ID_13:
2572                ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2573                break;
2574        case IPR_HOST_RCB_OVERLAY_ID_14:
2575        case IPR_HOST_RCB_OVERLAY_ID_16:
2576                ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2577                break;
2578        case IPR_HOST_RCB_OVERLAY_ID_17:
2579                ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2580                break;
2581        case IPR_HOST_RCB_OVERLAY_ID_20:
2582                ipr_log_fabric_error(ioa_cfg, hostrcb);
2583                break;
2584        case IPR_HOST_RCB_OVERLAY_ID_21:
2585                ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2586                break;
2587        case IPR_HOST_RCB_OVERLAY_ID_23:
2588                ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2589                break;
2590        case IPR_HOST_RCB_OVERLAY_ID_24:
2591        case IPR_HOST_RCB_OVERLAY_ID_26:
2592                ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2593                break;
2594        case IPR_HOST_RCB_OVERLAY_ID_30:
2595                ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2596                break;
2597        case IPR_HOST_RCB_OVERLAY_ID_41:
2598                ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2599                break;
2600        case IPR_HOST_RCB_OVERLAY_ID_1:
2601        case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2602        default:
2603                ipr_log_generic_error(ioa_cfg, hostrcb);
2604                break;
2605        }
2606}
2607
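/**
 * ipr_get_free_hostrcb - Get a free hostrcb.
 * @ioa:        ioa config struct
 *
 * Return value:
 *      pointer to a hostrcb, taken from the free queue or reclaimed
 *      from the oldest entry on the report queue
 **/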
2608static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2609{
2610        struct ipr_hostrcb *hostrcb;
2611
2612        hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2613                                        struct ipr_hostrcb, queue);
2614
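        /*
         * If the free queue is exhausted, steal the oldest buffer still
         * queued for reporting rather than dropping the HCAM entirely.
         */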
2615        if (unlikely(!hostrcb)) {
2616                dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2617                hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2618                                                struct ipr_hostrcb, queue);
2619        }
2620
2621        list_del_init(&hostrcb->queue);
2622        return hostrcb;
2623}
2624
2625/**
2626 * ipr_process_error - Op done function for an adapter error log.
2627 * @ipr_cmd:    ipr command struct
2628 *
2629 * This function is the op done function for an error log host controlled
2630 * async message (HCAM) from the adapter. It will log the error and
2631 * send the HCAM back to the adapter.
2632 *
2633 * Return value:
2634 *      none
2635 **/
2636static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2637{
2638        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2639        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2640        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2641        u32 fd_ioasc;
2642
2643        if (ioa_cfg->sis64)
2644                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2645        else
2646                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2647
2648        list_del_init(&hostrcb->queue);
2649        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2650
2651        if (!ioasc) {
2652                ipr_handle_log_data(ioa_cfg, hostrcb);
2653                if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2654                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2655        } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2656                   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2657                dev_err(&ioa_cfg->pdev->dev,
2658                        "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2659        }
2660
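        /*
         * Queue the processed buffer for reporting, then grab a fresh
         * hostrcb and re-arm error notification with the adapter.
         */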
2661        list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2662        schedule_work(&ioa_cfg->work_q);
2663        hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2664
2665        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2666}
2667
2668/**
2669 * ipr_timeout - An internally generated op has timed out.
2670 * @t:          Timer context used to fetch the ipr command struct
2671 *
2672 * This function blocks host requests and initiates an
2673 * adapter reset.
2674 *
2675 * Return value:
2676 *      none
2677 **/
2678static void ipr_timeout(struct timer_list *t)
2679{
2680        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2681        unsigned long lock_flags = 0;
2682        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2683
2684        ENTER;
2685        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2686
2687        ioa_cfg->errors_logged++;
2688        dev_err(&ioa_cfg->pdev->dev,
2689                "Adapter being reset due to command timeout.\n");
2690
2691        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2692                ioa_cfg->sdt_state = GET_DUMP;
2693
2694        if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2695                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2696
2697        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2698        LEAVE;
2699}
2700
2701/**
2702 * ipr_oper_timeout -  Adapter timed out transitioning to operational
2703 * @t:          Timer context used to fetch the ipr command struct
2704 *
2705 * This function blocks host requests and initiates an
2706 * adapter reset.
2707 *
2708 * Return value:
2709 *      none
2710 **/
2711static void ipr_oper_timeout(struct timer_list *t)
2712{
2713        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2714        unsigned long lock_flags = 0;
2715        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2716
2717        ENTER;
2718        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2719
2720        ioa_cfg->errors_logged++;
2721        dev_err(&ioa_cfg->pdev->dev,
2722                "Adapter timed out transitioning to operational.\n");
2723
2724        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2725                ioa_cfg->sdt_state = GET_DUMP;
2726
2727        if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2728                if (ipr_fastfail)
2729                        ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2730                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2731        }
2732
2733        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734        LEAVE;
2735}
2736
2737/**
2738 * ipr_find_ses_entry - Find matching SES in SES table
2739 * @res:        resource entry struct of SES
2740 *
2741 * Return value:
2742 *      pointer to SES table entry / NULL on failure
2743 **/
2744static const struct ipr_ses_table_entry *
2745ipr_find_ses_entry(struct ipr_resource_entry *res)
2746{
2747        int i, j, matches;
2748        struct ipr_std_inq_vpids *vpids;
2749        const struct ipr_ses_table_entry *ste = ipr_ses_table;
2750
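        /* In the SES table, a compare_product_id_byte of 'X' marks a byte
         * that must match the device's product ID; any other value makes
         * that byte a don't-care, which counts as a match automatically.
         */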
2751        for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2752                for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2753                        if (ste->compare_product_id_byte[j] == 'X') {
2754                                vpids = &res->std_inq_data.vpids;
2755                                if (vpids->product_id[j] == ste->product_id[j])
2756                                        matches++;
2757                                else
2758                                        break;
2759                        } else
2760                                matches++;
2761                }
2762
2763                if (matches == IPR_PROD_ID_LEN)
2764                        return ste;
2765        }
2766
2767        return NULL;
2768}
2769
2770/**
2771 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2772 * @ioa_cfg:    ioa config struct
2773 * @bus:                SCSI bus
2774 * @bus_width:  bus width
2775 *
2776 * Return value:
2777 *      SCSI bus speed in units of 100 kHz; 1600 means 160 MHz.
2778 *      For a 2-byte wide SCSI bus, the maximum data rate in MB/sec
2779 *      is twice the transfer rate in MHz (e.g. for a wide-enabled
2780 *      bus, a max of 160 MHz corresponds to a max of 320 MB/sec).
2781 **/
2782static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2783{
2784        struct ipr_resource_entry *res;
2785        const struct ipr_ses_table_entry *ste;
2786        u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2787
2788        /* Loop through each config table entry in the config table buffer */
2789        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2790                if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2791                        continue;
2792
2793                if (bus != res->bus)
2794                        continue;
2795
2796                if (!(ste = ipr_find_ses_entry(res)))
2797                        continue;
2798
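                /* max_bus_speed_limit is evidently in MB/sec: dividing by
                 * the bus width in bytes and scaling by 10 yields units of
                 * 100 kHz. For example, a 320 MB/sec limit on a 16-bit
                 * (2-byte) bus gives 320 * 10 / 2 = 1600, i.e. 160 MHz.
                 */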
2799                max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2800        }
2801
2802        return max_xfer_rate;
2803}
2804
2805/**
2806 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2807 * @ioa_cfg:            ioa config struct
2808 * @max_delay:          max delay in micro-seconds to wait
2809 *
2810 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2811 *
2812 * Return value:
2813 *      0 on success / other on failure
2814 **/
2815static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2816{
2817        volatile u32 pcii_reg;
2818        int delay = 1;
2819
2820        /* Read interrupt reg until IOA signals IO Debug Acknowledge */
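        /* The poll interval doubles on each pass (1, 2, 4, ... usec),
         * an exponential backoff bounded by max_delay.
         */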
2821        while (delay < max_delay) {
2822                pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2823
2824                if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2825                        return 0;
2826
2827                /* udelay cannot be used if delay is more than a few milliseconds */
2828                if ((delay / 1000) > MAX_UDELAY_MS)
2829                        mdelay(delay / 1000);
2830                else
2831                        udelay(delay);
2832
2833                delay += delay;
2834        }
2835        return -EIO;
2836}
2837
2838/**
2839 * ipr_get_sis64_dump_data_section - Dump IOA memory
2840 * @ioa_cfg:                    ioa config struct
2841 * @start_addr:                 adapter address to dump
2842 * @dest:                       destination kernel buffer
2843 * @length_in_words:            length to dump in 4 byte words
2844 *
2845 * Return value:
2846 *      0 on success
2847 **/
2848static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2849                                           u32 start_addr,
2850                                           __be32 *dest, u32 length_in_words)
2851{
2852        int i;
2853
2854        for (i = 0; i < length_in_words; i++) {
2855                writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2856                *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2857                dest++;
2858        }
2859
2860        return 0;
2861}
2862
2863/**
2864 * ipr_get_ldump_data_section - Dump IOA memory
2865 * @ioa_cfg:                    ioa config struct
2866 * @start_addr:                 adapter address to dump
2867 * @dest:                       destination kernel buffer
2868 * @length_in_words:            length to dump in 4 byte words
2869 *
2870 * Return value:
2871 *      0 on success / -EIO on failure
2872 **/
2873static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2874                                      u32 start_addr,
2875                                      __be32 *dest, u32 length_in_words)
2876{
2877        volatile u32 temp_pcii_reg;
2878        int i, delay = 0;
2879
2880        if (ioa_cfg->sis64)
2881                return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2882                                                       dest, length_in_words);
2883
2884        /* Write IOA interrupt reg starting LDUMP state  */
2885        writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2886               ioa_cfg->regs.set_uproc_interrupt_reg32);
2887
2888        /* Wait for IO debug acknowledge */
2889        if (ipr_wait_iodbg_ack(ioa_cfg,
2890                               IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2891                dev_err(&ioa_cfg->pdev->dev,
2892                        "IOA dump long data transfer timeout\n");
2893                return -EIO;
2894        }
2895
2896        /* Signal LDUMP interlocked - clear IO debug ack */
2897        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2898               ioa_cfg->regs.clr_interrupt_reg);
2899
2900        /* Write Mailbox with starting address */
2901        writel(start_addr, ioa_cfg->ioa_mailbox);
2902
2903        /* Signal address valid - clear IOA Reset alert */
2904        writel(IPR_UPROCI_RESET_ALERT,
2905               ioa_cfg->regs.clr_uproc_interrupt_reg32);
2906
2907        for (i = 0; i < length_in_words; i++) {
2908                /* Wait for IO debug acknowledge */
2909                if (ipr_wait_iodbg_ack(ioa_cfg,
2910                                       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2911                        dev_err(&ioa_cfg->pdev->dev,
2912                                "IOA dump short data transfer timeout\n");
2913                        return -EIO;
2914                }
2915
2916                /* Read data from mailbox and increment destination pointer */
2917                *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2918                dest++;
2919
2920                /* For all but the last word of data, signal data received */
2921                if (i < (length_in_words - 1)) {
2922                        /* Signal dump data received - Clear IO debug Ack */
2923                        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2924                               ioa_cfg->regs.clr_interrupt_reg);
2925                }
2926        }
2927
2928        /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2929        writel(IPR_UPROCI_RESET_ALERT,
2930               ioa_cfg->regs.set_uproc_interrupt_reg32);
2931
2932        writel(IPR_UPROCI_IO_DEBUG_ALERT,
2933               ioa_cfg->regs.clr_uproc_interrupt_reg32);
2934
2935        /* Signal dump data received - Clear IO debug Ack */
2936        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2937               ioa_cfg->regs.clr_interrupt_reg);
2938
2939        /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2940        while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2941                temp_pcii_reg =
2942                    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2943
2944                if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2945                        return 0;
2946
2947                udelay(10);
2948                delay += 10;
2949        }
2950
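        /* Falling out of the wait loop is deliberately not treated as an
         * error: the dump data itself has already been transferred.
         */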
2951        return 0;
2952}
2953
2954#ifdef CONFIG_SCSI_IPR_DUMP
2955/**
2956 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2957 * @ioa_cfg:            ioa config struct
2958 * @pci_address:        adapter address
2959 * @length:                     length of data to copy
2960 *
2961 * Copy data from PCI adapter to kernel buffer.
2962 * Note: length MUST be a 4 byte multiple
2963 * Return value:
2964 *      0 on success / other on failure
2965 **/
2966static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2967                        unsigned long pci_address, u32 length)
2968{
2969        int bytes_copied = 0;
2970        int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2971        __be32 *page;
2972        unsigned long lock_flags = 0;
2973        struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2974
2975        if (ioa_cfg->sis64)
2976                max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2977        else
2978                max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2979
2980        while (bytes_copied < length &&
2981               (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2982                if (ioa_dump->page_offset >= PAGE_SIZE ||
2983                    ioa_dump->page_offset == 0) {
2984                        page = (__be32 *)__get_free_page(GFP_ATOMIC);
2985
2986                        if (!page) {
2987                                ipr_trace;
2988                                return bytes_copied;
2989                        }
2990
2991                        ioa_dump->page_offset = 0;
2992                        ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2993                        ioa_dump->next_page_index++;
2994                } else
2995                        page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2996
2997                rem_len = length - bytes_copied;
2998                rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2999                cur_len = min(rem_len, rem_page_len);
3000
3001                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3002                if (ioa_cfg->sdt_state == ABORT_DUMP) {
3003                        rc = -EIO;
3004                } else {
3005                        rc = ipr_get_ldump_data_section(ioa_cfg,
3006                                                        pci_address + bytes_copied,
3007                                                        &page[ioa_dump->page_offset / 4],
3008                                                        (cur_len / sizeof(u32)));
3009                }
3010                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3011
3012                if (!rc) {
3013                        ioa_dump->page_offset += cur_len;
3014                        bytes_copied += cur_len;
3015                } else {
3016                        ipr_trace;
3017                        break;
3018                }
3019                schedule();
3020        }
3021
3022        return bytes_copied;
3023}
3024
3025/**
3026 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3027 * @hdr:        dump entry header struct
3028 *
3029 * Return value:
3030 *      nothing
3031 **/
3032static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3033{
3034        hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3035        hdr->num_elems = 1;
3036        hdr->offset = sizeof(*hdr);
3037        hdr->status = IPR_DUMP_STATUS_SUCCESS;
3038}
3039
3040/**
3041 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3042 * @ioa_cfg:    ioa config struct
3043 * @driver_dump:        driver dump struct
3044 *
3045 * Return value:
3046 *      nothing
3047 **/
3048static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3049                                   struct ipr_driver_dump *driver_dump)
3050{
3051        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3052
3053        ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3054        driver_dump->ioa_type_entry.hdr.len =
3055                sizeof(struct ipr_dump_ioa_type_entry) -
3056                sizeof(struct ipr_dump_entry_header);
3057        driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3058        driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3059        driver_dump->ioa_type_entry.type = ioa_cfg->type;
3060        driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3061                (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3062                ucode_vpd->minor_release[1];
3063        driver_dump->hdr.num_entries++;
3064}
3065
3066/**
3067 * ipr_dump_version_data - Fill in the driver version in the dump.
3068 * @ioa_cfg:    ioa config struct
3069 * @driver_dump:        driver dump struct
3070 *
3071 * Return value:
3072 *      nothing
3073 **/
3074static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3075                                  struct ipr_driver_dump *driver_dump)
3076{
3077        ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3078        driver_dump->version_entry.hdr.len =
3079                sizeof(struct ipr_dump_version_entry) -
3080                sizeof(struct ipr_dump_entry_header);
3081        driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3082        driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3083        strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3084        driver_dump->hdr.num_entries++;
3085}
3086
3087/**
3088 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3089 * @ioa_cfg:    ioa config struct
3090 * @driver_dump:        driver dump struct
3091 *
3092 * Return value:
3093 *      nothing
3094 **/
3095static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3096                                   struct ipr_driver_dump *driver_dump)
3097{
3098        ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3099        driver_dump->trace_entry.hdr.len =
3100                sizeof(struct ipr_dump_trace_entry) -
3101                sizeof(struct ipr_dump_entry_header);
3102        driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3103        driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3104        memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3105        driver_dump->hdr.num_entries++;
3106}
3107
3108/**
3109 * ipr_dump_location_data - Fill in the IOA location in the dump.
3110 * @ioa_cfg:    ioa config struct
3111 * @driver_dump:        driver dump struct
3112 *
3113 * Return value:
3114 *      nothing
3115 **/
3116static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3117                                   struct ipr_driver_dump *driver_dump)
3118{
3119        ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3120        driver_dump->location_entry.hdr.len =
3121                sizeof(struct ipr_dump_location_entry) -
3122                sizeof(struct ipr_dump_entry_header);
3123        driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3124        driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3125        strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3126        driver_dump->hdr.num_entries++;
3127}
3128
3129/**
3130 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3131 * @ioa_cfg:    ioa config struct
3132 * @dump:               dump struct
3133 *
3134 * Return value:
3135 *      nothing
3136 **/
3137static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3138{
3139        unsigned long start_addr, sdt_word;
3140        unsigned long lock_flags = 0;
3141        struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3142        struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3143        u32 num_entries, max_num_entries, start_off, end_off;
3144        u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3145        struct ipr_sdt *sdt;
3146        int valid = 1;
3147        int i;
3148
3149        ENTER;
3150
3151        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3152
3153        if (ioa_cfg->sdt_state != READ_DUMP) {
3154                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3155                return;
3156        }
3157
3158        if (ioa_cfg->sis64) {
3159                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160                ssleep(IPR_DUMP_DELAY_SECONDS);
3161                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3162        }
3163
3164        start_addr = readl(ioa_cfg->ioa_mailbox);
3165
3166        if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3167                dev_err(&ioa_cfg->pdev->dev,
3168                        "Invalid dump table format: %lx\n", start_addr);
3169                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3170                return;
3171        }
3172
3173        dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3174
3175        driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3176
3177        /* Initialize the overall dump header */
3178        driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3179        driver_dump->hdr.num_entries = 1;
3180        driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3181        driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3182        driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3183        driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3184
3185        ipr_dump_version_data(ioa_cfg, driver_dump);
3186        ipr_dump_location_data(ioa_cfg, driver_dump);
3187        ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3188        ipr_dump_trace_data(ioa_cfg, driver_dump);
3189
3190        /* Update dump_header */
3191        driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3192
3193        /* IOA Dump entry */
3194        ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3195        ioa_dump->hdr.len = 0;
3196        ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3197        ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3198
3199        /* First entries in sdt are actually a list of dump addresses and
3200         * lengths to gather the real dump data.  sdt represents the pointer
3201         * to the ioa generated dump table.  Dump data will be extracted based
3202         * on entries in this table. */
3203        sdt = &ioa_dump->sdt;
3204
3205        if (ioa_cfg->sis64) {
3206                max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3207                max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3208        } else {
3209                max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3210                max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3211        }
3212
3213        bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3214                        (max_num_entries * sizeof(struct ipr_sdt_entry));
3215        rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3216                                        bytes_to_copy / sizeof(__be32));
3217
3218        /* Smart Dump table is ready to use and the first entry is valid */
3219        if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3220            (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3221                dev_err(&ioa_cfg->pdev->dev,
3222                        "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3223                        rc, be32_to_cpu(sdt->hdr.state));
3224                driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3225                ioa_cfg->sdt_state = DUMP_OBTAINED;
3226                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227                return;
3228        }
3229
3230        num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3231
3232        if (num_entries > max_num_entries)
3233                num_entries = max_num_entries;
3234
3235        /* Update dump length to the actual data to be copied */
3236        dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3237        if (ioa_cfg->sis64)
3238                dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3239        else
3240                dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3241
3242        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3243
3244        for (i = 0; i < num_entries; i++) {
3245                if (ioa_dump->hdr.len > max_dump_size) {
3246                        driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3247                        break;
3248                }
3249
3250                if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3251                        sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3252                        if (ioa_cfg->sis64)
3253                                bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3254                        else {
3255                                start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3256                                end_off = be32_to_cpu(sdt->entry[i].end_token);
3257
3258                                if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3259                                        bytes_to_copy = end_off - start_off;
3260                                else
3261                                        valid = 0;
3262                        }
3263                        if (valid) {
3264                                if (bytes_to_copy > max_dump_size) {
3265                                        sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3266                                        continue;
3267                                }
3268
3269                                /* Copy data from adapter to driver buffers */
3270                                bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3271                                                            bytes_to_copy);
3272
3273                                ioa_dump->hdr.len += bytes_copied;
3274
3275                                if (bytes_copied != bytes_to_copy) {
3276                                        driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3277                                        break;
3278                                }
3279                        }
3280                }
3281        }
3282
3283        dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3284
3285        /* Update dump_header */
3286        driver_dump->hdr.len += ioa_dump->hdr.len;
3287        wmb();
3288        ioa_cfg->sdt_state = DUMP_OBTAINED;
3289        LEAVE;
3290}
3291
3292#else
3293#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3294#endif
3295
3296/**
3297 * ipr_release_dump - Free adapter dump memory
3298 * @kref:       kref struct
3299 *
3300 * Return value:
3301 *      nothing
3302 **/
3303static void ipr_release_dump(struct kref *kref)
3304{
3305        struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3306        struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3307        unsigned long lock_flags = 0;
3308        int i;
3309
3310        ENTER;
3311        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3312        ioa_cfg->dump = NULL;
3313        ioa_cfg->sdt_state = INACTIVE;
3314        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315
3316        for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3317                free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3318
3319        vfree(dump->ioa_dump.ioa_data);
3320        kfree(dump);
3321        LEAVE;
3322}
3323
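/**
 * ipr_add_remove_thread - Add/remove devices on the SCSI mid-layer
 * @work:       work struct
 *
 * Removes resources flagged for deletion from the mid-layer, then
 * registers any newly detected resources, and finally signals that
 * the scan is complete.
 *
 * Return value:
 *      nothing
 **/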
3324static void ipr_add_remove_thread(struct work_struct *work)
3325{
3326        unsigned long lock_flags;
3327        struct ipr_resource_entry *res;
3328        struct scsi_device *sdev;
3329        struct ipr_ioa_cfg *ioa_cfg =
3330                container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3331        u8 bus, target, lun;
3332        int did_work;
3333
3334        ENTER;
3335        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336
3337restart:
3338        do {
3339                did_work = 0;
3340                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3341                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3342                        return;
3343                }
3344
3345                list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3346                        if (res->del_from_ml && res->sdev) {
3347                                did_work = 1;
3348                                sdev = res->sdev;
3349                                if (!scsi_device_get(sdev)) {
3350                                        if (!res->add_to_ml)
3351                                                list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3352                                        else
3353                                                res->del_from_ml = 0;
3354                                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3355                                        scsi_remove_device(sdev);
3356                                        scsi_device_put(sdev);
3357                                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3358                                }
3359                                break;
3360                        }
3361                }
3362        } while (did_work);
3363
3364        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3365                if (res->add_to_ml) {
3366                        bus = res->bus;
3367                        target = res->target;
3368                        lun = res->lun;
3369                        res->add_to_ml = 0;
3370                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3371                        scsi_add_device(ioa_cfg->host, bus, target, lun);
3372                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3373                        goto restart;
3374                }
3375        }
3376
3377        ioa_cfg->scan_done = 1;
3378        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3379        kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3380        LEAVE;
3381}
3382
3383/**
3384 * ipr_worker_thread - Worker thread
3385 * @work:               ioa config struct
3386 *
3387 * Called at task level from a work thread. This function takes care
3388 * of adding and removing devices from the mid-layer as configuration
3389 * changes are detected by the adapter.
3390 *
3391 * Return value:
3392 *      nothing
3393 **/
3394static void ipr_worker_thread(struct work_struct *work)
3395{
3396        unsigned long lock_flags;
3397        struct ipr_dump *dump;
3398        struct ipr_ioa_cfg *ioa_cfg =
3399                container_of(work, struct ipr_ioa_cfg, work_q);
3400
3401        ENTER;
3402        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3403
3404        if (ioa_cfg->sdt_state == READ_DUMP) {
3405                dump = ioa_cfg->dump;
3406                if (!dump) {
3407                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3408                        return;
3409                }
3410                kref_get(&dump->kref);
3411                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3412                ipr_get_ioa_dump(ioa_cfg, dump);
3413                kref_put(&dump->kref, ipr_release_dump);
3414
3415                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3416                if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3417                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3418                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3419                return;
3420        }
3421
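        /* scsi_unblock_requests() must be called with the host lock
         * dropped; scsi_blocked is re-checked afterwards in case a reset
         * blocked the host again while the lock was released.
         */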
3422        if (ioa_cfg->scsi_unblock) {
3423                ioa_cfg->scsi_unblock = 0;
3424                ioa_cfg->scsi_blocked = 0;
3425                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3426                scsi_unblock_requests(ioa_cfg->host);
3427                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3428                if (ioa_cfg->scsi_blocked)
3429                        scsi_block_requests(ioa_cfg->host);
3430        }
3431
3432        if (!ioa_cfg->scan_enabled) {
3433                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3434                return;
3435        }
3436
3437        schedule_work(&ioa_cfg->scsi_add_work_q);
3438
3439        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440        LEAVE;
3441}
3442
3443#ifdef CONFIG_SCSI_IPR_TRACE
3444/**
3445 * ipr_read_trace - Dump the adapter trace
3446 * @filp:               open sysfs file
3447 * @kobj:               kobject struct
3448 * @bin_attr:           bin_attribute struct
3449 * @buf:                buffer
3450 * @off:                offset
3451 * @count:              buffer size
3452 *
3453 * Return value:
3454 *      number of bytes printed to buffer
3455 **/
3456static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3457                              struct bin_attribute *bin_attr,
3458                              char *buf, loff_t off, size_t count)
3459{
3460        struct device *dev = container_of(kobj, struct device, kobj);
3461        struct Scsi_Host *shost = class_to_shost(dev);
3462        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463        unsigned long lock_flags = 0;
3464        ssize_t ret;
3465
3466        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3467        ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3468                                IPR_TRACE_SIZE);
3469        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3470
3471        return ret;
3472}
3473
3474static struct bin_attribute ipr_trace_attr = {
3475        .attr = {
3476                .name = "trace",
3477                .mode = S_IRUGO,
3478        },
3479        .size = 0,
3480        .read = ipr_read_trace,
3481};
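
/*
 * Illustrative usage (hypothetical host number; requires
 * CONFIG_SCSI_IPR_TRACE, and assumes the driver registers the attribute
 * on the Scsi_Host class device, as class_to_shost() above suggests):
 *   # dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace.bin
 */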
3482#endif
3483
3484/**
3485 * ipr_show_fw_version - Show the firmware version
3486 * @dev:        class device struct
3487 * @buf:        buffer
3488 *
3489 * Return value:
3490 *      number of bytes printed to buffer
3491 **/
3492static ssize_t ipr_show_fw_version(struct device *dev,
3493                                   struct device_attribute *attr, char *buf)
3494{
3495        struct Scsi_Host *shost = class_to_shost(dev);
3496        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3497        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3498        unsigned long lock_flags = 0;
3499        int len;
3500
3501        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3502        len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3503                       ucode_vpd->major_release, ucode_vpd->card_type,
3504                       ucode_vpd->minor_release[0],
3505                       ucode_vpd->minor_release[1]);
3506        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3507        return len;
3508}
3509
3510static struct device_attribute ipr_fw_version_attr = {
3511        .attr = {
3512                .name =         "fw_version",
3513                .mode =         S_IRUGO,
3514        },
3515        .show = ipr_show_fw_version,
3516};
3517
3518/**
3519 * ipr_show_log_level - Show the adapter's error logging level
3520 * @dev:        class device struct
3521 * @buf:        buffer
3522 *
3523 * Return value:
3524 *      number of bytes printed to buffer
3525 **/
3526static ssize_t ipr_show_log_level(struct device *dev,
3527                                   struct device_attribute *attr, char *buf)
3528{
3529        struct Scsi_Host *shost = class_to_shost(dev);
3530        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3531        unsigned long lock_flags = 0;
3532        int len;
3533
3534        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3535        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3536        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3537        return len;
3538}
3539
3540/**
3541 * ipr_store_log_level - Change the adapter's error logging level
3542 * @dev:        class device struct
3543 * @buf:        buffer
3544 *
3545 * Return value:
3546 *      number of bytes consumed from buffer
3547 **/
3548static ssize_t ipr_store_log_level(struct device *dev,
3549                                   struct device_attribute *attr,
3550                                   const char *buf, size_t count)
3551{
3552        struct Scsi_Host *shost = class_to_shost(dev);
3553        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3554        unsigned long lock_flags = 0;
3555
3556        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557        ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3558        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3559        return strlen(buf);
3560}
3561
3562static struct device_attribute ipr_log_level_attr = {
3563        .attr = {
3564                .name =         "log_level",
3565                .mode =         S_IRUGO | S_IWUSR,
3566        },
3567        .show = ipr_show_log_level,
3568        .store = ipr_store_log_level
3569};
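
/*
 * Illustrative usage (hypothetical host number):
 *   # cat /sys/class/scsi_host/host0/log_level
 *   # echo 4 > /sys/class/scsi_host/host0/log_level
 */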
3570
3571/**
3572 * ipr_store_diagnostics - IOA Diagnostics interface
3573 * @dev:        device struct
3574 * @buf:        buffer
3575 * @count:      buffer size
3576 *
3577 * This function will reset the adapter and wait a reasonable
3578 * amount of time for any errors that the adapter might log.
3579 *
3580 * Return value:
3581 *      count on success / other on failure
3582 **/
3583static ssize_t ipr_store_diagnostics(struct device *dev,
3584                                     struct device_attribute *attr,
3585                                     const char *buf, size_t count)
3586{
3587        struct Scsi_Host *shost = class_to_shost(dev);
3588        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3589        unsigned long lock_flags = 0;
3590        int rc = count;
3591
3592        if (!capable(CAP_SYS_ADMIN))
3593                return -EACCES;
3594
3595        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3596        while (ioa_cfg->in_reset_reload) {
3597                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3598                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3599                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3600        }
3601
3602        ioa_cfg->errors_logged = 0;
3603        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3604
3605        if (ioa_cfg->in_reset_reload) {
3606                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3607                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3608
3609                /* Wait for a second for any errors to be logged */
3610                msleep(1000);
3611        } else {
3612                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3613                return -EIO;
3614        }
3615
3616        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3617        if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3618                rc = -EIO;
3619        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3620
3621        return rc;
3622}
3623
3624static struct device_attribute ipr_diagnostics_attr = {
3625        .attr = {
3626                .name =         "run_diagnostics",
3627                .mode =         S_IWUSR,
3628        },
3629        .store = ipr_store_diagnostics
3630};
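
/*
 * Illustrative usage (hypothetical host number; any write triggers the
 * diagnostic reset, the value itself is ignored):
 *   # echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */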
3631
3632/**
3633 * ipr_show_adapter_state - Show the adapter's state
3634 * @dev:        device struct
3635 * @buf:        buffer
3636 *
3637 * Return value:
3638 *      number of bytes printed to buffer
3639 **/
3640static ssize_t ipr_show_adapter_state(struct device *dev,
3641                                      struct device_attribute *attr, char *buf)
3642{
3643        struct Scsi_Host *shost = class_to_shost(dev);
3644        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3645        unsigned long lock_flags = 0;
3646        int len;
3647
3648        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3649        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3650                len = snprintf(buf, PAGE_SIZE, "offline\n");
3651        else
3652                len = snprintf(buf, PAGE_SIZE, "online\n");
3653        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3654        return len;
3655}
3656
3657/**
3658 * ipr_store_adapter_state - Change adapter state
3659 * @dev:        device struct
3660 * @buf:        buffer
3661 * @count:      buffer size
3662 *
3663 * This function will change the adapter's state.
3664 *
3665 * Return value:
3666 *      count on success / other on failure
3667 **/
3668static ssize_t ipr_store_adapter_state(struct device *dev,
3669                                       struct device_attribute *attr,
3670                                       const char *buf, size_t count)
3671{
3672        struct Scsi_Host *shost = class_to_shost(dev);
3673        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3674        unsigned long lock_flags;
3675        int result = count, i;
3676
3677        if (!capable(CAP_SYS_ADMIN))
3678                return -EACCES;
3679
3680        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3681        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3682            !strncmp(buf, "online", 6)) {
3683                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3684                        spin_lock(&ioa_cfg->hrrq[i]._lock);
3685                        ioa_cfg->hrrq[i].ioa_is_dead = 0;
3686                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
3687                }
3688                wmb();
3689                ioa_cfg->reset_retries = 0;
3690                ioa_cfg->in_ioa_bringdown = 0;
3691                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3692        }
3693        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3694        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3695
3696        return result;
3697}
3698
3699static struct device_attribute ipr_ioa_state_attr = {
3700        .attr = {
3701                .name =         "online_state",
3702                .mode =         S_IRUGO | S_IWUSR,
3703        },
3704        .show = ipr_show_adapter_state,
3705        .store = ipr_store_adapter_state
3706};
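
/*
 * Illustrative usage (hypothetical host number):
 *   # cat /sys/class/scsi_host/host0/online_state
 *   # echo online > /sys/class/scsi_host/host0/online_state
 */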
3707
3708/**
3709 * ipr_store_reset_adapter - Reset the adapter
3710 * @dev:        device struct
3711 * @buf:        buffer
3712 * @count:      buffer size
3713 *
3714 * This function will reset the adapter.
3715 *
3716 * Return value:
3717 *      count on success / other on failure
3718 **/
3719static ssize_t ipr_store_reset_adapter(struct device *dev,
3720                                       struct device_attribute *attr,
3721                                       const char *buf, size_t count)
3722{
3723        struct Scsi_Host *shost = class_to_shost(dev);
3724        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3725        unsigned long lock_flags;
3726        int result = count;
3727
3728        if (!capable(CAP_SYS_ADMIN))
3729                return -EACCES;
3730
3731        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3732        if (!ioa_cfg->in_reset_reload)
3733                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3734        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3735        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3736
3737        return result;
3738}
3739
3740static struct device_attribute ipr_ioa_reset_attr = {
3741        .attr = {
3742                .name =         "reset_host",
3743                .mode =         S_IWUSR,
3744        },
3745        .store = ipr_store_reset_adapter
3746};
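
/*
 * Illustrative usage (hypothetical host number; the written value is
 * ignored, any write requests a normal-shutdown adapter reset):
 *   # echo 1 > /sys/class/scsi_host/host0/reset_host
 */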
3747
3748static int ipr_iopoll(struct irq_poll *iop, int budget);
3749/**
3750 * ipr_show_iopoll_weight - Show ipr polling mode
3751 * @dev:        class device struct
3752 * @buf:        buffer
3753 *
3754 * Return value:
3755 *      number of bytes printed to buffer
3756 **/
3757static ssize_t ipr_show_iopoll_weight(struct device *dev,
3758                                   struct device_attribute *attr, char *buf)
3759{
3760        struct Scsi_Host *shost = class_to_shost(dev);
3761        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3762        unsigned long lock_flags = 0;
3763        int len;
3764
3765        spin_lock_irqsave(shost->host_lock, lock_flags);
3766        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3767        spin_unlock_irqrestore(shost->host_lock, lock_flags);
3768
3769        return len;
3770}
3771
3772/**
3773 * ipr_store_iopoll_weight - Change the adapter's polling mode
3774 * @dev:        class device struct
3775 * @buf:        buffer
3776 *
3777 * Return value:
3778 *      number of bytes consumed from buffer
3779 **/
3780static ssize_t ipr_store_iopoll_weight(struct device *dev,
3781                                        struct device_attribute *attr,
3782                                        const char *buf, size_t count)
3783{
3784        struct Scsi_Host *shost = class_to_shost(dev);
3785        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3786        unsigned long user_iopoll_weight;
3787        unsigned long lock_flags = 0;
3788        int i;
3789
3790        if (!ioa_cfg->sis64) {
3791                dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3792                return -EINVAL;
3793        }
3794        if (kstrtoul(buf, 10, &user_iopoll_weight))
3795                return -EINVAL;
3796
3797        if (user_iopoll_weight > 256) {
3798                dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3799                return -EINVAL;
3800        }
3801
3802        if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3803                dev_info(&ioa_cfg->pdev->dev, "Specified irq_poll weight is the same as the current weight\n");
3804                return strlen(buf);
3805        }
3806
3807        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3808                for (i = 1; i < ioa_cfg->hrrq_num; i++)
3809                        irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3810        }
3811
3812        spin_lock_irqsave(shost->host_lock, lock_flags);
3813        ioa_cfg->iopoll_weight = user_iopoll_weight;
3814        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3815                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3816                        irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3817                                        ioa_cfg->iopoll_weight, ipr_iopoll);
3818                }
3819        }
3820        spin_unlock_irqrestore(shost->host_lock, lock_flags);
3821
3822        return strlen(buf);
3823}
3824
3825static struct device_attribute ipr_iopoll_weight_attr = {
3826        .attr = {
3827                .name =         "iopoll_weight",
3828                .mode =         S_IRUGO | S_IWUSR,
3829        },
3830        .show = ipr_show_iopoll_weight,
3831        .store = ipr_store_iopoll_weight
3832};
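
/*
 * Illustrative usage (hypothetical host number; sis64 adapters only,
 * the weight must not exceed 256, and 0 leaves irq_poll disabled):
 *   # echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */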
3833
3834/**
3835 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3836 * @buf_len:            buffer length
3837 *
3838 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3839 * list to use for microcode download
3840 *
3841 * Return value:
3842 *      pointer to sglist / NULL on failure
3843 **/
3844static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3845{
3846        int sg_size, order;
3847        struct ipr_sglist *sglist;
3848
3849        /* Get the minimum size per scatter/gather element */
3850        sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3851
3852        /* Get the actual size per element */
3853        order = get_order(sg_size);
3854
3855        /* Allocate a scatter/gather list for the DMA */
3856        sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3857        if (sglist == NULL) {
3858                ipr_trace;
3859                return NULL;
3860        }
3861        sglist->order = order;
3862        sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3863                                              &sglist->num_sg);
3864        if (!sglist->scatterlist) {
3865                kfree(sglist);
3866                return NULL;
3867        }
3868
3869        return sglist;
3870}
3871
3872/**
3873 * ipr_free_ucode_buffer - Frees a microcode download buffer
3874 * @sglist:             scatter/gather list pointer
3875 *
3876 * Free a DMA'able ucode download buffer previously allocated with
3877 * ipr_alloc_ucode_buffer
3878 *
3879 * Return value:
3880 *      nothing
3881 **/
3882static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3883{
3884        sgl_free_order(sglist->scatterlist, sglist->order);
3885        kfree(sglist);
3886}
3887
3888/**
3889 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3890 * @sglist:             scatter/gather list pointer
3891 * @buffer:             buffer pointer
3892 * @len:                buffer length
3893 *
3894 * Copy a microcode image from a user buffer into a buffer allocated by
3895 * ipr_alloc_ucode_buffer
3896 *
3897 * Return value:
3898 *      0 on success
3899 **/
3900static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3901                                 u8 *buffer, u32 len)
3902{
3903        int bsize_elem, i;
3904        struct scatterlist *sg;
3905        void *kaddr;
3906
3907        /* Determine the actual number of bytes per element */
3908        bsize_elem = PAGE_SIZE * (1 << sglist->order);
3909
3910        sg = sglist->scatterlist;
3911
3912        for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3913                        buffer += bsize_elem) {
3914                struct page *page = sg_page(sg);
3915
3916                kaddr = kmap(page);
3917                memcpy(kaddr, buffer, bsize_elem);
3918                kunmap(page);
3919
3920                sg->length = bsize_elem;
3926        }
3927
3928        if (len % bsize_elem) {
3929                struct page *page = sg_page(sg);
3930
3931                kaddr = kmap(page);
3932                memcpy(kaddr, buffer, len % bsize_elem);
3933                kunmap(page);
3934
3935                sg->length = len % bsize_elem;
3936        }
3937
3938        sglist->buffer_len = len;
3939        return 0;
3940}
3941
3942/**
3943 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3944 * @ipr_cmd:            ipr command struct
3945 * @sglist:             scatter/gather list
3946 *
3947 * Builds a microcode download IOA data list (IOADL).
3948 *
3949 **/
3950static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3951                                    struct ipr_sglist *sglist)
3952{
3953        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3954        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3955        struct scatterlist *scatterlist = sglist->scatterlist;
3956        struct scatterlist *sg;
3957        int i;
3958
3959        ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3960        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3961        ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3962
3963        ioarcb->ioadl_len =
3964                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3965        for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3966                ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3967                ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3968                ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3969        }
3970
3971        ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3972}
3973
3974/**
3975 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3976 * @ipr_cmd:    ipr command struct
3977 * @sglist:             scatter/gather list
3978 *
3979 * Builds a microcode download IOA data list (IOADL).
3980 *
3981 **/
3982static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3983                                  struct ipr_sglist *sglist)
3984{
3985        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3986        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3987        struct scatterlist *scatterlist = sglist->scatterlist;
3988        struct scatterlist *sg;
3989        int i;
3990
3991        ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3992        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3993        ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3994
3995        ioarcb->ioadl_len =
3996                cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3997
3998        for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3999                ioadl[i].flags_and_data_len =
4000                        cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
4001                ioadl[i].address =
4002                        cpu_to_be32(sg_dma_address(sg));
4003        }
4004
4005        ioadl[i-1].flags_and_data_len |=
4006                cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4007}
4008
4009/**
4010 * ipr_update_ioa_ucode - Update IOA's microcode
4011 * @ioa_cfg:    ioa config struct
4012 * @sglist:             scatter/gather list
4013 *
4014 * Initiate an adapter reset to update the IOA's microcode
4015 *
4016 * Return value:
4017 *      0 on success / -EIO on failure
4018 **/
4019static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4020                                struct ipr_sglist *sglist)
4021{
4022        unsigned long lock_flags;
4023
4024        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4025        while (ioa_cfg->in_reset_reload) {
4026                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4027                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4028                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4029        }
4030
4031        if (ioa_cfg->ucode_sglist) {
4032                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4033                dev_err(&ioa_cfg->pdev->dev,
4034                        "Microcode download already in progress\n");
4035                return -EIO;
4036        }
4037
4038        sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4039                                        sglist->scatterlist, sglist->num_sg,
4040                                        DMA_TO_DEVICE);
4041
4042        if (!sglist->num_dma_sg) {
4043                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044                dev_err(&ioa_cfg->pdev->dev,
4045                        "Failed to map microcode download buffer!\n");
4046                return -EIO;
4047        }
4048
4049        ioa_cfg->ucode_sglist = sglist;
4050        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4051        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4052        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4053
4054        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4055        ioa_cfg->ucode_sglist = NULL;
4056        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4057        return 0;
4058}
4059
4060/**
4061 * ipr_store_update_fw - Update the firmware on the adapter
4062 * @dev:        device struct
4063 * @buf:        buffer
4064 * @count:      buffer size
4065 *
4066 * This function will update the firmware on the adapter.
4067 *
4068 * Return value:
4069 *      count on success / other on failure
4070 **/
4071static ssize_t ipr_store_update_fw(struct device *dev,
4072                                   struct device_attribute *attr,
4073                                   const char *buf, size_t count)
4074{
4075        struct Scsi_Host *shost = class_to_shost(dev);
4076        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4077        struct ipr_ucode_image_header *image_hdr;
4078        const struct firmware *fw_entry;
4079        struct ipr_sglist *sglist;
4080        char fname[100];
4081        char *src;
4082        char *endline;
4083        int result, dnld_size;
4084
4085        if (!capable(CAP_SYS_ADMIN))
4086                return -EACCES;
4087
4088        snprintf(fname, sizeof(fname), "%s", buf);
4089
4090        endline = strchr(fname, '\n');
4091        if (endline)
4092                *endline = '\0';
4093
4094        if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4095                dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4096                return -EIO;
4097        }
4098
4099        image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4100
4101        src = (char *)image_hdr + be32_to_cpu(image_hdr->header_length);
4102        dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4103        sglist = ipr_alloc_ucode_buffer(dnld_size);
4104
4105        if (!sglist) {
4106                dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4107                release_firmware(fw_entry);
4108                return -ENOMEM;
4109        }
4110
4111        result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4112
4113        if (result) {
4114                dev_err(&ioa_cfg->pdev->dev,
4115                        "Microcode buffer copy to DMA buffer failed\n");
4116                goto out;
4117        }
4118
4119        ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4120
4121        result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4122
4123        if (!result)
4124                result = count;
4125out:
4126        ipr_free_ucode_buffer(sglist);
4127        release_firmware(fw_entry);
4128        return result;
4129}
4130
4131static struct device_attribute ipr_update_fw_attr = {
4132        .attr = {
4133                .name =         "update_fw",
4134                .mode =         S_IWUSR,
4135        },
4136        .store = ipr_store_update_fw
4137};
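
/*
 * Illustrative usage (hypothetical host and image names; the image is
 * fetched via request_firmware(), so it must reside in the firmware
 * search path, e.g. /lib/firmware):
 *   # echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */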
4138
4139/**
4140 * ipr_show_fw_type - Show the adapter's firmware type.
4141 * @dev:        class device struct
4142 * @buf:        buffer
4143 *
4144 * Return value:
4145 *      number of bytes printed to buffer
4146 **/
4147static ssize_t ipr_show_fw_type(struct device *dev,
4148                                struct device_attribute *attr, char *buf)
4149{
4150        struct Scsi_Host *shost = class_to_shost(dev);
4151        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4152        unsigned long lock_flags = 0;
4153        int len;
4154
4155        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4156        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4157        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4158        return len;
4159}
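    /*
     * Example read (host number hypothetical); the attribute simply
     * reports ioa_cfg->sis64, so 64-bit SIS adapters print 1 and 32-bit
     * SIS adapters print 0:
     *
     *   # cat /sys/class/scsi_host/host0/fw_type
     *   1
     */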
4160
4161static struct device_attribute ipr_ioa_fw_type_attr = {
4162        .attr = {
4163                .name =         "fw_type",
4164                .mode =         S_IRUGO,
4165        },
4166        .show = ipr_show_fw_type
4167};
4168
4169static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4170                                struct bin_attribute *bin_attr, char *buf,
4171                                loff_t off, size_t count)
4172{
4173        struct device *cdev = container_of(kobj, struct device, kobj);
4174        struct Scsi_Host *shost = class_to_shost(cdev);
4175        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4176        struct ipr_hostrcb *hostrcb;
4177        unsigned long lock_flags = 0;
4178        int ret;
4179
4180        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4181        hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4182                                        struct ipr_hostrcb, queue);
4183        if (!hostrcb) {
4184                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4185                return 0;
4186        }
4187        ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4188                                sizeof(hostrcb->hcam));
4189        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4190        return ret;
4191}
4192
4193static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4194                                struct bin_attribute *bin_attr, char *buf,
4195                                loff_t off, size_t count)
4196{
4197        struct device *cdev = container_of(kobj, struct device, kobj);
4198        struct Scsi_Host *shost = class_to_shost(cdev);
4199        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4200        struct ipr_hostrcb *hostrcb;
4201        unsigned long lock_flags = 0;
4202
4203        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4204        hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4205                                        struct ipr_hostrcb, queue);
4206        if (!hostrcb) {
4207                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4208                return count;
4209        }
4210
4211        /* Reclaim hostrcb before exit */
4212        list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4213        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4214        return count;
4215}
4216
4217static struct bin_attribute ipr_ioa_async_err_log = {
4218        .attr = {
4219                .name =         "async_err_log",
4220                .mode =         S_IRUGO | S_IWUSR,
4221        },
4222        .size = 0,
4223        .read = ipr_read_async_err_log,
4224        .write = ipr_next_async_err_log
4225};
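    /*
     * Consumption protocol sketch for async_err_log (path illustrative):
     * a read returns the oldest HCAM on hostrcb_report_q without
     * consuming it, and any write retires that entry to hostrcb_free_q,
     * so userspace alternates read and write until a read returns 0:
     *
     *   # cat /sys/class/scsi_host/host0/async_err_log > hcam.bin
     *   # echo > /sys/class/scsi_host/host0/async_err_log
     */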
4226
4227static struct device_attribute *ipr_ioa_attrs[] = {
4228        &ipr_fw_version_attr,
4229        &ipr_log_level_attr,
4230        &ipr_diagnostics_attr,
4231        &ipr_ioa_state_attr,
4232        &ipr_ioa_reset_attr,
4233        &ipr_update_fw_attr,
4234        &ipr_ioa_fw_type_attr,
4235        &ipr_iopoll_weight_attr,
4236        NULL,
4237};
4238
4239#ifdef CONFIG_SCSI_IPR_DUMP
4240/**
4241 * ipr_read_dump - Dump the adapter
4242 * @filp:               open sysfs file
4243 * @kobj:               kobject struct
4244 * @bin_attr:           bin_attribute struct
4245 * @buf:                buffer
4246 * @off:                offset
4247 * @count:              buffer size
4248 *
4249 * Return value:
4250 *      number of bytes printed to buffer
4251 **/
4252static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4253                             struct bin_attribute *bin_attr,
4254                             char *buf, loff_t off, size_t count)
4255{
4256        struct device *cdev = container_of(kobj, struct device, kobj);
4257        struct Scsi_Host *shost = class_to_shost(cdev);
4258        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4259        struct ipr_dump *dump;
4260        unsigned long lock_flags = 0;
4261        char *src;
4262        int len, sdt_end;
4263        size_t rc = count;
4264
4265        if (!capable(CAP_SYS_ADMIN))
4266                return -EACCES;
4267
4268        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4269        dump = ioa_cfg->dump;
4270
4271        if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4272                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4273                return 0;
4274        }
4275        kref_get(&dump->kref);
4276        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4277
4278        if (off > dump->driver_dump.hdr.len) {
4279                kref_put(&dump->kref, ipr_release_dump);
4280                return 0;
4281        }
4282
4283        if (off + count > dump->driver_dump.hdr.len) {
4284                count = dump->driver_dump.hdr.len - off;
4285                rc = count;
4286        }
4287
4288        if (count && off < sizeof(dump->driver_dump)) {
4289                if (off + count > sizeof(dump->driver_dump))
4290                        len = sizeof(dump->driver_dump) - off;
4291                else
4292                        len = count;
4293                src = (u8 *)&dump->driver_dump + off;
4294                memcpy(buf, src, len);
4295                buf += len;
4296                off += len;
4297                count -= len;
4298        }
4299
4300        off -= sizeof(dump->driver_dump);
4301
4302        if (ioa_cfg->sis64)
4303                sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4304                          (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4305                           sizeof(struct ipr_sdt_entry));
4306        else
4307                sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4308                          (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4309
4310        if (count && off < sdt_end) {
4311                if (off + count > sdt_end)
4312                        len = sdt_end - off;
4313                else
4314                        len = count;
4315                src = (u8 *)&dump->ioa_dump + off;
4316                memcpy(buf, src, len);
4317                buf += len;
4318                off += len;
4319                count -= len;
4320        }
4321
4322        off -= sdt_end;
4323
4324        while (count) {
4325                if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4326                        len = PAGE_ALIGN(off) - off;
4327                else
4328                        len = count;
4329                src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4330                src += off & ~PAGE_MASK;
4331                memcpy(buf, src, len);
4332                buf += len;
4333                off += len;
4334                count -= len;
4335        }
4336
4337        kref_put(&dump->kref, ipr_release_dump);
4338        return rc;
4339}
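    /*
     * Layout of the virtual dump file walked above; each read is
     * satisfied region by region as the offset advances:
     *
     *   [0, sizeof(driver_dump))   driver dump header
     *   [.., sdt_end)              SDT table within ioa_dump
     *   [sdt_end, hdr.len)         IOA data, one PAGE_SIZE chunk per
     *                              ioa_dump.ioa_data[] entry
     */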
4340
4341/**
4342 * ipr_alloc_dump - Prepare for adapter dump
4343 * @ioa_cfg:    ioa config struct
4344 *
4345 * Return value:
4346 *      0 on success / other on failure
4347 **/
4348static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4349{
4350        struct ipr_dump *dump;
4351        __be32 **ioa_data;
4352        unsigned long lock_flags = 0;
4353
4354        dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4355
4356        if (!dump) {
4357                ipr_err("Dump memory allocation failed\n");
4358                return -ENOMEM;
4359        }
4360
4361        if (ioa_cfg->sis64)
4362                ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4363                                              sizeof(__be32 *)));
4364        else
4365                ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4366                                              sizeof(__be32 *)));
4367
4368        if (!ioa_data) {
4369                ipr_err("Dump memory allocation failed\n");
4370                kfree(dump);
4371                return -ENOMEM;
4372        }
4373
4374        dump->ioa_dump.ioa_data = ioa_data;
4375
4376        kref_init(&dump->kref);
4377        dump->ioa_cfg = ioa_cfg;
4378
4379        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4380
4381        if (INACTIVE != ioa_cfg->sdt_state) {
4382                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4383                vfree(dump->ioa_dump.ioa_data);
4384                kfree(dump);
4385                return 0;
4386        }
4387
4388        ioa_cfg->dump = dump;
4389        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4390        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4391                ioa_cfg->dump_taken = 1;
4392                schedule_work(&ioa_cfg->work_q);
4393        }
4394        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4395
4396        return 0;
4397}
4398
4399/**
4400 * ipr_free_dump - Free adapter dump memory
4401 * @ioa_cfg:    ioa config struct
4402 *
4403 * Return value:
4404 *      0 on success / other on failure
4405 **/
4406static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4407{
4408        struct ipr_dump *dump;
4409        unsigned long lock_flags = 0;
4410
4411        ENTER;
4412
4413        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4414        dump = ioa_cfg->dump;
4415        if (!dump) {
4416                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4417                return 0;
4418        }
4419
4420        ioa_cfg->dump = NULL;
4421        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4422
4423        kref_put(&dump->kref, ipr_release_dump);
4424
4425        LEAVE;
4426        return 0;
4427}
4428
4429/**
4430 * ipr_write_dump - Setup dump state of adapter
4431 * @filp:               open sysfs file
4432 * @kobj:               kobject struct
4433 * @bin_attr:           bin_attribute struct
4434 * @buf:                buffer
4435 * @off:                offset
4436 * @count:              buffer size
4437 *
4438 * Return value:
4439 *      count on success / other on failure
4440 **/
4441static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4442                              struct bin_attribute *bin_attr,
4443                              char *buf, loff_t off, size_t count)
4444{
4445        struct device *cdev = container_of(kobj, struct device, kobj);
4446        struct Scsi_Host *shost = class_to_shost(cdev);
4447        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4448        int rc;
4449
4450        if (!capable(CAP_SYS_ADMIN))
4451                return -EACCES;
4452
4453        if (buf[0] == '1')
4454                rc = ipr_alloc_dump(ioa_cfg);
4455        else if (buf[0] == '0')
4456                rc = ipr_free_dump(ioa_cfg);
4457        else
4458                return -EINVAL;
4459
4460        if (rc)
4461                return rc;
4462        else
4463                return count;
4464}
4465
4466static struct bin_attribute ipr_dump_attr = {
4467        .attr = {
4468                .name = "dump",
4469                .mode = S_IRUSR | S_IWUSR,
4470        },
4471        .size = 0,
4472        .read = ipr_read_dump,
4473        .write = ipr_write_dump
4474};
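    /*
     * Illustrative dump sequence (host number hypothetical): writing '1'
     * arms the dump via ipr_alloc_dump(), reads return data once
     * sdt_state reaches DUMP_OBTAINED, and writing '0' frees the buffers
     * through ipr_free_dump():
     *
     *   # echo 1 > /sys/class/scsi_host/host0/dump
     *   # cat /sys/class/scsi_host/host0/dump > ioa_dump.bin
     *   # echo 0 > /sys/class/scsi_host/host0/dump
     */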
4475#else
4476static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4477#endif
4478
4479/**
4480 * ipr_change_queue_depth - Change the device's queue depth
4481 * @sdev:       scsi device struct
4482 * @qdepth:     depth to set
4484 *
4485 * Return value:
4486 *      actual depth set
4487 **/
4488static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4489{
4490        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4491        struct ipr_resource_entry *res;
4492        unsigned long lock_flags = 0;
4493
4494        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4495        res = (struct ipr_resource_entry *)sdev->hostdata;
4496
4497        if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4498                qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4499        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4500
4501        scsi_change_queue_depth(sdev, qdepth);
4502        return sdev->queue_depth;
4503}
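    /*
     * This callback backs the generic SCSI queue_depth knob, e.g.
     * (device address hypothetical):
     *
     *   # echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
     *
     * For SATA (GATA) resources the requested depth is first clamped to
     * IPR_MAX_CMD_PER_ATA_LUN.
     */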
4504
4505/**
4506 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4507 * @dev:        device struct
4508 * @attr:       device attribute structure
4509 * @buf:        buffer
4510 *
4511 * Return value:
4512 *      number of bytes printed to buffer
4513 **/
4514static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4515{
4516        struct scsi_device *sdev = to_scsi_device(dev);
4517        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4518        struct ipr_resource_entry *res;
4519        unsigned long lock_flags = 0;
4520        ssize_t len = -ENXIO;
4521
4522        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4523        res = (struct ipr_resource_entry *)sdev->hostdata;
4524        if (res)
4525                len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4526        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4527        return len;
4528}
4529
4530static struct device_attribute ipr_adapter_handle_attr = {
4531        .attr = {
4532                .name =         "adapter_handle",
4533                .mode =         S_IRUSR,
4534        },
4535        .show = ipr_show_adapter_handle
4536};
4537
4538/**
4539 * ipr_show_resource_path - Show the resource path or the resource address for
4540 *                          this device.
4541 * @dev:        device struct
4542 * @attr:       device attribute structure
4543 * @buf:        buffer
4544 *
4545 * Return value:
4546 *      number of bytes printed to buffer
4547 **/
4548static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4549{
4550        struct scsi_device *sdev = to_scsi_device(dev);
4551        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4552        struct ipr_resource_entry *res;
4553        unsigned long lock_flags = 0;
4554        ssize_t len = -ENXIO;
4555        char buffer[IPR_MAX_RES_PATH_LENGTH];
4556
4557        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4558        res = (struct ipr_resource_entry *)sdev->hostdata;
4559        if (res && ioa_cfg->sis64)
4560                len = snprintf(buf, PAGE_SIZE, "%s\n",
4561                               __ipr_format_res_path(res->res_path, buffer,
4562                                                     sizeof(buffer)));
4563        else if (res)
4564                len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4565                               res->bus, res->target, res->lun);
4566
4567        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4568        return len;
4569}
4570
4571static struct device_attribute ipr_resource_path_attr = {
4572        .attr = {
4573                .name =         "resource_path",
4574                .mode =         S_IRUGO,
4575        },
4576        .show = ipr_show_resource_path
4577};
4578
4579/**
4580 * ipr_show_device_id - Show the device_id for this device.
4581 * @dev:        device struct
4582 * @attr:       device attribute structure
4583 * @buf:        buffer
4584 *
4585 * Return value:
4586 *      number of bytes printed to buffer
4587 **/
4588static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4589{
4590        struct scsi_device *sdev = to_scsi_device(dev);
4591        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4592        struct ipr_resource_entry *res;
4593        unsigned long lock_flags = 0;
4594        ssize_t len = -ENXIO;
4595
4596        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4597        res = (struct ipr_resource_entry *)sdev->hostdata;
4598        if (res && ioa_cfg->sis64)
4599                len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4600        else if (res)
4601                len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4602
4603        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4604        return len;
4605}
4606
4607static struct device_attribute ipr_device_id_attr = {
4608        .attr = {
4609                .name =         "device_id",
4610                .mode =         S_IRUGO,
4611        },
4612        .show = ipr_show_device_id
4613};
4614
4615/**
4616 * ipr_show_resource_type - Show the resource type for this device.
4617 * @dev:        device struct
4618 * @attr:       device attribute structure
4619 * @buf:        buffer
4620 *
4621 * Return value:
4622 *      number of bytes printed to buffer
4623 **/
4624static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4625{
4626        struct scsi_device *sdev = to_scsi_device(dev);
4627        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4628        struct ipr_resource_entry *res;
4629        unsigned long lock_flags = 0;
4630        ssize_t len = -ENXIO;
4631
4632        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4633        res = (struct ipr_resource_entry *)sdev->hostdata;
4634
4635        if (res)
4636                len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4637
4638        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4639        return len;
4640}
4641
4642static struct device_attribute ipr_resource_type_attr = {
4643        .attr = {
4644                .name =         "resource_type",
4645                .mode =         S_IRUGO,
4646        },
4647        .show = ipr_show_resource_type
4648};
4649
4650/**
4651 * ipr_show_raw_mode - Show the adapter's raw mode
4652 * @dev:        class device struct
4653 * @buf:        buffer
4654 *
4655 * Return value:
4656 *      number of bytes printed to buffer
4657 **/
4658static ssize_t ipr_show_raw_mode(struct device *dev,
4659                                 struct device_attribute *attr, char *buf)
4660{
4661        struct scsi_device *sdev = to_scsi_device(dev);
4662        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4663        struct ipr_resource_entry *res;
4664        unsigned long lock_flags = 0;
4665        ssize_t len;
4666
4667        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4668        res = (struct ipr_resource_entry *)sdev->hostdata;
4669        if (res)
4670                len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4671        else
4672                len = -ENXIO;
4673        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4674        return len;
4675}
4676
4677/**
4678 * ipr_store_raw_mode - Change the adapter's raw mode
4679 * @dev:        class device struct
4680 * @buf:        buffer
4681 *
4682 * Return value:
4683 *      number of bytes used on success / error code on failure
4684 **/
4685static ssize_t ipr_store_raw_mode(struct device *dev,
4686                                  struct device_attribute *attr,
4687                                  const char *buf, size_t count)
4688{
4689        struct scsi_device *sdev = to_scsi_device(dev);
4690        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4691        struct ipr_resource_entry *res;
4692        unsigned long lock_flags = 0;
4693        ssize_t len;
4694
4695        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4696        res = (struct ipr_resource_entry *)sdev->hostdata;
4697        if (res) {
4698                if (ipr_is_af_dasd_device(res)) {
4699                        res->raw_mode = simple_strtoul(buf, NULL, 10);
4700                        len = strlen(buf);
4701                        if (res->sdev)
4702                                sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4703                                        res->raw_mode ? "enabled" : "disabled");
4704                } else
4705                        len = -EINVAL;
4706        } else
4707                len = -ENXIO;
4708        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4709        return len;
4710}
4711
4712static struct device_attribute ipr_raw_mode_attr = {
4713        .attr = {
4714                .name =         "raw_mode",
4715                .mode =         S_IRUGO | S_IWUSR,
4716        },
4717        .show = ipr_show_raw_mode,
4718        .store = ipr_store_raw_mode
4719};
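    /*
     * Illustrative toggle (device address hypothetical); only AF DASD
     * resources accept the write, any other resource type yields
     * -EINVAL:
     *
     *   # echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
     */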
4720
4721static struct device_attribute *ipr_dev_attrs[] = {
4722        &ipr_adapter_handle_attr,
4723        &ipr_resource_path_attr,
4724        &ipr_device_id_attr,
4725        &ipr_resource_type_attr,
4726        &ipr_raw_mode_attr,
4727        NULL,
4728};
4729
4730/**
4731 * ipr_biosparam - Return the HSC mapping
4732 * @sdev:                       scsi device struct
4733 * @block_device:       block device pointer
4734 * @capacity:           capacity of the device
4735 * @parm:                       Array containing returned HSC values.
4736 *
4737 * This function generates the HSC parms that fdisk uses.
4738 * We want to make sure we return something that places partitions
4739 * on 4k boundaries for best performance with the IOA.
4740 *
4741 * Return value:
4742 *      0 on success
4743 **/
4744static int ipr_biosparam(struct scsi_device *sdev,
4745                         struct block_device *block_device,
4746                         sector_t capacity, int *parm)
4747{
4748        int heads, sectors;
4749        sector_t cylinders;
4750
4751        heads = 128;
4752        sectors = 32;
4753
4754        cylinders = capacity;
4755        sector_div(cylinders, (128 * 32));
4756
4757        /* return result */
4758        parm[0] = heads;
4759        parm[1] = sectors;
4760        parm[2] = cylinders;
4761
4762        return 0;
4763}
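    /*
     * Worked example of the geometry math above (capacity hypothetical):
     * with 128 heads and 32 sectors/track, a cylinder is 128 * 32 = 4096
     * sectors, i.e. 2 MiB with 512-byte sectors, so cylinder-aligned
     * partitions always begin on 4k boundaries. A 143374744-sector disk
     * therefore reports 143374744 / 4096 = 35003 cylinders (integer
     * division).
     */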
4764
4765/**
4766 * ipr_find_starget - Find target based on bus/target.
4767 * @starget:    scsi target struct
4768 *
4769 * Return value:
4770 *      resource entry pointer if found / NULL if not found
4771 **/
4772static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4773{
4774        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4775        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4776        struct ipr_resource_entry *res;
4777
4778        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4779                if ((res->bus == starget->channel) &&
4780                    (res->target == starget->id)) {
4781                        return res;
4782                }
4783        }
4784
4785        return NULL;
4786}
4787
4788static struct ata_port_info sata_port_info;
4789
4790/**
4791 * ipr_target_alloc - Prepare for commands to a SCSI target
4792 * @starget:    scsi target struct
4793 *
4794 * If the device is a SATA device, this function allocates an
4795 * ATA port with libata, else it does nothing.
4796 *
4797 * Return value:
4798 *      0 on success / non-0 on failure
4799 **/
4800static int ipr_target_alloc(struct scsi_target *starget)
4801{
4802        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4803        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4804        struct ipr_sata_port *sata_port;
4805        struct ata_port *ap;
4806        struct ipr_resource_entry *res;
4807        unsigned long lock_flags;
4808
4809        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4810        res = ipr_find_starget(starget);
4811        starget->hostdata = NULL;
4812
4813        if (res && ipr_is_gata(res)) {
4814                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4815                sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4816                if (!sata_port)
4817                        return -ENOMEM;
4818
4819                ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4820                if (ap) {
4821                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4822                        sata_port->ioa_cfg = ioa_cfg;
4823                        sata_port->ap = ap;
4824                        sata_port->res = res;
4825
4826                        res->sata_port = sata_port;
4827                        ap->private_data = sata_port;
4828                        starget->hostdata = sata_port;
4829                } else {
4830                        kfree(sata_port);
4831                        return -ENOMEM;
4832                }
4833        }
4834        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4835
4836        return 0;
4837}
4838
4839/**
4840 * ipr_target_destroy - Destroy a SCSI target
4841 * @starget:    scsi target struct
4842 *
4843 * If the device was a SATA device, this function frees the libata
4844 * ATA port, else it does nothing.
4845 *
4846 **/
4847static void ipr_target_destroy(struct scsi_target *starget)
4848{
4849        struct ipr_sata_port *sata_port = starget->hostdata;
4850        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4851        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4852
4853        if (ioa_cfg->sis64) {
4854                if (!ipr_find_starget(starget)) {
4855                        if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4856                                clear_bit(starget->id, ioa_cfg->array_ids);
4857                        else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4858                                clear_bit(starget->id, ioa_cfg->vset_ids);
4859                        else if (starget->channel == 0)
4860                                clear_bit(starget->id, ioa_cfg->target_ids);
4861                }
4862        }
4863
4864        if (sata_port) {
4865                starget->hostdata = NULL;
4866                ata_sas_port_destroy(sata_port->ap);
4867                kfree(sata_port);
4868        }
4869}
4870
4871/**
4872 * ipr_find_sdev - Find device based on bus/target/lun.
4873 * @sdev:       scsi device struct
4874 *
4875 * Return value:
4876 *      resource entry pointer if found / NULL if not found
4877 **/
4878static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4879{
4880        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4881        struct ipr_resource_entry *res;
4882
4883        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4884                if ((res->bus == sdev->channel) &&
4885                    (res->target == sdev->id) &&
4886                    (res->lun == sdev->lun))
4887                        return res;
4888        }
4889
4890        return NULL;
4891}
4892
4893/**
4894 * ipr_slave_destroy - Unconfigure a SCSI device
4895 * @sdev:       scsi device struct
4896 *
4897 * Return value:
4898 *      nothing
4899 **/
4900static void ipr_slave_destroy(struct scsi_device *sdev)
4901{
4902        struct ipr_resource_entry *res;
4903        struct ipr_ioa_cfg *ioa_cfg;
4904        unsigned long lock_flags = 0;
4905
4906        ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4907
4908        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4909        res = (struct ipr_resource_entry *) sdev->hostdata;
4910        if (res) {
4911                if (res->sata_port)
4912                        res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4913                sdev->hostdata = NULL;
4914                res->sdev = NULL;
4915                res->sata_port = NULL;
4916        }
4917        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918}
4919
4920/**
4921 * ipr_slave_configure - Configure a SCSI device
4922 * @sdev:       scsi device struct
4923 *
4924 * This function configures the specified scsi device.
4925 *
4926 * Return value:
4927 *      0 on success
4928 **/
4929static int ipr_slave_configure(struct scsi_device *sdev)
4930{
4931        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4932        struct ipr_resource_entry *res;
4933        struct ata_port *ap = NULL;
4934        unsigned long lock_flags = 0;
4935        char buffer[IPR_MAX_RES_PATH_LENGTH];
4936
4937        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4938        res = sdev->hostdata;
4939        if (res) {
4940                if (ipr_is_af_dasd_device(res))
4941                        sdev->type = TYPE_RAID;
4942                if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4943                        sdev->scsi_level = 4;
4944                        sdev->no_uld_attach = 1;
4945                }
4946                if (ipr_is_vset_device(res)) {
4947                        sdev->scsi_level = SCSI_SPC_3;
4948                        sdev->no_report_opcodes = 1;
4949                        blk_queue_rq_timeout(sdev->request_queue,
4950                                             IPR_VSET_RW_TIMEOUT);
4951                        blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4952                }
4953                if (ipr_is_gata(res) && res->sata_port)
4954                        ap = res->sata_port->ap;
4955                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4956
4957                if (ap) {
4958                        scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4959                        ata_sas_slave_configure(sdev, ap);
4960                }
4961
4962                if (ioa_cfg->sis64)
4963                        sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4964                                    ipr_format_res_path(ioa_cfg,
4965                                res->res_path, buffer, sizeof(buffer)));
4966                return 0;
4967        }
4968        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4969        return 0;
4970}
4971
4972/**
4973 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4974 * @sdev:       scsi device struct
4975 *
4976 * This function initializes an ATA port so that future commands
4977 * sent through queuecommand will work.
4978 *
4979 * Return value:
4980 *      0 on success
4981 **/
4982static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4983{
4984        struct ipr_sata_port *sata_port = NULL;
4985        int rc = -ENXIO;
4986
4987        ENTER;
4988        if (sdev->sdev_target)
4989                sata_port = sdev->sdev_target->hostdata;
4990        if (sata_port) {
4991                rc = ata_sas_port_init(sata_port->ap);
4992                if (rc == 0)
4993                        rc = ata_sas_sync_probe(sata_port->ap);
4994        }
4995
4996        if (rc)
4997                ipr_slave_destroy(sdev);
4998
4999        LEAVE;
5000        return rc;
5001}
5002
5003/**
5004 * ipr_slave_alloc - Prepare for commands to a device.
5005 * @sdev:       scsi device struct
5006 *
5007 * This function saves a pointer to the resource entry
5008 * in the scsi device struct if the device exists. We
5009 * can then use this pointer in ipr_queuecommand when
5010 * handling new commands.
5011 *
5012 * Return value:
5013 *      0 on success / -ENXIO if device does not exist
5014 **/
5015static int ipr_slave_alloc(struct scsi_device *sdev)
5016{
5017        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5018        struct ipr_resource_entry *res;
5019        unsigned long lock_flags;
5020        int rc = -ENXIO;
5021
5022        sdev->hostdata = NULL;
5023
5024        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5025
5026        res = ipr_find_sdev(sdev);
5027        if (res) {
5028                res->sdev = sdev;
5029                res->add_to_ml = 0;
5030                res->in_erp = 0;
5031                sdev->hostdata = res;
5032                if (!ipr_is_naca_model(res))
5033                        res->needs_sync_complete = 1;
5034                rc = 0;
5035                if (ipr_is_gata(res)) {
5036                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5037                        return ipr_ata_slave_alloc(sdev);
5038                }
5039        }
5040
5041        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5042
5043        return rc;
5044}
5045
5046/**
5047 * ipr_match_lun - Match function for specified LUN
5048 * @ipr_cmd:    ipr command struct
5049 * @device:             device to match (sdev)
5050 *
5051 * Returns:
5052 *      1 if command matches sdev / 0 if command does not match sdev
5053 **/
5054static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5055{
5056        if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5057                return 1;
5058        return 0;
5059}
5060
5061/**
5062 * ipr_cmnd_is_free - Check if a command is free or not
5063 * @ipr_cmd:    ipr command struct
5064 *
5065 * Returns:
5066 *      true / false
5067 **/
5068static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5069{
5070        struct ipr_cmnd *loop_cmd;
5071
5072        list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5073                if (loop_cmd == ipr_cmd)
5074                        return true;
5075        }
5076
5077        return false;
5078}
5079
5080/**
5081 * ipr_match_res - Match function for specified resource entry
5082 * @ipr_cmd:    ipr command struct
5083 * @resource:   resource entry to match
5084 *
5085 * Returns:
5086 *      1 if command matches resource entry / 0 if it does not
5087 **/
5088static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5089{
5090        struct ipr_resource_entry *res = resource;
5091
5092        if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5093                return 1;
5094        return 0;
5095}
5096
5097/**
5098 * ipr_wait_for_ops - Wait for matching commands to complete
5099 * @ioa_cfg:    ioa config struct
5100 * @device:             device to match (sdev)
5101 * @match:              match function to use
5102 *
5103 * Returns:
5104 *      SUCCESS / FAILED
5105 **/
5106static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5107                            int (*match)(struct ipr_cmnd *, void *))
5108{
5109        struct ipr_cmnd *ipr_cmd;
5110        int wait, i;
5111        unsigned long flags;
5112        struct ipr_hrr_queue *hrrq;
5113        signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5114        DECLARE_COMPLETION_ONSTACK(comp);
5115
5116        ENTER;
5117        do {
5118                wait = 0;
5119
5120                for_each_hrrq(hrrq, ioa_cfg) {
5121                        spin_lock_irqsave(hrrq->lock, flags);
5122                        for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5123                                ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5124                                if (!ipr_cmnd_is_free(ipr_cmd)) {
5125                                        if (match(ipr_cmd, device)) {
5126                                                ipr_cmd->eh_comp = &comp;
5127                                                wait++;
5128                                        }
5129                                }
5130                        }
5131                        spin_unlock_irqrestore(hrrq->lock, flags);
5132                }
5133
5134                if (wait) {
5135                        timeout = wait_for_completion_timeout(&comp, timeout);
5136
5137                        if (!timeout) {
5138                                wait = 0;
5139
5140                                for_each_hrrq(hrrq, ioa_cfg) {
5141                                        spin_lock_irqsave(hrrq->lock, flags);
5142                                        for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5143                                                ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5144                                                if (!ipr_cmnd_is_free(ipr_cmd)) {
5145                                                        if (match(ipr_cmd, device)) {
5146                                                                ipr_cmd->eh_comp = NULL;
5147                                                                wait++;
5148                                                        }
5149                                                }
5150                                        }
5151                                        spin_unlock_irqrestore(hrrq->lock, flags);
5152                                }
5153
5154                                if (wait)
5155                                        dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5156                                LEAVE;
5157                                return wait ? FAILED : SUCCESS;
5158                        }
5159                }
5160        } while (wait);
5161
5162        LEAVE;
5163        return SUCCESS;
5164}
5165
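    /**
     * ipr_eh_host_reset - Reset the host adapter
     * @cmd:        scsi command struct
     *
     * Return value:
     *      SUCCESS / FAILED
     **/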
5166static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5167{
5168        struct ipr_ioa_cfg *ioa_cfg;
5169        unsigned long lock_flags = 0;
5170        int rc = SUCCESS;
5171
5172        ENTER;
5173        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5174        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5175
5176        if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5177                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5178                dev_err(&ioa_cfg->pdev->dev,
5179                        "Adapter being reset as a result of error recovery.\n");
5180
5181                if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5182                        ioa_cfg->sdt_state = GET_DUMP;
5183        }
5184
5185        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5186        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5187        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5188
5189        /* If we got hit with a host reset while we were already resetting
5190         * the adapter for some reason and that reset failed, fail this one too. */
5191        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5192                ipr_trace;
5193                rc = FAILED;
5194        }
5195
5196        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5197        LEAVE;
5198        return rc;
5199}
5200
5201/**
5202 * ipr_device_reset - Reset the device
5203 * @ioa_cfg:    ioa config struct
5204 * @res:                resource entry struct
5205 *
5206 * This function issues a device reset to the affected device.
5207 * If the device is a SCSI device, a LUN reset will be sent
5208 * to the device first. If that does not work, a target reset
5209 * will be sent. If the device is a SATA device, a PHY reset will
5210 * be sent.
5211 *
5212 * Return value:
5213 *      0 on success / non-zero on failure
5214 **/
5215static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5216                            struct ipr_resource_entry *res)
5217{
5218        struct ipr_cmnd *ipr_cmd;
5219        struct ipr_ioarcb *ioarcb;
5220        struct ipr_cmd_pkt *cmd_pkt;
5221        struct ipr_ioarcb_ata_regs *regs;
5222        u32 ioasc;
5223
5224        ENTER;
5225        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5226        ioarcb = &ipr_cmd->ioarcb;
5227        cmd_pkt = &ioarcb->cmd_pkt;
5228
5229        if (ipr_cmd->ioa_cfg->sis64) {
5230                regs = &ipr_cmd->i.ata_ioadl.regs;
5231                ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5232        } else
5233                regs = &ioarcb->u.add_data.u.regs;
5234
5235        ioarcb->res_handle = res->res_handle;
5236        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5237        cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5238        if (ipr_is_gata(res)) {
5239                cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5240                ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5241                regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5242        }
5243
5244        ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5245        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5246        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5247        if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5248                if (ipr_cmd->ioa_cfg->sis64)
5249                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5250                               sizeof(struct ipr_ioasa_gata));
5251                else
5252                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5253                               sizeof(struct ipr_ioasa_gata));
5254        }
5255
5256        LEAVE;
5257        return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5258}
5259
5260/**
5261 * ipr_sata_reset - Reset the SATA port
5262 * @link:       SATA link to reset
5263 * @classes:    class of the attached device
     * @deadline:   unused
5264 *
5265 * This function issues a SATA phy reset to the affected ATA link.
5266 *
5267 * Return value:
5268 *      0 on success / non-zero on failure
5269 **/
5270static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5271                                unsigned long deadline)
5272{
5273        struct ipr_sata_port *sata_port = link->ap->private_data;
5274        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5275        struct ipr_resource_entry *res;
5276        unsigned long lock_flags = 0;
5277        int rc = -ENXIO, ret;
5278
5279        ENTER;
5280        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5281        while (ioa_cfg->in_reset_reload) {
5282                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5283                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5284                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5285        }
5286
5287        res = sata_port->res;
5288        if (res) {
5289                rc = ipr_device_reset(ioa_cfg, res);
5290                *classes = res->ata_class;
5291                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5292
5293                ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5294                if (ret != SUCCESS) {
5295                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5296                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5297                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5298
5299                        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5300                }
5301        } else
5302                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5303
5304        LEAVE;
5305        return rc;
5306}
5307
5308/**
5309 * __ipr_eh_dev_reset - Reset the device
5310 * @scsi_cmd:   scsi command struct
5311 *
5312 * This function issues a device reset to the affected device.
5313 * A LUN reset will be sent to the device first. If that does
5314 * not work, a target reset will be sent.
5315 *
5316 * Return value:
5317 *      SUCCESS / FAILED
5318 **/
5319static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5320{
5321        struct ipr_cmnd *ipr_cmd;
5322        struct ipr_ioa_cfg *ioa_cfg;
5323        struct ipr_resource_entry *res;
5324        struct ata_port *ap;
5325        int rc = 0, i;
5326        struct ipr_hrr_queue *hrrq;
5327
5328        ENTER;
5329        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5330        res = scsi_cmd->device->hostdata;
5331
5332        /*
5333         * If we are currently going through reset/reload, return failed. This will force the
5334         * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5335         * reset to complete
5336         */
5337        if (ioa_cfg->in_reset_reload)
5338                return FAILED;
5339        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5340                return FAILED;
5341
5342        for_each_hrrq(hrrq, ioa_cfg) {
5343                spin_lock(&hrrq->_lock);
5344                for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5345                        ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5346
5347                        if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5348                                if (!ipr_cmd->qc)
5349                                        continue;
5350                                if (ipr_cmnd_is_free(ipr_cmd))
5351                                        continue;
5352
5353                                ipr_cmd->done = ipr_sata_eh_done;
5354                                if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5355                                        ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5356                                        ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5357                                }
5358                        }
5359                }
5360                spin_unlock(&hrrq->_lock);
5361        }
5362        res->resetting_device = 1;
5363        scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5364
5365        if (ipr_is_gata(res) && res->sata_port) {
5366                ap = res->sata_port->ap;
5367                spin_unlock_irq(scsi_cmd->device->host->host_lock);
5368                ata_std_error_handler(ap);
5369                spin_lock_irq(scsi_cmd->device->host->host_lock);
5370        } else
5371                rc = ipr_device_reset(ioa_cfg, res);
5372        res->resetting_device = 0;
5373        res->reset_occurred = 1;
5374
5375        LEAVE;
5376        return rc ? FAILED : SUCCESS;
5377}
5378
5379static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5380{
5381        int rc;
5382        struct ipr_ioa_cfg *ioa_cfg;
5383        struct ipr_resource_entry *res;
5384
5385        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5386        res = cmd->device->hostdata;
5387
5388        if (!res)
5389                return FAILED;
5390
5391        spin_lock_irq(cmd->device->host->host_lock);
5392        rc = __ipr_eh_dev_reset(cmd);
5393        spin_unlock_irq(cmd->device->host->host_lock);
5394
5395        if (rc == SUCCESS) {
5396                if (ipr_is_gata(res) && res->sata_port)
5397                        rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5398                else
5399                        rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5400        }
5401
5402        return rc;
5403}
5404
5405/**
5406 * ipr_bus_reset_done - Op done function for bus reset.
5407 * @ipr_cmd:    ipr command struct
5408 *
5409 * This function is the op done function for a bus reset
5410 *
5411 * Return value:
5412 *      none
5413 **/
5414static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5415{
5416        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5417        struct ipr_resource_entry *res;
5418
5419        ENTER;
5420        if (!ioa_cfg->sis64)
5421                list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5422                        if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5423                                scsi_report_bus_reset(ioa_cfg->host, res->bus);
5424                                break;
5425                        }
5426                }
5427
5428        /*
5429         * If abort has not completed, indicate the reset has, else call the
5430         * abort's done function to wake the sleeping eh thread
5431         */
5432        if (ipr_cmd->sibling->sibling)
5433                ipr_cmd->sibling->sibling = NULL;
5434        else
5435                ipr_cmd->sibling->done(ipr_cmd->sibling);
5436
5437        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5438        LEAVE;
5439}
5440
5441/**
5442 * ipr_abort_timeout - An abort task has timed out
5443 * @t:          Timer context used to fetch ipr command struct
5444 *
5445 * This function handles when an abort task times out. If this
5446 * happens we issue a bus reset since we have resources tied
5447 * up that must be freed before returning to the midlayer.
5448 *
5449 * Return value:
5450 *      none
5451 **/
5452static void ipr_abort_timeout(struct timer_list *t)
5453{
5454        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5455        struct ipr_cmnd *reset_cmd;
5456        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5457        struct ipr_cmd_pkt *cmd_pkt;
5458        unsigned long lock_flags = 0;
5459
5460        ENTER;
5461        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5462        if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5463                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5464                return;
5465        }
5466
5467        sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5468        reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5469        ipr_cmd->sibling = reset_cmd;
5470        reset_cmd->sibling = ipr_cmd;
5471        reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5472        cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5473        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5474        cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5475        cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5476
5477        ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5478        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5479        LEAVE;
5480}
5481
5482/**
5483 * ipr_cancel_op - Cancel specified op
5484 * @scsi_cmd:   scsi command struct
5485 *
5486 * This function cancels specified op.
5487 *
5488 * Return value:
5489 *      SUCCESS / FAILED
5490 **/
5491static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5492{
5493        struct ipr_cmnd *ipr_cmd;
5494        struct ipr_ioa_cfg *ioa_cfg;
5495        struct ipr_resource_entry *res;
5496        struct ipr_cmd_pkt *cmd_pkt;
5497        u32 ioasc, int_reg;
5498        int i, op_found = 0;
5499        struct ipr_hrr_queue *hrrq;
5500
5501        ENTER;
5502        ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5503        res = scsi_cmd->device->hostdata;
5504
5505        /* If we are currently going through reset/reload, return failed.
5506         * This will force the mid-layer to call ipr_eh_host_reset,
5507         * which will then go to sleep and wait for the reset to complete
5508         */
5509        if (ioa_cfg->in_reset_reload ||
5510            ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5511                return FAILED;
5512        if (!res)
5513                return FAILED;
5514
5515        /*
5516         * If we are aborting a timed out op, chances are that the timeout was caused
5517         * by a still not detected EEH error. In such cases, reading a register will
5518         * trigger the EEH recovery infrastructure.
5519         */
5520        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5521
5522        if (!ipr_is_gscsi(res))
5523                return FAILED;
5524
5525        for_each_hrrq(hrrq, ioa_cfg) {
5526                spin_lock(&hrrq->_lock);
5527                for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5528                        if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5529                                if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5530                                        op_found = 1;
5531                                        break;
5532                                }
5533                        }
5534                }
5535                spin_unlock(&hrrq->_lock);
5536        }
5537
5538        if (!op_found)
5539                return SUCCESS;
5540
5541        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5542        ipr_cmd->ioarcb.res_handle = res->res_handle;
5543        cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5544        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5545        cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5546        ipr_cmd->u.sdev = scsi_cmd->device;
5547
5548        scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5549                    scsi_cmd->cmnd[0]);
5550        ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5551        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5552
5553        /*
5554         * If the abort task timed out and we sent a bus reset, we will get
5555         * one of the following responses to the abort
5556         */
5557        if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5558                ioasc = 0;
5559                ipr_trace;
5560        }
5561
5562        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5563        if (!ipr_is_naca_model(res))
5564                res->needs_sync_complete = 1;
5565
5566        LEAVE;
5567        return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5568}
5569
5570/**
5571 * ipr_scan_finished - Check if the device scan is complete
5572 * @shost:         scsi host struct
     * @elapsed_time:  elapsed time of the scan in jiffies
5573 *
5574 * Return value:
5575 *      0 if scan in progress / 1 if scan is complete
5576 **/
5577static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5578{
5579        unsigned long lock_flags;
5580        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5581        int rc = 0;
5582
5583        spin_lock_irqsave(shost->host_lock, lock_flags);
5584        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5585                rc = 1;
5586        if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5587                rc = 1;
5588        spin_unlock_irqrestore(shost->host_lock, lock_flags);
5589        return rc;
5590}
5591
5592/**
5593 * ipr_eh_abort - Abort a single op
5594 * @scsi_cmd:   scsi command struct
5595 *
5596 * Return value:
5597 *      SUCCESS / FAILED
5598 **/
5599static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5600{
5601        unsigned long flags;
5602        int rc;
5603        struct ipr_ioa_cfg *ioa_cfg;
5604
5605        ENTER;
5606
5607        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5608
5609        spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5610        rc = ipr_cancel_op(scsi_cmd);
5611        spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5612
5613        if (rc == SUCCESS)
5614                rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5615        LEAVE;
5616        return rc;
5617}
5618
5619/**
5620 * ipr_handle_other_interrupt - Handle "other" interrupts
5621 * @ioa_cfg:    ioa config struct
5622 * @int_reg:    interrupt register
5623 *
5624 * Return value:
5625 *      IRQ_NONE / IRQ_HANDLED
5626 **/
5627static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5628                                              u32 int_reg)
5629{
5630        irqreturn_t rc = IRQ_HANDLED;
5631        u32 int_mask_reg;
5632
5633        int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5634        int_reg &= ~int_mask_reg;
5635
5636        /* If an interrupt on the adapter did not occur, ignore it.
5637         * Or in the case of SIS 64, check for a stage change interrupt.
5638         */
5639        if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5640                if (ioa_cfg->sis64) {
5641                        int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5642                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5643                        if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5644
5645                                /* clear stage change */
5646                                writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5647                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5648                                list_del(&ioa_cfg->reset_cmd->queue);
5649                                del_timer(&ioa_cfg->reset_cmd->timer);
5650                                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5651                                return IRQ_HANDLED;
5652                        }
5653                }
5654
5655                return IRQ_NONE;
5656        }
5657
5658        if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5659                /* Mask the interrupt */
5660                writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5661                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5662
5663                list_del(&ioa_cfg->reset_cmd->queue);
5664                del_timer(&ioa_cfg->reset_cmd->timer);
5665                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5666        } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5667                if (ioa_cfg->clear_isr) {
5668                        if (ipr_debug && printk_ratelimit())
5669                                dev_err(&ioa_cfg->pdev->dev,
5670                                        "Spurious interrupt detected. 0x%08X\n", int_reg);
5671                        writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5672                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5673                        return IRQ_NONE;
5674                }
5675        } else {
5676                if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5677                        ioa_cfg->ioa_unit_checked = 1;
5678                else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5679                        dev_err(&ioa_cfg->pdev->dev,
5680                                "No Host RRQ. 0x%08X\n", int_reg);
5681                else
5682                        dev_err(&ioa_cfg->pdev->dev,
5683                                "Permanent IOA failure. 0x%08X\n", int_reg);
5684
5685                if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5686                        ioa_cfg->sdt_state = GET_DUMP;
5687
5688                ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5689                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5690        }
5691
5692        return rc;
5693}
5694
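/*
 * Illustrative sketch (not part of the driver): the writel()/readl()
 * pairs above follow the usual MMIO idiom - reading the sense register
 * back after the write both flushes the posted clear out to the
 * adapter and resamples the interrupt state in one step.  Condensed
 * form of the idiom:
 */
static u32 demo_clear_and_resample(void __iomem *clr_reg,
				   void __iomem *sense_reg, u32 bits)
{
	writel(bits, clr_reg);		/* clear the latched condition */
	return readl(sense_reg);	/* flush the write, reread state */
}
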
5695/**
5696 * ipr_isr_eh - Interrupt service routine error handler
5697 * @ioa_cfg:    ioa config struct
5698 * @msg:        message to log
5699 * @number:     number to log with the message
5700 * Return value:
5701 *      none
5702 **/
5703static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5704{
5705        ioa_cfg->errors_logged++;
5706        dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5707
5708        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5709                ioa_cfg->sdt_state = GET_DUMP;
5710
5711        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5712}
5713
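/**
 * ipr_process_hrrq - Process responses on a host response queue
 * @hrr_queue:  hrr queue to drain
 * @budget:     max number of responses to process, or -1 for no limit
 * @doneq:      list to which completed commands are moved
 *
 * Return value:
 *      number of responses processed
 **/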
5714static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5715                                                struct list_head *doneq)
5716{
5717        u32 ioasc;
5718        u16 cmd_index;
5719        struct ipr_cmnd *ipr_cmd;
5720        struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5721        int num_hrrq = 0;
5722
5723        /* If interrupts are disabled, ignore the interrupt */
5724        if (!hrr_queue->allow_interrupts)
5725                return 0;
5726
5727        while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5728               hrr_queue->toggle_bit) {
5729
5730                cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5731                             IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5732                             IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5733
5734                if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5735                             cmd_index < hrr_queue->min_cmd_id)) {
5736                        ipr_isr_eh(ioa_cfg,
5737                                "Invalid response handle from IOA: ",
5738                                cmd_index);
5739                        break;
5740                }
5741
5742                ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5743                ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5744
5745                ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5746
5747                list_move_tail(&ipr_cmd->queue, doneq);
5748
5749                if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5750                        hrr_queue->hrrq_curr++;
5751                } else {
5752                        hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5753                        hrr_queue->toggle_bit ^= 1u;
5754                }
5755                num_hrrq++;
5756                if (budget > 0 && num_hrrq >= budget)
5757                        break;
5758        }
5759
5760        return num_hrrq;
5761}
5762
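/*
 * Illustrative sketch (not part of the driver): the toggle-bit test
 * above is how the host tells fresh HRRQ entries from stale ones
 * without a shared producer index.  The IOA writes every entry with
 * the current toggle value and flips it each time the queue wraps;
 * the host consumes entries while the stored bit matches its copy and
 * flips its copy on wrap.  A minimal user-space model of that
 * handshake, with invented names:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_Q_SIZE	4
#define DEMO_TOGGLE	0x00000001u	/* invented bit position */

/* Drain entries whose toggle bit matches ours; flip expectation on wrap. */
static unsigned int demo_drain(const uint32_t *q, unsigned int *cur,
			       uint32_t *toggle)
{
	unsigned int n = 0;

	while ((q[*cur] & DEMO_TOGGLE) == *toggle) {
		printf("completed handle %u\n", q[*cur] >> 1);
		if (++(*cur) == DEMO_Q_SIZE) {
			*cur = 0;
			*toggle ^= DEMO_TOGGLE;
		}
		n++;
	}
	return n;
}

/**
 * ipr_iopoll - irq_poll callback to process responses off the IRQ path
 * @iop:        irq_poll structure embedded in the hrr queue
 * @budget:     max number of responses to process in one poll
 *
 * Return value:
 *      number of completed operations
 **/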
5763static int ipr_iopoll(struct irq_poll *iop, int budget)
5764{
5765        struct ipr_ioa_cfg *ioa_cfg;
5766        struct ipr_hrr_queue *hrrq;
5767        struct ipr_cmnd *ipr_cmd, *temp;
5768        unsigned long hrrq_flags;
5769        int completed_ops;
5770        LIST_HEAD(doneq);
5771
5772        hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5773        ioa_cfg = hrrq->ioa_cfg;
5774
5775        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5776        completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5777
5778        if (completed_ops < budget)
5779                irq_poll_complete(iop);
5780        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5781
5782        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5783                list_del(&ipr_cmd->queue);
5784                del_timer(&ipr_cmd->timer);
5785                ipr_cmd->fast_done(ipr_cmd);
5786        }
5787
5788        return completed_ops;
5789}
5790
5791/**
5792 * ipr_isr - Interrupt service routine
5793 * @irq:        irq number
5794 * @devp:       pointer to the hrr queue for this interrupt
5795 *
5796 * Return value:
5797 *      IRQ_NONE / IRQ_HANDLED
5798 **/
5799static irqreturn_t ipr_isr(int irq, void *devp)
5800{
5801        struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5802        struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5803        unsigned long hrrq_flags = 0;
5804        u32 int_reg = 0;
5805        int num_hrrq = 0;
5806        int irq_none = 0;
5807        struct ipr_cmnd *ipr_cmd, *temp;
5808        irqreturn_t rc = IRQ_NONE;
5809        LIST_HEAD(doneq);
5810
5811        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5812        /* If interrupts are disabled, ignore the interrupt */
5813        if (!hrrq->allow_interrupts) {
5814                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5815                return IRQ_NONE;
5816        }
5817
5818        while (1) {
5819                if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5820                        rc = IRQ_HANDLED;
5821
5822                        if (!ioa_cfg->clear_isr)
5823                                break;
5824
5825                        /* Clear the PCI interrupt */
5826                        num_hrrq = 0;
5827                        do {
5828                                writel(IPR_PCII_HRRQ_UPDATED,
5829                                     ioa_cfg->regs.clr_interrupt_reg32);
5830                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5831                        } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5832                                num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5833
5834                } else if (rc == IRQ_NONE && irq_none == 0) {
5835                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5836                        irq_none++;
5837                } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5838                           int_reg & IPR_PCII_HRRQ_UPDATED) {
5839                        ipr_isr_eh(ioa_cfg,
5840                                "Error clearing HRRQ: ", num_hrrq);
5841                        rc = IRQ_HANDLED;
5842                        break;
5843                } else
5844                        break;
5845        }
5846
5847        if (unlikely(rc == IRQ_NONE))
5848                rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5849
5850        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5851        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5852                list_del(&ipr_cmd->queue);
5853                del_timer(&ipr_cmd->timer);
5854                ipr_cmd->fast_done(ipr_cmd);
5855        }
5856        return rc;
5857}
5858
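/*
 * Illustrative sketch (not part of the driver): both interrupt paths
 * here share one completion idiom - finished commands are collected on
 * a private list while the queue lock is held, and their done
 * callbacks only run after the lock is dropped, so a slow completion
 * never extends the critical section.  A hedged skeleton of the
 * idiom, reusing the ipr_cmnd fields seen above:
 */
static void demo_drain_doneq(spinlock_t *lock, struct list_head *pending)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long flags;
	LIST_HEAD(doneq);

	/* Move the shared list onto a private one under the lock... */
	spin_lock_irqsave(lock, flags);
	list_splice_init(pending, &doneq);
	spin_unlock_irqrestore(lock, flags);

	/* ...then complete each command with no lock held. */
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
}
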
5859/**
5860 * ipr_isr_mhrrq - Interrupt service routine for additional HRRQ vectors
5861 * @irq:        irq number
5862 * @devp:       pointer to the hrr queue for this interrupt
5863 *
5864 * Return value:
5865 *      IRQ_NONE / IRQ_HANDLED
5866 **/
5867static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5868{
5869        struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5870        struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5871        unsigned long hrrq_flags = 0;
5872        struct ipr_cmnd *ipr_cmd, *temp;
5873        irqreturn_t rc = IRQ_NONE;
5874        LIST_HEAD(doneq);
5875
5876        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5877
5878        /* If interrupts are disabled, ignore the interrupt */
5879        if (!hrrq->allow_interrupts) {
5880                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5881                return IRQ_NONE;
5882        }
5883
5884        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5885                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5886                       hrrq->toggle_bit) {
5887                        irq_poll_sched(&hrrq->iopoll);
5888                        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5889                        return IRQ_HANDLED;
5890                }
5891        } else {
5892                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5893                        hrrq->toggle_bit)
5894
5895                        if (ipr_process_hrrq(hrrq, -1, &doneq))
5896                                rc = IRQ_HANDLED;
5897        }
5898
5899        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5900
5901        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5902                list_del(&ipr_cmd->queue);
5903                del_timer(&ipr_cmd->timer);
5904                ipr_cmd->fast_done(ipr_cmd);
5905        }
5906        return rc;
5907}
5908
5909/**
5910 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5911 * @ioa_cfg:    ioa config struct
5912 * @ipr_cmd:    ipr command struct
5913 *
5914 * Return value:
5915 *      0 on success / -1 on failure
5916 **/
5917static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5918                             struct ipr_cmnd *ipr_cmd)
5919{
5920        int i, nseg;
5921        struct scatterlist *sg;
5922        u32 length;
5923        u32 ioadl_flags = 0;
5924        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5925        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5926        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5927
5928        length = scsi_bufflen(scsi_cmd);
5929        if (!length)
5930                return 0;
5931
5932        nseg = scsi_dma_map(scsi_cmd);
5933        if (nseg < 0) {
5934                if (printk_ratelimit())
5935                        dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5936                return -1;
5937        }
5938
5939        ipr_cmd->dma_use_sg = nseg;
5940
5941        ioarcb->data_transfer_length = cpu_to_be32(length);
5942        ioarcb->ioadl_len =
5943                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5944
5945        if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5946                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5947                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5948        } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5949                ioadl_flags = IPR_IOADL_FLAGS_READ;
5950
5951        scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5952                ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5953                ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5954                ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5955        }
5956
5957        ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5958        return 0;
5959}
5960
5961/**
5962 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5963 * @ioa_cfg:    ioa config struct
5964 * @ipr_cmd:    ipr command struct
5965 *
5966 * Return value:
5967 *      0 on success / -1 on failure
5968 **/
5969static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5970                           struct ipr_cmnd *ipr_cmd)
5971{
5972        int i, nseg;
5973        struct scatterlist *sg;
5974        u32 length;
5975        u32 ioadl_flags = 0;
5976        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5977        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5978        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5979
5980        length = scsi_bufflen(scsi_cmd);
5981        if (!length)
5982                return 0;
5983
5984        nseg = scsi_dma_map(scsi_cmd);
5985        if (nseg < 0) {
5986                dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5987                return -1;
5988        }
5989
5990        ipr_cmd->dma_use_sg = nseg;
5991
5992        if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5993                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5994                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5995                ioarcb->data_transfer_length = cpu_to_be32(length);
5996                ioarcb->ioadl_len =
5997                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5998        } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5999                ioadl_flags = IPR_IOADL_FLAGS_READ;
6000                ioarcb->read_data_transfer_length = cpu_to_be32(length);
6001                ioarcb->read_ioadl_len =
6002                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6003        }
6004
6005        if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6006                ioadl = ioarcb->u.add_data.u.ioadl;
6007                ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6008                                    offsetof(struct ipr_ioarcb, u.add_data));
6009                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6010        }
6011
6012        scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6013                ioadl[i].flags_and_data_len =
6014                        cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6015                ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6016        }
6017
6018        ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6019        return 0;
6020}
6021
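/*
 * Illustrative sketch (not part of the driver): unlike the 64-bit
 * descriptor built by ipr_build_ioadl64() above, the 32-bit IOADL
 * packs the transfer flags and segment length into one big-endian
 * word (flags_and_data_len).  A user-space model of that packing;
 * the flag and mask values here are invented, not the driver's:
 */
#include <stdint.h>
#include <arpa/inet.h>			/* htonl() in place of cpu_to_be32() */

#define DEMO_FLAGS_READ	0x48000000u	/* invented flag bits */
#define DEMO_FLAGS_LAST	0x80000000u	/* invented flag bits */
#define DEMO_LEN_MASK	0x00ffffffu	/* invented length field */

static uint32_t demo_pack_ioadl(uint32_t flags, uint32_t sg_len, int is_last)
{
	uint32_t word = flags | (sg_len & DEMO_LEN_MASK);

	if (is_last)			/* terminate the list */
		word |= DEMO_FLAGS_LAST;
	return htonl(word);		/* adapter expects big endian */
}
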
6022/**
6023 * __ipr_erp_done - Process completion of ERP for a device
6024 * @ipr_cmd:            ipr command struct
6025 *
6026 * This function copies the sense buffer into the scsi_cmd
6027 * struct and calls the scsi_done function.
6028 *
6029 * Return value:
6030 *      nothing
6031 **/
6032static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6033{
6034        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6035        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6036        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6037
6038        if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6039                scsi_cmd->result |= (DID_ERROR << 16);
6040                scmd_printk(KERN_ERR, scsi_cmd,
6041                            "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6042        } else {
6043                memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6044                       SCSI_SENSE_BUFFERSIZE);
6045        }
6046
6047        if (res) {
6048                if (!ipr_is_naca_model(res))
6049                        res->needs_sync_complete = 1;
6050                res->in_erp = 0;
6051        }
6052        scsi_dma_unmap(ipr_cmd->scsi_cmd);
6053        scsi_cmd->scsi_done(scsi_cmd);
6054        if (ipr_cmd->eh_comp)
6055                complete(ipr_cmd->eh_comp);
6056        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6057}
6058
6059/**
6060 * ipr_erp_done - Process completion of ERP for a device
6061 * @ipr_cmd:            ipr command struct
6062 *
6063 * This function copies the sense buffer into the scsi_cmd
6064 * struct and calls the scsi_done function.
6065 *
6066 * Return value:
6067 *      nothing
6068 **/
6069static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6070{
6071        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6072        unsigned long hrrq_flags;
6073
6074        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6075        __ipr_erp_done(ipr_cmd);
6076        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6077}
6078
6079/**
6080 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6081 * @ipr_cmd:    ipr command struct
6082 *
6083 * Return value:
6084 *      none
6085 **/
6086static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6087{
6088        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6089        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6090        dma_addr_t dma_addr = ipr_cmd->dma_addr;
6091
6092        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6093        ioarcb->data_transfer_length = 0;
6094        ioarcb->read_data_transfer_length = 0;
6095        ioarcb->ioadl_len = 0;
6096        ioarcb->read_ioadl_len = 0;
6097        ioasa->hdr.ioasc = 0;
6098        ioasa->hdr.residual_data_len = 0;
6099
6100        if (ipr_cmd->ioa_cfg->sis64)
6101                ioarcb->u.sis64_addr_data.data_ioadl_addr =
6102                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6103        else {
6104                ioarcb->write_ioadl_addr =
6105                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6106                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6107        }
6108}
6109
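/*
 * Illustrative sketch (not part of the driver): the address fixups
 * above derive the bus address of a member embedded in an
 * already-mapped command block from the mapping's base address plus
 * offsetof(), so the inline IOADL needs no DMA mapping of its own.
 * A self-contained model with an invented layout:
 */
#include <stddef.h>
#include <stdint.h>

struct demo_cmnd {			/* invented stand-in layout */
	uint32_t hdr[8];
	uint64_t ioadl[4];
};

static uint64_t demo_ioadl_dma_addr(uint64_t cmnd_dma_addr)
{
	/* same base-plus-offsetof arithmetic the driver uses */
	return cmnd_dma_addr + offsetof(struct demo_cmnd, ioadl);
}
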
6110/**
6111 * __ipr_erp_request_sense - Send request sense to a device
6112 * @ipr_cmd:    ipr command struct
6113 *
6114 * This function sends a request sense to a device as a result
6115 * of a check condition.
6116 *
6117 * Return value:
6118 *      nothing
6119 **/
6120static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6121{
6122        struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6123        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6124
6125        if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6126                __ipr_erp_done(ipr_cmd);
6127                return;
6128        }
6129
6130        ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6131
6132        cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6133        cmd_pkt->cdb[0] = REQUEST_SENSE;
6134        cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6135        cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6136        cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6137        cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6138
6139        ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6140                       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6141
6142        ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6143                   IPR_REQUEST_SENSE_TIMEOUT * 2);
6144}
6145
6146/**
6147 * ipr_erp_request_sense - Send request sense to a device
6148 * @ipr_cmd:    ipr command struct
6149 *
6150 * This function sends a request sense to a device as a result
6151 * of a check condition.
6152 *
6153 * Return value:
6154 *      nothing
6155 **/
6156static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6157{
6158        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6159        unsigned long hrrq_flags;
6160
6161        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6162        __ipr_erp_request_sense(ipr_cmd);
6163        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6164}
6165
6166/**
6167 * ipr_erp_cancel_all - Send cancel all to a device
6168 * @ipr_cmd:    ipr command struct
6169 *
6170 * This function sends a cancel all to a device to clear the
6171 * queue. If we are running TCQ on the device, QERR is set to 1,
6172 * which means all outstanding ops have been dropped on the floor.
6173 * Cancel all will return them to us.
6174 *
6175 * Return value:
6176 *      nothing
6177 **/
6178static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6179{
6180        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6181        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6182        struct ipr_cmd_pkt *cmd_pkt;
6183
6184        res->in_erp = 1;
6185
6186        ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6187
6188        if (!scsi_cmd->device->simple_tags) {
6189                __ipr_erp_request_sense(ipr_cmd);
6190                return;
6191        }
6192
6193        cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6194        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6195        cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6196
6197        ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6198                   IPR_CANCEL_ALL_TIMEOUT);
6199}
6200
6201/**
6202 * ipr_dump_ioasa - Dump contents of IOASA
6203 * @ioa_cfg:    ioa config struct
6204 * @ipr_cmd:    ipr command struct
6205 * @res:                resource entry struct
6206 *
6207 * This function is invoked by the interrupt handler when ops
6208 * fail. It will log the IOASA if appropriate. Only called
6209 * for GPDD ops.
6210 *
6211 * Return value:
6212 *      none
6213 **/
6214static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6215                           struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6216{
6217        int i;
6218        u16 data_len;
6219        u32 ioasc, fd_ioasc;
6220        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6221        __be32 *ioasa_data = (__be32 *)ioasa;
6222        int error_index;
6223
6224        ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6225        fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6226
6227        if (0 == ioasc)
6228                return;
6229
6230        if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6231                return;
6232
6233        if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6234                error_index = ipr_get_error(fd_ioasc);
6235        else
6236                error_index = ipr_get_error(ioasc);
6237
6238        if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6239                /* Don't log an error if the IOA already logged one */
6240                if (ioasa->hdr.ilid != 0)
6241                        return;
6242
6243                if (!ipr_is_gscsi(res))
6244                        return;
6245
6246                if (ipr_error_table[error_index].log_ioasa == 0)
6247                        return;
6248        }
6249
6250        ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6251
6252        data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6253        if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6254                data_len = sizeof(struct ipr_ioasa64);
6255        else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6256                data_len = sizeof(struct ipr_ioasa);
6257
6258        ipr_err("IOASA Dump:\n");
6259
6260        for (i = 0; i < data_len / 4; i += 4) {
6261                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6262                        be32_to_cpu(ioasa_data[i]),
6263                        be32_to_cpu(ioasa_data[i+1]),
6264                        be32_to_cpu(ioasa_data[i+2]),
6265                        be32_to_cpu(ioasa_data[i+3]));
6266        }
6267}
6268
6269/**
6270 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6271 * @ipr_cmd:    ipr command struct
6273 *
6274 * Return value:
6275 *      none
6276 **/
6277static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6278{
6279        u32 failing_lba;
6280        u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6281        struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6282        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6283        u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6284
6285        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6286
6287        if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6288                return;
6289
6290        ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6291
6292        if (ipr_is_vset_device(res) &&
6293            ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6294            ioasa->u.vset.failing_lba_hi != 0) {
6295                sense_buf[0] = 0x72;
6296                sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6297                sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6298                sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6299
6300                sense_buf[7] = 12;
6301                sense_buf[8] = 0;
6302                sense_buf[9] = 0x0A;
6303                sense_buf[10] = 0x80;
6304
6305                failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6306
6307                sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6308                sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6309                sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6310                sense_buf[15] = failing_lba & 0x000000ff;
6311
6312                failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6313
6314                sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6315                sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6316                sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6317                sense_buf[19] = failing_lba & 0x000000ff;
6318        } else {
6319                sense_buf[0] = 0x70;
6320                sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6321                sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6322                sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6323
6324                /* Illegal request */
6325                if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6326                    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6327                        sense_buf[7] = 10;      /* additional length */
6328
6329                        /* IOARCB was in error */
6330                        if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6331                                sense_buf[15] = 0xC0;
6332                        else    /* Parameter data was invalid */
6333                                sense_buf[15] = 0x80;
6334
6335                        sense_buf[16] =
6336                            ((IPR_FIELD_POINTER_MASK &
6337                              be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6338                        sense_buf[17] =
6339                            (IPR_FIELD_POINTER_MASK &
6340                             be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6341                } else {
6342                        if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6343                                if (ipr_is_vset_device(res))
6344                                        failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6345                                else
6346                                        failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6347
6348                                sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6349                                sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6350                                sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6351                                sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6352                                sense_buf[6] = failing_lba & 0x000000ff;
6353                        }
6354
6355                        sense_buf[7] = 6;       /* additional length */
6356                }
6357        }
6358}
6359
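/*
 * Illustrative sketch (not part of the driver): the byte stores above
 * are plain big-endian packing of the failing LBA into the sense
 * buffer - bytes 3..6 of a fixed-format (0x70) sense block, or the
 * information descriptor of a descriptor-format (0x72) block when the
 * LBA needs more than 32 bits.  A compact user-space equivalent of
 * the fixed-format case:
 */
#include <stdint.h>

static void demo_put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24;
	p[1] = (v >> 16) & 0xff;
	p[2] = (v >> 8) & 0xff;
	p[3] = v & 0xff;
}

/* Fixed-format sense: set the valid bit, store the 32-bit LBA. */
static void demo_set_failing_lba(uint8_t *sense_buf, uint32_t lba)
{
	sense_buf[0] |= 0x80;		/* information field is valid */
	demo_put_be32(&sense_buf[3], lba);
}
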
6360/**
6361 * ipr_get_autosense - Copy autosense data to sense buffer
6362 * @ipr_cmd:    ipr command struct
6363 *
6364 * This function copies the autosense buffer to the buffer
6365 * in the scsi_cmd, if there is autosense available.
6366 *
6367 * Return value:
6368 *      1 if autosense was available / 0 if not
6369 **/
6370static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6371{
6372        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6373        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6374
6375        if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6376                return 0;
6377
6378        if (ipr_cmd->ioa_cfg->sis64)
6379                memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6380                       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6381                           SCSI_SENSE_BUFFERSIZE));
6382        else
6383                memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6384                       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6385                           SCSI_SENSE_BUFFERSIZE));
6386        return 1;
6387}
6388
6389/**
6390 * ipr_erp_start - Process an error response for a SCSI op
6391 * @ioa_cfg:    ioa config struct
6392 * @ipr_cmd:    ipr command struct
6393 *
6394 * This function determines whether or not to initiate ERP
6395 * on the affected device.
6396 *
6397 * Return value:
6398 *      nothing
6399 **/
6400static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6401                              struct ipr_cmnd *ipr_cmd)
6402{
6403        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6404        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6405        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6406        u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6407
6408        if (!res) {
6409                __ipr_scsi_eh_done(ipr_cmd);
6410                return;
6411        }
6412
6413        if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6414                ipr_gen_sense(ipr_cmd);
6415
6416        ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6417
6418        switch (masked_ioasc) {
6419        case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6420                if (ipr_is_naca_model(res))
6421                        scsi_cmd->result |= (DID_ABORT << 16);
6422                else
6423                        scsi_cmd->result |= (DID_IMM_RETRY << 16);
6424                break;
6425        case IPR_IOASC_IR_RESOURCE_HANDLE:
6426        case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6427                scsi_cmd->result |= (DID_NO_CONNECT << 16);
6428                break;
6429        case IPR_IOASC_HW_SEL_TIMEOUT:
6430                scsi_cmd->result |= (DID_NO_CONNECT << 16);
6431                if (!ipr_is_naca_model(res))
6432                        res->needs_sync_complete = 1;
6433                break;
6434        case IPR_IOASC_SYNC_REQUIRED:
6435                if (!res->in_erp)
6436                        res->needs_sync_complete = 1;
6437                scsi_cmd->result |= (DID_IMM_RETRY << 16);
6438                break;
6439        case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6440        case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6441                /*
6442                 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6443                 * so SCSI mid-layer and upper layers handle it accordingly.
6444                 */
6445                if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6446                        scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6447                break;
6448        case IPR_IOASC_BUS_WAS_RESET:
6449        case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6450                /*
6451                 * Report the bus reset and ask for a retry. The device
6452                 * will give CC/UA the next command.
6453                 */
6454                if (!res->resetting_device)
6455                        scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6456                scsi_cmd->result |= (DID_ERROR << 16);
6457                if (!ipr_is_naca_model(res))
6458                        res->needs_sync_complete = 1;
6459                break;
6460        case IPR_IOASC_HW_DEV_BUS_STATUS:
6461                scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6462                if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6463                        if (!ipr_get_autosense(ipr_cmd)) {
6464                                if (!ipr_is_naca_model(res)) {
6465                                        ipr_erp_cancel_all(ipr_cmd);
6466                                        return;
6467                                }
6468                        }
6469                }
6470                if (!ipr_is_naca_model(res))
6471                        res->needs_sync_complete = 1;
6472                break;
6473        case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6474                break;
6475        case IPR_IOASC_IR_NON_OPTIMIZED:
6476                if (res->raw_mode) {
6477                        res->raw_mode = 0;
6478                        scsi_cmd->result |= (DID_IMM_RETRY << 16);
6479                } else
6480                        scsi_cmd->result |= (DID_ERROR << 16);
6481                break;
6482        default:
6483                if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6484                        scsi_cmd->result |= (DID_ERROR << 16);
6485                if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6486                        res->needs_sync_complete = 1;
6487                break;
6488        }
6489
6490        scsi_dma_unmap(ipr_cmd->scsi_cmd);
6491        scsi_cmd->scsi_done(scsi_cmd);
6492        if (ipr_cmd->eh_comp)
6493                complete(ipr_cmd->eh_comp);
6494        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6495}
6496
6497/**
6498 * ipr_scsi_done - mid-layer done function
6499 * @ipr_cmd:    ipr command struct
6500 *
6501 * This function is invoked by the interrupt handler for
6502 * ops generated by the SCSI mid-layer.
6503 *
6504 * Return value:
6505 *      none
6506 **/
6507static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6508{
6509        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6510        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6511        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6512        unsigned long lock_flags;
6513
6514        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6515
6516        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6517                scsi_dma_unmap(scsi_cmd);
6518
6519                spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6520                scsi_cmd->scsi_done(scsi_cmd);
6521                if (ipr_cmd->eh_comp)
6522                        complete(ipr_cmd->eh_comp);
6523                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6524                spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6525        } else {
6526                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6527                spin_lock(&ipr_cmd->hrrq->_lock);
6528                ipr_erp_start(ioa_cfg, ipr_cmd);
6529                spin_unlock(&ipr_cmd->hrrq->_lock);
6530                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6531        }
6532}
6533
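/*
 * Illustrative sketch (not part of the driver): the error path above
 * nests the per-queue _lock inside the host lock.  Acquiring the two
 * in that same order on every path is what keeps the nesting
 * deadlock-free; skeleton of the discipline:
 */
static void demo_nested_locking(spinlock_t *host_lock, spinlock_t *hrrq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(host_lock, flags);	/* outer lock first */
	spin_lock(hrrq_lock);			/* then the inner lock */
	/* ... work that needs both locks ... */
	spin_unlock(hrrq_lock);
	spin_unlock_irqrestore(host_lock, flags);
}
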
6534/**
6535 * ipr_queuecommand - Queue a mid-layer request
6536 * @shost:              scsi host struct
6537 * @scsi_cmd:   scsi command struct
6538 *
6539 * This function queues a request generated by the mid-layer.
6540 *
6541 * Return value:
6542 *      0 on success
6543 *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6544 *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6545 **/
6546static int ipr_queuecommand(struct Scsi_Host *shost,
6547                            struct scsi_cmnd *scsi_cmd)
6548{
6549        struct ipr_ioa_cfg *ioa_cfg;
6550        struct ipr_resource_entry *res;
6551        struct ipr_ioarcb *ioarcb;
6552        struct ipr_cmnd *ipr_cmd;
6553        unsigned long hrrq_flags, lock_flags;
6554        int rc;
6555        struct ipr_hrr_queue *hrrq;
6556        int hrrq_id;
6557
6558        ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6559
6560        scsi_cmd->result = (DID_OK << 16);
6561        res = scsi_cmd->device->hostdata;
6562
6563        if (ipr_is_gata(res) && res->sata_port) {
6564                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6565                rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6566                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6567                return rc;
6568        }
6569
6570        hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6571        hrrq = &ioa_cfg->hrrq[hrrq_id];
6572
6573        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6574        /*
6575         * We are currently blocking all devices due to a host reset
6576         * We have told the host to stop giving us new requests, but
6577         * ERP ops don't count. FIXME
6578         */
6579        if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6580                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6581                return SCSI_MLQUEUE_HOST_BUSY;
6582        }
6583
6584        /*
6585         * FIXME - Create scsi_set_host_offline interface
6586         *  and the ioa_is_dead check can be removed
6587         */
6588        if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6589                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6590                goto err_nodev;
6591        }
6592
6593        ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6594        if (ipr_cmd == NULL) {
6595                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6596                return SCSI_MLQUEUE_HOST_BUSY;
6597        }
6598        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6599
6600        ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6601        ioarcb = &ipr_cmd->ioarcb;
6602
6603        memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6604        ipr_cmd->scsi_cmd = scsi_cmd;
6605        ipr_cmd->done = ipr_scsi_eh_done;
6606
6607        if (ipr_is_gscsi(res)) {
6608                if (scsi_cmd->underflow == 0)
6609                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6610
6611                if (res->reset_occurred) {
6612                        res->reset_occurred = 0;
6613                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6614                }
6615        }
6616
6617        if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6618                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6619
6620                ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6621                if (scsi_cmd->flags & SCMD_TAGGED)
6622                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6623                else
6624                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6625        }
6626
6627        if (scsi_cmd->cmnd[0] >= 0xC0 &&
6628            (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6629                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6630        }
6631        if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6632                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6633
6634                if (scsi_cmd->underflow == 0)
6635                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6636        }
6637
6638        if (ioa_cfg->sis64)
6639                rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6640        else
6641                rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6642
6643        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6644        if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6645                list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6646                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6647                if (!rc)
6648                        scsi_dma_unmap(scsi_cmd);
6649                return SCSI_MLQUEUE_HOST_BUSY;
6650        }
6651
6652        if (unlikely(hrrq->ioa_is_dead)) {
6653                list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6654                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6655                scsi_dma_unmap(scsi_cmd);
6656                goto err_nodev;
6657        }
6658
6659        ioarcb->res_handle = res->res_handle;
6660        if (res->needs_sync_complete) {
6661                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6662                res->needs_sync_complete = 0;
6663        }
6664        list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6665        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6666        ipr_send_command(ipr_cmd);
6667        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6668        return 0;
6669
6670err_nodev:
6671        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6672        memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6673        scsi_cmd->result = (DID_NO_CONNECT << 16);
6674        scsi_cmd->scsi_done(scsi_cmd);
6675        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6676        return 0;
6677}
6678
6679/**
6680 * ipr_ioctl - IOCTL handler
6681 * @sdev:       scsi device struct
6682 * @cmd:        IOCTL cmd
6683 * @arg:        IOCTL arg
6684 *
6685 * Return value:
6686 *      0 on success / other on failure
6687 **/
6688static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6689                     void __user *arg)
6690{
6691        struct ipr_resource_entry *res;
6692
6693        res = (struct ipr_resource_entry *)sdev->hostdata;
6694        if (res && ipr_is_gata(res)) {
6695                if (cmd == HDIO_GET_IDENTITY)
6696                        return -ENOTTY;
6697                return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6698        }
6699
6700        return -EINVAL;
6701}
6702
6703/**
6704 * ipr_ioa_info - Get information about the card/driver
6705 * @host:       scsi host struct
6706 *
6707 * Return value:
6708 *      pointer to buffer with description string
6709 **/
6710static const char *ipr_ioa_info(struct Scsi_Host *host)
6711{
6712        static char buffer[512];
6713        struct ipr_ioa_cfg *ioa_cfg;
6714        unsigned long lock_flags = 0;
6715
6716        ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6717
6718        spin_lock_irqsave(host->host_lock, lock_flags);
6719        sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6720        spin_unlock_irqrestore(host->host_lock, lock_flags);
6721
6722        return buffer;
6723}
6724
6725static struct scsi_host_template driver_template = {
6726        .module = THIS_MODULE,
6727        .name = "IPR",
6728        .info = ipr_ioa_info,
6729        .ioctl = ipr_ioctl,
6730#ifdef CONFIG_COMPAT
6731        .compat_ioctl = ipr_ioctl,
6732#endif
6733        .queuecommand = ipr_queuecommand,
6734        .eh_abort_handler = ipr_eh_abort,
6735        .eh_device_reset_handler = ipr_eh_dev_reset,
6736        .eh_host_reset_handler = ipr_eh_host_reset,
6737        .slave_alloc = ipr_slave_alloc,
6738        .slave_configure = ipr_slave_configure,
6739        .slave_destroy = ipr_slave_destroy,
6740        .scan_finished = ipr_scan_finished,
6741        .target_alloc = ipr_target_alloc,
6742        .target_destroy = ipr_target_destroy,
6743        .change_queue_depth = ipr_change_queue_depth,
6744        .bios_param = ipr_biosparam,
6745        .can_queue = IPR_MAX_COMMANDS,
6746        .this_id = -1,
6747        .sg_tablesize = IPR_MAX_SGLIST,
6748        .max_sectors = IPR_IOA_MAX_SECTORS,
6749        .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6750        .shost_attrs = ipr_ioa_attrs,
6751        .sdev_attrs = ipr_dev_attrs,
6752        .proc_name = IPR_NAME,
6753};
6754
6755/**
6756 * ipr_ata_phy_reset - libata phy_reset handler
6757 * @ap:         ata port to reset
6758 *
6759 **/
6760static void ipr_ata_phy_reset(struct ata_port *ap)
6761{
6762        unsigned long flags;
6763        struct ipr_sata_port *sata_port = ap->private_data;
6764        struct ipr_resource_entry *res = sata_port->res;
6765        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6766        int rc;
6767
6768        ENTER;
6769        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6770        while (ioa_cfg->in_reset_reload) {
6771                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6772                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6773                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6774        }
6775
6776        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6777                goto out_unlock;
6778
6779        rc = ipr_device_reset(ioa_cfg, res);
6780
6781        if (rc) {
6782                ap->link.device[0].class = ATA_DEV_NONE;
6783                goto out_unlock;
6784        }
6785
6786        ap->link.device[0].class = res->ata_class;
6787        if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6788                ap->link.device[0].class = ATA_DEV_NONE;
6789
6790out_unlock:
6791        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6792        LEAVE;
6793}
6794
6795/**
6796 * ipr_ata_post_internal - Cleanup after an internal command
6797 * @qc: ATA queued command
6798 *
6799 * Return value:
6800 *      none
6801 **/
6802static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6803{
6804        struct ipr_sata_port *sata_port = qc->ap->private_data;
6805        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6806        struct ipr_cmnd *ipr_cmd;
6807        struct ipr_hrr_queue *hrrq;
6808        unsigned long flags;
6809
6810        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6811        while (ioa_cfg->in_reset_reload) {
6812                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6813                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6814                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6815        }
6816
6817        for_each_hrrq(hrrq, ioa_cfg) {
6818                spin_lock(&hrrq->_lock);
6819                list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6820                        if (ipr_cmd->qc == qc) {
6821                                ipr_device_reset(ioa_cfg, sata_port->res);
6822                                break;
6823                        }
6824                }
6825                spin_unlock(&hrrq->_lock);
6826        }
6827        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6828}
6829
6830/**
6831 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6832 * @regs:       destination
6833 * @tf: source ATA taskfile
6834 *
6835 * Return value:
6836 *      none
6837 **/
6838static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6839                             struct ata_taskfile *tf)
6840{
6841        regs->feature = tf->feature;
6842        regs->nsect = tf->nsect;
6843        regs->lbal = tf->lbal;
6844        regs->lbam = tf->lbam;
6845        regs->lbah = tf->lbah;
6846        regs->device = tf->device;
6847        regs->command = tf->command;
6848        regs->hob_feature = tf->hob_feature;
6849        regs->hob_nsect = tf->hob_nsect;
6850        regs->hob_lbal = tf->hob_lbal;
6851        regs->hob_lbam = tf->hob_lbam;
6852        regs->hob_lbah = tf->hob_lbah;
6853        regs->ctl = tf->ctl;
6854}
6855
6856/**
6857 * ipr_sata_done - done function for SATA commands
6858 * @ipr_cmd:    ipr command struct
6859 *
6860 * This function is invoked by the interrupt handler for
6861 * ops generated by the SCSI mid-layer to SATA devices
6862 *
6863 * Return value:
6864 *      none
6865 **/
6866static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6867{
6868        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6869        struct ata_queued_cmd *qc = ipr_cmd->qc;
6870        struct ipr_sata_port *sata_port = qc->ap->private_data;
6871        struct ipr_resource_entry *res = sata_port->res;
6872        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6873
6874        spin_lock(&ipr_cmd->hrrq->_lock);
6875        if (ipr_cmd->ioa_cfg->sis64)
6876                memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6877                       sizeof(struct ipr_ioasa_gata));
6878        else
6879                memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6880                       sizeof(struct ipr_ioasa_gata));
6881        ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6882
6883        if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6884                scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6885
6886        if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6887                qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6888        else
6889                qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6890        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6891        spin_unlock(&ipr_cmd->hrrq->_lock);
6892        ata_qc_complete(qc);
6893}
6894
6895/**
6896 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6897 * @ipr_cmd:    ipr command struct
6898 * @qc:         ATA queued command
6899 *
6900 **/
6901static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6902                                  struct ata_queued_cmd *qc)
6903{
6904        u32 ioadl_flags = 0;
6905        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6906        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6907        struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6908        int len = qc->nbytes;
6909        struct scatterlist *sg;
6910        unsigned int si;
6911        dma_addr_t dma_addr = ipr_cmd->dma_addr;
6912
6913        if (len == 0)
6914                return;
6915
6916        if (qc->dma_dir == DMA_TO_DEVICE) {
6917                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6918                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6919        } else if (qc->dma_dir == DMA_FROM_DEVICE)
6920                ioadl_flags = IPR_IOADL_FLAGS_READ;
6921
6922        ioarcb->data_transfer_length = cpu_to_be32(len);
6923        ioarcb->ioadl_len =
6924                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6925        ioarcb->u.sis64_addr_data.data_ioadl_addr =
6926                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6927
6928        for_each_sg(qc->sg, sg, qc->n_elem, si) {
6929                ioadl64->flags = cpu_to_be32(ioadl_flags);
6930                ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6931                ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6932
6933                last_ioadl64 = ioadl64;
6934                ioadl64++;
6935        }
6936
6937        if (likely(last_ioadl64))
6938                last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6939}
6940
6941/**
6942 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6943 * @ipr_cmd:    ipr command struct
6944 * @qc:         ATA queued command
6945 *
6946 **/
6947static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6948                                struct ata_queued_cmd *qc)
6949{
6950        u32 ioadl_flags = 0;
6951        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6952        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6953        struct ipr_ioadl_desc *last_ioadl = NULL;
6954        int len = qc->nbytes;
6955        struct scatterlist *sg;
6956        unsigned int si;
6957
6958        if (len == 0)
6959                return;
6960
6961        if (qc->dma_dir == DMA_TO_DEVICE) {
6962                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6963                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6964                ioarcb->data_transfer_length = cpu_to_be32(len);
6965                ioarcb->ioadl_len =
6966                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6967        } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6968                ioadl_flags = IPR_IOADL_FLAGS_READ;
6969                ioarcb->read_data_transfer_length = cpu_to_be32(len);
6970                ioarcb->read_ioadl_len =
6971                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6972        }
6973
6974        for_each_sg(qc->sg, sg, qc->n_elem, si) {
6975                ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6976                ioadl->address = cpu_to_be32(sg_dma_address(sg));
6977
6978                last_ioadl = ioadl;
6979                ioadl++;
6980        }
6981
6982        if (likely(last_ioadl))
6983                last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6984}
6985
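/*
 * Illustrative sketch (not part of the driver): both ATA list
 * builders above end with the same guard - remember the last
 * descriptor written inside the loop and OR in the LAST flag
 * afterwards, so an empty scatter list never touches a descriptor at
 * all.  Skeleton form, reusing the 64-bit descriptor type from above:
 */
static void demo_terminate_list(struct ipr_ioadl64_desc *ioadl64, int n_elem)
{
	struct ipr_ioadl64_desc *last = NULL;
	int i;

	for (i = 0; i < n_elem; i++) {
		/* ... fill ioadl64[i] as the builders above do ... */
		last = &ioadl64[i];
	}

	if (likely(last))	/* only a non-empty list gets terminated */
		last->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
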
6986/**
6987 * ipr_qc_defer - Get a free ipr_cmd
6988 * @qc: queued command
6989 *
6990 * Return value:
6991 *      0 on success / ATA_DEFER_LINK if the command must be deferred
6992 **/
6993static int ipr_qc_defer(struct ata_queued_cmd *qc)
6994{
6995        struct ata_port *ap = qc->ap;
6996        struct ipr_sata_port *sata_port = ap->private_data;
6997        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6998        struct ipr_cmnd *ipr_cmd;
6999        struct ipr_hrr_queue *hrrq;
7000        int hrrq_id;
7001
7002        hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7003        hrrq = &ioa_cfg->hrrq[hrrq_id];
7004
7005        qc->lldd_task = NULL;
7006        spin_lock(&hrrq->_lock);
7007        if (unlikely(hrrq->ioa_is_dead)) {
7008                spin_unlock(&hrrq->_lock);
7009                return 0;
7010        }
7011
7012        if (unlikely(!hrrq->allow_cmds)) {
7013                spin_unlock(&hrrq->_lock);
7014                return ATA_DEFER_LINK;
7015        }
7016
7017        ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7018        if (ipr_cmd == NULL) {
7019                spin_unlock(&hrrq->_lock);
7020                return ATA_DEFER_LINK;
7021        }
7022
7023        qc->lldd_task = ipr_cmd;
7024        spin_unlock(&hrrq->_lock);
7025        return 0;
7026}
7027
7028/**
7029 * ipr_qc_issue - Issue a SATA qc to a device
7030 * @qc: queued command
7031 *
7032 * Return value:
7033 *      0 on success / AC_ERR_* on failure
7034 **/
7035static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7036{
7037        struct ata_port *ap = qc->ap;
7038        struct ipr_sata_port *sata_port = ap->private_data;
7039        struct ipr_resource_entry *res = sata_port->res;
7040        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7041        struct ipr_cmnd *ipr_cmd;
7042        struct ipr_ioarcb *ioarcb;
7043        struct ipr_ioarcb_ata_regs *regs;
7044
7045        if (qc->lldd_task == NULL)
7046                ipr_qc_defer(qc);
7047
7048        ipr_cmd = qc->lldd_task;
7049        if (ipr_cmd == NULL)
7050                return AC_ERR_SYSTEM;
7051
7052        qc->lldd_task = NULL;
7053        spin_lock(&ipr_cmd->hrrq->_lock);
7054        if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7055                        ipr_cmd->hrrq->ioa_is_dead)) {
7056                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7057                spin_unlock(&ipr_cmd->hrrq->_lock);
7058                return AC_ERR_SYSTEM;
7059        }
7060
7061        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7062        ioarcb = &ipr_cmd->ioarcb;
7063
7064        if (ioa_cfg->sis64) {
7065                regs = &ipr_cmd->i.ata_ioadl.regs;
7066                ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7067        } else
7068                regs = &ioarcb->u.add_data.u.regs;
7069
7070        memset(regs, 0, sizeof(*regs));
7071        ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7072
7073        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7074        ipr_cmd->qc = qc;
7075        ipr_cmd->done = ipr_sata_done;
7076        ipr_cmd->ioarcb.res_handle = res->res_handle;
7077        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7078        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7079        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7080        ipr_cmd->dma_use_sg = qc->n_elem;
7081
7082        if (ioa_cfg->sis64)
7083                ipr_build_ata_ioadl64(ipr_cmd, qc);
7084        else
7085                ipr_build_ata_ioadl(ipr_cmd, qc);
7086
7087        regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7088        ipr_copy_sata_tf(regs, &qc->tf);
7089        memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7090        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7091
7092        switch (qc->tf.protocol) {
7093        case ATA_PROT_NODATA:
7094        case ATA_PROT_PIO:
7095                break;
7096
7097        case ATA_PROT_DMA:
7098                regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7099                break;
7100
7101        case ATAPI_PROT_PIO:
7102        case ATAPI_PROT_NODATA:
7103                regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7104                break;
7105
7106        case ATAPI_PROT_DMA:
7107                regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7108                regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7109                break;
7110
7111        default:
7112                WARN_ON(1);
7113                spin_unlock(&ipr_cmd->hrrq->_lock);
7114                return AC_ERR_INVALID;
7115        }
7116
7117        ipr_send_command(ipr_cmd);
7118        spin_unlock(&ipr_cmd->hrrq->_lock);
7119
7120        return 0;
7121}
7122
7123/**
7124 * ipr_qc_fill_rtf - Read result TF
7125 * @qc: ATA queued command
7126 *
7127 * Return value:
7128 *      true
7129 **/
7130static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7131{
7132        struct ipr_sata_port *sata_port = qc->ap->private_data;
7133        struct ipr_ioasa_gata *g = &sata_port->ioasa;
7134        struct ata_taskfile *tf = &qc->result_tf;
7135
7136        tf->feature = g->error;
7137        tf->nsect = g->nsect;
7138        tf->lbal = g->lbal;
7139        tf->lbam = g->lbam;
7140        tf->lbah = g->lbah;
7141        tf->device = g->device;
7142        tf->command = g->status;
7143        tf->hob_nsect = g->hob_nsect;
7144        tf->hob_lbal = g->hob_lbal;
7145        tf->hob_lbam = g->hob_lbam;
7146        tf->hob_lbah = g->hob_lbah;
7147
7148        return true;
7149}
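
/*
 * There are no taskfile registers to read on this adapter: the result
 * taskfile above is reconstructed from the ATA status the IOA returned in
 * the command's IOASA, cached in sata_port->ioasa by the ipr_sata_done()
 * completion handler.
 */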
7150
7151static struct ata_port_operations ipr_sata_ops = {
7152        .phy_reset = ipr_ata_phy_reset,
7153        .hardreset = ipr_sata_reset,
7154        .post_internal_cmd = ipr_ata_post_internal,
7155        .qc_prep = ata_noop_qc_prep,
7156        .qc_defer = ipr_qc_defer,
7157        .qc_issue = ipr_qc_issue,
7158        .qc_fill_rtf = ipr_qc_fill_rtf,
7159        .port_start = ata_sas_port_start,
7160        .port_stop = ata_sas_port_stop
7161};
7162
7163static struct ata_port_info sata_port_info = {
7164        .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7165                          ATA_FLAG_SAS_HOST,
7166        .pio_mask       = ATA_PIO4_ONLY,
7167        .mwdma_mask     = ATA_MWDMA2,
7168        .udma_mask      = ATA_UDMA6,
7169        .port_ops       = &ipr_sata_ops
7170};
7171
7172#ifdef CONFIG_PPC_PSERIES
7173static const u16 ipr_blocked_processors[] = {
7174        PVR_NORTHSTAR,
7175        PVR_PULSAR,
7176        PVR_POWER4,
7177        PVR_ICESTAR,
7178        PVR_SSTAR,
7179        PVR_POWER4p,
7180        PVR_630,
7181        PVR_630p
7182};
7183
7184/**
7185 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7186 * @ioa_cfg:    ioa cfg struct
7187 *
7188 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7189 * certain pSeries hardware. This function determines if the given
7190 * adapter is in one of these configurations or not.
7191 *
7192 * Return value:
7193 *      1 if adapter is not supported / 0 if adapter is supported
7194 **/
7195static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7196{
7197        int i;
7198
7199        if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7200                for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7201                        if (pvr_version_is(ipr_blocked_processors[i]))
7202                                return 1;
7203                }
7204        }
7205        return 0;
7206}
7207#else
7208#define ipr_invalid_adapter(ioa_cfg) 0
7209#endif
7210
7211/**
7212 * ipr_ioa_bringdown_done - IOA bring down completion.
7213 * @ipr_cmd:    ipr command struct
7214 *
7215 * This function processes the completion of an adapter bring down.
7216 * It wakes any reset sleepers.
7217 *
7218 * Return value:
7219 *      IPR_RC_JOB_RETURN
7220 **/
7221static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7222{
7223        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7224        int i;
7225
7226        ENTER;
7227        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7228                ipr_trace;
7229                ioa_cfg->scsi_unblock = 1;
7230                schedule_work(&ioa_cfg->work_q);
7231        }
7232
7233        ioa_cfg->in_reset_reload = 0;
7234        ioa_cfg->reset_retries = 0;
7235        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7236                spin_lock(&ioa_cfg->hrrq[i]._lock);
7237                ioa_cfg->hrrq[i].ioa_is_dead = 1;
7238                spin_unlock(&ioa_cfg->hrrq[i]._lock);
7239        }
7240        wmb();
7241
7242        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7243        wake_up_all(&ioa_cfg->reset_wait_q);
7244        LEAVE;
7245
7246        return IPR_RC_JOB_RETURN;
7247}
7248
7249/**
7250 * ipr_ioa_reset_done - IOA reset completion.
7251 * @ipr_cmd:    ipr command struct
7252 *
7253 * This function processes the completion of an adapter reset.
7254 * It schedules any necessary mid-layer add/removes and
7255 * wakes any reset sleepers.
7256 *
7257 * Return value:
7258 *      IPR_RC_JOB_RETURN
7259 **/
7260static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7261{
7262        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7263        struct ipr_resource_entry *res;
7264        int j;
7265
7266        ENTER;
7267        ioa_cfg->in_reset_reload = 0;
7268        for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7269                spin_lock(&ioa_cfg->hrrq[j]._lock);
7270                ioa_cfg->hrrq[j].allow_cmds = 1;
7271                spin_unlock(&ioa_cfg->hrrq[j]._lock);
7272        }
7273        wmb();
7274        ioa_cfg->reset_cmd = NULL;
7275        ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7276
7277        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7278                if (res->add_to_ml || res->del_from_ml) {
7279                        ipr_trace;
7280                        break;
7281                }
7282        }
7283        schedule_work(&ioa_cfg->work_q);
7284
7285        for (j = 0; j < IPR_NUM_HCAMS; j++) {
7286                list_del_init(&ioa_cfg->hostrcb[j]->queue);
7287                if (j < IPR_NUM_LOG_HCAMS)
7288                        ipr_send_hcam(ioa_cfg,
7289                                IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7290                                ioa_cfg->hostrcb[j]);
7291                else
7292                        ipr_send_hcam(ioa_cfg,
7293                                IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7294                                ioa_cfg->hostrcb[j]);
7295        }
7296
7297        scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7298        dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7299
7300        ioa_cfg->reset_retries = 0;
7301        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7302        wake_up_all(&ioa_cfg->reset_wait_q);
7303
7304        ioa_cfg->scsi_unblock = 1;
7305        schedule_work(&ioa_cfg->work_q);
7306        LEAVE;
7307        return IPR_RC_JOB_RETURN;
7308}
7309
7310/**
7311 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7312 * @supported_dev:      supported device struct
7313 * @vpids:              vendor product id struct
7314 *
7315 * Return value:
7316 *      none
7317 **/
7318static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7319                                 struct ipr_std_inq_vpids *vpids)
7320{
7321        memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7322        memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7323        supported_dev->num_records = 1;
7324        supported_dev->data_length =
7325                cpu_to_be16(sizeof(struct ipr_supported_device));
7326        supported_dev->reserved = 0;
7327}
7328
7329/**
7330 * ipr_set_supported_devs - Send Set Supported Devices for a device
7331 * @ipr_cmd:    ipr command struct
7332 *
7333 * This function sends a Set Supported Devices to the adapter
7334 *
7335 * Return value:
7336 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7337 **/
7338static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7339{
7340        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7341        struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7342        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7343        struct ipr_resource_entry *res = ipr_cmd->u.res;
7344
7345        ipr_cmd->job_step = ipr_ioa_reset_done;
7346
7347        list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7348                if (!ipr_is_scsi_disk(res))
7349                        continue;
7350
7351                ipr_cmd->u.res = res;
7352                ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7353
7354                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7355                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7356                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7357
7358                ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7359                ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7360                ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7361                ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7362
7363                ipr_init_ioadl(ipr_cmd,
7364                               ioa_cfg->vpd_cbs_dma +
7365                                 offsetof(struct ipr_misc_cbs, supp_dev),
7366                               sizeof(struct ipr_supported_device),
7367                               IPR_IOADL_FLAGS_WRITE_LAST);
7368
7369                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7370                           IPR_SET_SUP_DEVICE_TIMEOUT);
7371
7372                if (!ioa_cfg->sis64)
7373                        ipr_cmd->job_step = ipr_set_supported_devs;
7374                LEAVE;
7375                return IPR_RC_JOB_RETURN;
7376        }
7377
7378        LEAVE;
7379        return IPR_RC_JOB_CONTINUE;
7380}
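
/*
 * Reader's note: adapter bringup/reset is a chain of job_step functions like
 * ipr_set_supported_devs() above. A step returns IPR_RC_JOB_CONTINUE to have
 * the next step run immediately, or IPR_RC_JOB_RETURN after queueing an
 * adapter command whose completion re-enters the chain. A minimal,
 * illustrative sketch of that driver loop (the real one is
 * ipr_reset_ioa_job(), elsewhere in this file):
 */
static void __maybe_unused ipr_job_loop_sketch(struct ipr_cmnd *ipr_cmd)
{
        int rc = IPR_RC_JOB_CONTINUE;

        /* Run synchronous steps back to back; stop once a step has issued
         * an async command and will be resumed from its done handler. */
        while (rc == IPR_RC_JOB_CONTINUE)
                rc = ipr_cmd->job_step(ipr_cmd);
}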
7381
7382/**
7383 * ipr_get_mode_page - Locate specified mode page
7384 * @mode_pages: mode page buffer
7385 * @page_code:  page code to find
7386 * @len:        minimum required length for mode page
7387 *
7388 * Return value:
7389 *      pointer to mode page / NULL on failure
7390 **/
7391static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7392                               u32 page_code, u32 len)
7393{
7394        struct ipr_mode_page_hdr *mode_hdr;
7395        u32 page_length;
7396        u32 length;
7397
7398        if (!mode_pages || (mode_pages->hdr.length == 0))
7399                return NULL;
7400
7401        length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7402        mode_hdr = (struct ipr_mode_page_hdr *)
7403                (mode_pages->data + mode_pages->hdr.block_desc_len);
7404
7405        while (length) {
7406                if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7407                        if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7408                                return mode_hdr;
7409                        break;
7410                } else {
7411                        page_length = (sizeof(struct ipr_mode_page_hdr) +
7412                                       mode_hdr->page_length);
7413                        length -= page_length;
7414                        mode_hdr = (struct ipr_mode_page_hdr *)
7415                                ((unsigned long)mode_hdr + page_length);
7416                }
7417        }
7418        return NULL;
7419}
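
/*
 * Aside on the arithmetic above: hdr.length is the MODE SENSE(6) "mode data
 * length" byte, which does not count itself (hence the +1), and the 4-byte
 * mode parameter header plus any block descriptors are skipped before the
 * page headers are walked.
 */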
7420
7421/**
7422 * ipr_check_term_power - Check for term power errors
7423 * @ioa_cfg:    ioa config struct
7424 * @mode_pages: IOAFP mode pages buffer
7425 *
7426 * Check the IOAFP's mode page 28 for term power errors
7427 *
7428 * Return value:
7429 *      nothing
7430 **/
7431static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7432                                 struct ipr_mode_pages *mode_pages)
7433{
7434        int i;
7435        int entry_length;
7436        struct ipr_dev_bus_entry *bus;
7437        struct ipr_mode_page28 *mode_page;
7438
7439        mode_page = ipr_get_mode_page(mode_pages, 0x28,
7440                                      sizeof(struct ipr_mode_page28));
7441
7442        entry_length = mode_page->entry_length;
7443
7444        bus = mode_page->bus;
7445
7446        for (i = 0; i < mode_page->num_entries; i++) {
7447                if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7448                        dev_err(&ioa_cfg->pdev->dev,
7449                                "Term power is absent on scsi bus %d\n",
7450                                bus->res_addr.bus);
7451                }
7452
7453                bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7454        }
7455}
7456
7457/**
7458 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7459 * @ioa_cfg:    ioa config struct
7460 *
7461 * Looks through the config table checking for SES devices. If
7462 * an SES device has an entry in the SES table specifying a maximum SCSI
7463 * bus speed, that limit is applied to the bus.
7464 *
7465 * Return value:
7466 *      none
7467 **/
7468static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7469{
7470        u32 max_xfer_rate;
7471        int i;
7472
7473        for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7474                max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7475                                                       ioa_cfg->bus_attr[i].bus_width);
7476
7477                if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7478                        ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7479        }
7480}
7481
7482/**
7483 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7484 * @ioa_cfg:    ioa config struct
7485 * @mode_pages: mode page 28 buffer
7486 *
7487 * Updates mode page 28 based on driver configuration
7488 *
7489 * Return value:
7490 *      none
7491 **/
7492static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7493                                          struct ipr_mode_pages *mode_pages)
7494{
7495        int i, entry_length;
7496        struct ipr_dev_bus_entry *bus;
7497        struct ipr_bus_attributes *bus_attr;
7498        struct ipr_mode_page28 *mode_page;
7499
7500        mode_page = ipr_get_mode_page(mode_pages, 0x28,
7501                                      sizeof(struct ipr_mode_page28));
7502
7503        entry_length = mode_page->entry_length;
7504
7505        /* Loop for each device bus entry */
7506        for (i = 0, bus = mode_page->bus;
7507             i < mode_page->num_entries;
7508             i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7509                if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7510                        dev_err(&ioa_cfg->pdev->dev,
7511                                "Invalid resource address reported: 0x%08X\n",
7512                                IPR_GET_PHYS_LOC(bus->res_addr));
7513                        continue;
7514                }
7515
7516                bus_attr = &ioa_cfg->bus_attr[i];
7517                bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7518                bus->bus_width = bus_attr->bus_width;
7519                bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7520                bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7521                if (bus_attr->qas_enabled)
7522                        bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7523                else
7524                        bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7525        }
7526}
7527
7528/**
7529 * ipr_build_mode_select - Build a mode select command
7530 * @ipr_cmd:    ipr command struct
7531 * @res_handle: resource handle to send command to
7532 * @parm:               byte 1 of the Mode Select CDB (PF/SP bits)
7533 * @dma_addr:   DMA buffer address
7534 * @xfer_len:   data transfer length
7535 *
7536 * Return value:
7537 *      none
7538 **/
7539static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7540                                  __be32 res_handle, u8 parm,
7541                                  dma_addr_t dma_addr, u8 xfer_len)
7542{
7543        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7544
7545        ioarcb->res_handle = res_handle;
7546        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7547        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7548        ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7549        ioarcb->cmd_pkt.cdb[1] = parm;
7550        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7551
7552        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7553}
7554
7555/**
7556 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7557 * @ipr_cmd:    ipr command struct
7558 *
7559 * This function sets up the SCSI bus attributes and sends
7560 * a Mode Select for Page 28 to activate them.
7561 *
7562 * Return value:
7563 *      IPR_RC_JOB_RETURN
7564 **/
7565static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7566{
7567        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7568        struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7569        int length;
7570
7571        ENTER;
7572        ipr_scsi_bus_speed_limit(ioa_cfg);
7573        ipr_check_term_power(ioa_cfg, mode_pages);
7574        ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7575        length = mode_pages->hdr.length + 1;
7576        mode_pages->hdr.length = 0;
7577
7578        ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7579                              ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7580                              length);
7581
7582        ipr_cmd->job_step = ipr_set_supported_devs;
7583        ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7584                                    struct ipr_resource_entry, queue);
7585        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7586
7587        LEAVE;
7588        return IPR_RC_JOB_RETURN;
7589}
7590
7591/**
7592 * ipr_build_mode_sense - Builds a mode sense command
7593 * @ipr_cmd:    ipr command struct
7594 * @res_handle: resource handle to send command to
7595 * @parm:               byte 2 of the Mode Sense CDB (the page code)
7596 * @dma_addr:   DMA address of mode sense buffer
7597 * @xfer_len:   Size of DMA buffer
7598 *
7599 * Return value:
7600 *      none
7601 **/
7602static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7603                                 __be32 res_handle,
7604                                 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7605{
7606        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7607
7608        ioarcb->res_handle = res_handle;
7609        ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7610        ioarcb->cmd_pkt.cdb[2] = parm;
7611        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7612        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7613
7614        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7615}
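
/*
 * CDB layout used by the two builders above (6-byte SCSI commands): for
 * MODE SELECT(6), byte 1 carries the PF/SP bits (the 0x11 passed by the
 * page 0x24/0x28 callers sets both) and byte 4 the parameter list length;
 * for MODE SENSE(6), byte 2 selects the page code and byte 4 gives the
 * allocation length.
 */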
7616
7617/**
7618 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7619 * @ipr_cmd:    ipr command struct
7620 *
7621 * This function handles the failure of an IOA bringup command.
7622 *
7623 * Return value:
7624 *      IPR_RC_JOB_RETURN
7625 **/
7626static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7627{
7628        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7629        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7630
7631        dev_err(&ioa_cfg->pdev->dev,
7632                "0x%02X failed with IOASC: 0x%08X\n",
7633                ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7634
7635        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7636        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7637        return IPR_RC_JOB_RETURN;
7638}
7639
7640/**
7641 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7642 * @ipr_cmd:    ipr command struct
7643 *
7644 * This function handles the failure of a Mode Sense to the IOAFP.
7645 * Some adapters do not handle all mode pages.
7646 *
7647 * Return value:
7648 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7649 **/
7650static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7651{
7652        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7653        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7654
7655        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7656                ipr_cmd->job_step = ipr_set_supported_devs;
7657                ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7658                                            struct ipr_resource_entry, queue);
7659                return IPR_RC_JOB_CONTINUE;
7660        }
7661
7662        return ipr_reset_cmd_failed(ipr_cmd);
7663}
7664
7665/**
7666 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7667 * @ipr_cmd:    ipr command struct
7668 *
7669 * This function sends a Page 28 mode sense to the IOA to
7670 * retrieve SCSI bus attributes.
7671 *
7672 * Return value:
7673 *      IPR_RC_JOB_RETURN
7674 **/
7675static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7676{
7677        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7678
7679        ENTER;
7680        ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7681                             0x28, ioa_cfg->vpd_cbs_dma +
7682                             offsetof(struct ipr_misc_cbs, mode_pages),
7683                             sizeof(struct ipr_mode_pages));
7684
7685        ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7686        ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7687
7688        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7689
7690        LEAVE;
7691        return IPR_RC_JOB_RETURN;
7692}
7693
7694/**
7695 * ipr_ioafp_mode_select_page24 - Issue Mode Select Page 24 to IOA
7696 * @ipr_cmd:    ipr command struct
7697 *
7698 * This function enables dual IOA RAID support if possible.
7699 *
7700 * Return value:
7701 *      IPR_RC_JOB_RETURN
7702 **/
7703static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7704{
7705        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7706        struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7707        struct ipr_mode_page24 *mode_page;
7708        int length;
7709
7710        ENTER;
7711        mode_page = ipr_get_mode_page(mode_pages, 0x24,
7712                                      sizeof(struct ipr_mode_page24));
7713
7714        if (mode_page)
7715                mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7716
7717        length = mode_pages->hdr.length + 1;
7718        mode_pages->hdr.length = 0;
7719
7720        ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7721                              ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7722                              length);
7723
7724        ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7725        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7726
7727        LEAVE;
7728        return IPR_RC_JOB_RETURN;
7729}
7730
7731/**
7732 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7733 * @ipr_cmd:    ipr command struct
7734 *
7735 * This function handles the failure of a Mode Sense to the IOAFP.
7736 * Some adapters do not handle all mode pages.
7737 *
7738 * Return value:
7739 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7740 **/
7741static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7742{
7743        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7744
7745        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7746                ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7747                return IPR_RC_JOB_CONTINUE;
7748        }
7749
7750        return ipr_reset_cmd_failed(ipr_cmd);
7751}
7752
7753/**
7754 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7755 * @ipr_cmd:    ipr command struct
7756 *
7757 * This function sends a mode sense to the IOA to retrieve
7758 * the IOA Advanced Function Control mode page.
7759 *
7760 * Return value:
7761 *      IPR_RC_JOB_RETURN
7762 **/
7763static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7764{
7765        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7766
7767        ENTER;
7768        ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7769                             0x24, ioa_cfg->vpd_cbs_dma +
7770                             offsetof(struct ipr_misc_cbs, mode_pages),
7771                             sizeof(struct ipr_mode_pages));
7772
7773        ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7774        ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7775
7776        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7777
7778        LEAVE;
7779        return IPR_RC_JOB_RETURN;
7780}
7781
7782/**
7783 * ipr_init_res_table - Initialize the resource table
7784 * @ipr_cmd:    ipr command struct
7785 *
7786 * This function looks through the existing resource table, comparing
7787 * it with the config table. This function will take care of old/new
7788 * devices and schedule adding/removing them from the mid-layer
7789 * as appropriate.
7790 *
7791 * Return value:
7792 *      IPR_RC_JOB_CONTINUE
7793 **/
7794static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7795{
7796        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7797        struct ipr_resource_entry *res, *temp;
7798        struct ipr_config_table_entry_wrapper cfgtew;
7799        int entries, found, flag, i;
7800        LIST_HEAD(old_res);
7801
7802        ENTER;
7803        if (ioa_cfg->sis64)
7804                flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7805        else
7806                flag = ioa_cfg->u.cfg_table->hdr.flags;
7807
7808        if (flag & IPR_UCODE_DOWNLOAD_REQ)
7809                dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7810
7811        list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7812                list_move_tail(&res->queue, &old_res);
7813
7814        if (ioa_cfg->sis64)
7815                entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7816        else
7817                entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7818
7819        for (i = 0; i < entries; i++) {
7820                if (ioa_cfg->sis64)
7821                        cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7822                else
7823                        cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7824                found = 0;
7825
7826                list_for_each_entry_safe(res, temp, &old_res, queue) {
7827                        if (ipr_is_same_device(res, &cfgtew)) {
7828                                list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7829                                found = 1;
7830                                break;
7831                        }
7832                }
7833
7834                if (!found) {
7835                        if (list_empty(&ioa_cfg->free_res_q)) {
7836                                dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7837                                break;
7838                        }
7839
7840                        found = 1;
7841                        res = list_entry(ioa_cfg->free_res_q.next,
7842                                         struct ipr_resource_entry, queue);
7843                        list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7844                        ipr_init_res_entry(res, &cfgtew);
7845                        res->add_to_ml = 1;
7846                } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7847                        res->sdev->allow_restart = 1;
7848
7849                if (found)
7850                        ipr_update_res_entry(res, &cfgtew);
7851        }
7852
7853        list_for_each_entry_safe(res, temp, &old_res, queue) {
7854                if (res->sdev) {
7855                        res->del_from_ml = 1;
7856                        res->res_handle = IPR_INVALID_RES_HANDLE;
7857                        list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7858                }
7859        }
7860
7861        list_for_each_entry_safe(res, temp, &old_res, queue) {
7862                ipr_clear_res_target(res);
7863                list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7864        }
7865
7866        if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7867                ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7868        else
7869                ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7870
7871        LEAVE;
7872        return IPR_RC_JOB_CONTINUE;
7873}
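
/*
 * The reconciliation above, in short: every known resource is first parked
 * on old_res; each config table entry then either reclaims its match into
 * used_res_q or takes a free entry flagged add_to_ml. Leftovers on old_res
 * that still have an sdev are flagged del_from_ml for the worker thread;
 * the rest go back to free_res_q.
 */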
7874
7875/**
7876 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7877 * @ipr_cmd:    ipr command struct
7878 *
7879 * This function sends a Query IOA Configuration command
7880 * to the adapter to retrieve the IOA configuration table.
7881 *
7882 * Return value:
7883 *      IPR_RC_JOB_RETURN
7884 **/
7885static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7886{
7887        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7888        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7889        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7890        struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7891
7892        ENTER;
7893        if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7894                ioa_cfg->dual_raid = 1;
7895        dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7896                 ucode_vpd->major_release, ucode_vpd->card_type,
7897                 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7898        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7899        ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7900
7901        ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7902        ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7903        ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7904        ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7905
7906        ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7907                       IPR_IOADL_FLAGS_READ_LAST);
7908
7909        ipr_cmd->job_step = ipr_init_res_table;
7910
7911        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7912
7913        LEAVE;
7914        return IPR_RC_JOB_RETURN;
7915}
7916
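/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd:    ipr command struct
 *
 * An adapter that does not implement the requested service action rejects
 * it as an invalid request type; treat that as success and continue the
 * job, otherwise fail the reset.
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/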
7917static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7918{
7919        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7920
7921        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7922                return IPR_RC_JOB_CONTINUE;
7923
7924        return ipr_reset_cmd_failed(ipr_cmd);
7925}
7926
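/**
 * ipr_build_ioa_service_action - Build an IOA Service Action CDB
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send the command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/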
7927static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7928                                         __be32 res_handle, u8 sa_code)
7929{
7930        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7931
7932        ioarcb->res_handle = res_handle;
7933        ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7934        ioarcb->cmd_pkt.cdb[1] = sa_code;
7935        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7936}
7937
7938/**
7939 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7940 * @ipr_cmd:    ipr command struct
7941 *
7942 * Return value:
7943 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7944 **/
7945static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7946{
7947        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7948        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7949        struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7950
7951        ENTER;
7952
7953        ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7954
7955        if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7956                ipr_build_ioa_service_action(ipr_cmd,
7957                                             cpu_to_be32(IPR_IOA_RES_HANDLE),
7958                                             IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7959
7960                ioarcb->cmd_pkt.cdb[2] = 0x40;
7961
7962                ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7963                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7964                           IPR_SET_SUP_DEVICE_TIMEOUT);
7965
7966                LEAVE;
7967                return IPR_RC_JOB_RETURN;
7968        }
7969
7970        LEAVE;
7971        return IPR_RC_JOB_CONTINUE;
7972}
7973
7974/**
7975 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7976 * @ipr_cmd:    ipr command struct
7977 * @flags:      CDB flags byte; @page: page code; @dma_addr/@xfer_len:
7978 *              response buffer and its length
7979 *
7980 * Return value:
7981 *      none
7982 **/
7983static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7984                              dma_addr_t dma_addr, u8 xfer_len)
7985{
7986        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7987
7988        ENTER;
7989        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7990        ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7991
7992        ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7993        ioarcb->cmd_pkt.cdb[1] = flags;
7994        ioarcb->cmd_pkt.cdb[2] = page;
7995        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7996
7997        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7998
7999        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8000        LEAVE;
8001}
8002
8003/**
8004 * ipr_inquiry_page_supported - Is the given inquiry page supported
8005 * @page0:              inquiry page 0 buffer
8006 * @page:               page code.
8007 *
8008 * This function determines if the specified inquiry page is supported.
8009 *
8010 * Return value:
8011 *      1 if page is supported / 0 if not
8012 **/
8013static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8014{
8015        int i;
8016
8017        for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8018                if (page0->page[i] == page)
8019                        return 1;
8020
8021        return 0;
8022}
8023
8024/**
8025 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8026 * @ipr_cmd:    ipr command struct
8027 *
8028 * This function sends a Page 0xC4 inquiry to the adapter
8029 * to retrieve the adapter's cache capabilities.
8030 *
8031 * Return value:
8032 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8033 **/
8034static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8035{
8036        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8037        struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8038        struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8039
8040        ENTER;
8041        ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8042        memset(pageC4, 0, sizeof(*pageC4));
8043
8044        if (ipr_inquiry_page_supported(page0, 0xC4)) {
8045                ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8046                                  (ioa_cfg->vpd_cbs_dma
8047                                   + offsetof(struct ipr_misc_cbs,
8048                                              pageC4_data)),
8049                                  sizeof(struct ipr_inquiry_pageC4));
8050                return IPR_RC_JOB_RETURN;
8051        }
8052
8053        LEAVE;
8054        return IPR_RC_JOB_CONTINUE;
8055}
8056
8057/**
8058 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8059 * @ipr_cmd:    ipr command struct
8060 *
8061 * This function sends a Page 0xD0 inquiry to the adapter
8062 * to retrieve adapter capabilities.
8063 *
8064 * Return value:
8065 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8066 **/
8067static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8068{
8069        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8070        struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8071        struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8072
8073        ENTER;
8074        ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8075        memset(cap, 0, sizeof(*cap));
8076
8077        if (ipr_inquiry_page_supported(page0, 0xD0)) {
8078                ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8079                                  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8080                                  sizeof(struct ipr_inquiry_cap));
8081                return IPR_RC_JOB_RETURN;
8082        }
8083
8084        LEAVE;
8085        return IPR_RC_JOB_CONTINUE;
8086}
8087
8088/**
8089 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8090 * @ipr_cmd:    ipr command struct
8091 *
8092 * This function sends a Page 3 inquiry to the adapter
8093 * to retrieve software VPD information.
8094 *
8095 * Return value:
8096 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8097 **/
8098static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8099{
8100        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8101
8102        ENTER;
8103
8104        ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8105
8106        ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8107                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8108                          sizeof(struct ipr_inquiry_page3));
8109
8110        LEAVE;
8111        return IPR_RC_JOB_RETURN;
8112}
8113
8114/**
8115 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8116 * @ipr_cmd:    ipr command struct
8117 *
8118 * This function sends a Page 0 inquiry to the adapter
8119 * to retrieve supported inquiry pages.
8120 *
8121 * Return value:
8122 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8123 **/
8124static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8125{
8126        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8127        char type[5];
8128
8129        ENTER;
8130
8131        /* Grab the type out of the VPD and store it away */
8132        memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8133        type[4] = '\0';
8134        ioa_cfg->type = simple_strtoul(type, NULL, 16);
8135
8136        if (ipr_invalid_adapter(ioa_cfg)) {
8137                dev_err(&ioa_cfg->pdev->dev,
8138                        "Adapter not supported in this hardware configuration.\n");
8139
8140                if (!ipr_testmode) {
8141                        ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8142                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8143                        list_add_tail(&ipr_cmd->queue,
8144                                        &ioa_cfg->hrrq->hrrq_free_q);
8145                        return IPR_RC_JOB_RETURN;
8146                }
8147        }
8148
8149        ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8150
8151        ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8152                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8153                          sizeof(struct ipr_inquiry_page0));
8154
8155        LEAVE;
8156        return IPR_RC_JOB_RETURN;
8157}
8158
8159/**
8160 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8161 * @ipr_cmd:    ipr command struct
8162 *
8163 * This function sends a standard inquiry to the adapter.
8164 *
8165 * Return value:
8166 *      IPR_RC_JOB_RETURN
8167 **/
8168static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8169{
8170        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8171
8172        ENTER;
8173        ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8174
8175        ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8176                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8177                          sizeof(struct ipr_ioa_vpd));
8178
8179        LEAVE;
8180        return IPR_RC_JOB_RETURN;
8181}
8182
8183/**
8184 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8185 * @ipr_cmd:    ipr command struct
8186 *
8187 * This function sends an Identify Host Request Response Queue
8188 * command to establish the HRRQ with the adapter.
8189 *
8190 * Return value:
8191 *      IPR_RC_JOB_RETURN
8192 **/
8193static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8194{
8195        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8196        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8197        struct ipr_hrr_queue *hrrq;
8198
8199        ENTER;
8200        ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8201        if (ioa_cfg->identify_hrrq_index == 0)
8202                dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8203
8204        if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8205                hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8206
8207                ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8208                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8209
8210                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8211                if (ioa_cfg->sis64)
8212                        ioarcb->cmd_pkt.cdb[1] = 0x1;
8213
8214                if (ioa_cfg->nvectors == 1)
8215                        ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8216                else
8217                        ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8218
8219                ioarcb->cmd_pkt.cdb[2] =
8220                        ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8221                ioarcb->cmd_pkt.cdb[3] =
8222                        ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8223                ioarcb->cmd_pkt.cdb[4] =
8224                        ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8225                ioarcb->cmd_pkt.cdb[5] =
8226                        ((u64) hrrq->host_rrq_dma) & 0xff;
8227                ioarcb->cmd_pkt.cdb[7] =
8228                        ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8229                ioarcb->cmd_pkt.cdb[8] =
8230                        (sizeof(u32) * hrrq->size) & 0xff;
8231
8232                if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8233                        ioarcb->cmd_pkt.cdb[9] =
8234                                        ioa_cfg->identify_hrrq_index;
8235
8236                if (ioa_cfg->sis64) {
8237                        ioarcb->cmd_pkt.cdb[10] =
8238                                ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8239                        ioarcb->cmd_pkt.cdb[11] =
8240                                ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8241                        ioarcb->cmd_pkt.cdb[12] =
8242                                ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8243                        ioarcb->cmd_pkt.cdb[13] =
8244                                ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8245                }
8246
8247                if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8248                        ioarcb->cmd_pkt.cdb[14] =
8249                                        ioa_cfg->identify_hrrq_index;
8250
8251                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8252                           IPR_INTERNAL_TIMEOUT);
8253
8254                if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8255                        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8256
8257                LEAVE;
8258                return IPR_RC_JOB_RETURN;
8259        }
8260
8261        LEAVE;
8262        return IPR_RC_JOB_CONTINUE;
8263}
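
/*
 * Illustrative sketch (hypothetical helper, not used by the driver) of the
 * address encoding built above: the 64-bit host_rrq_dma address is split
 * MSB-first, low word into CDB bytes 2-5 and, on SIS-64 adapters, high word
 * into bytes 10-13. Note also that ipr_ioafp_identify_hrrq() re-arms itself
 * as the next job_step until every host RRQ has been identified.
 */
static void __maybe_unused ipr_pack_hrrq_addr_sketch(u8 *cdb, u64 dma)
{
        cdb[2] = (dma >> 24) & 0xff;            /* bits 31:24 */
        cdb[3] = (dma >> 16) & 0xff;
        cdb[4] = (dma >> 8) & 0xff;
        cdb[5] = dma & 0xff;                    /* bits 7:0 */
        cdb[10] = (dma >> 56) & 0xff;           /* bits 63:56, SIS-64 only */
        cdb[11] = (dma >> 48) & 0xff;
        cdb[12] = (dma >> 40) & 0xff;
        cdb[13] = (dma >> 32) & 0xff;
}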
8264
8265/**
8266 * ipr_reset_timer_done - Adapter reset timer function
8267 * @t: Timer context used to fetch ipr command struct
8268 *
8269 * Description: This function is used in adapter reset processing
8270 * for timing events. If the reset_cmd pointer in the IOA
8271 * config struct does not point at this command, we are doing nested
8272 * resets and fail_all_ops will take care of freeing the
8273 * command block.
8274 *
8275 * Return value:
8276 *      none
8277 **/
8278static void ipr_reset_timer_done(struct timer_list *t)
8279{
8280        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8281        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8282        unsigned long lock_flags = 0;
8283
8284        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8285
8286        if (ioa_cfg->reset_cmd == ipr_cmd) {
8287                list_del(&ipr_cmd->queue);
8288                ipr_cmd->done(ipr_cmd);
8289        }
8290
8291        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8292}
8293
8294/**
8295 * ipr_reset_start_timer - Start a timer for adapter reset job
8296 * @ipr_cmd:    ipr command struct
8297 * @timeout:    timeout value
8298 *
8299 * Description: This function is used in adapter reset processing
8300 * for timing events. If the reset_cmd pointer in the IOA
8301 * config struct does not point at this command, we are doing nested
8302 * resets and fail_all_ops will take care of freeing the
8303 * command block.
8304 *
8305 * Return value:
8306 *      none
8307 **/
8308static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8309                                  unsigned long timeout)
8310{
8312        ENTER;
8313        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8314        ipr_cmd->done = ipr_reset_ioa_job;
8315
8316        ipr_cmd->timer.expires = jiffies + timeout;
8317        ipr_cmd->timer.function = ipr_reset_timer_done;
8318        add_timer(&ipr_cmd->timer);
8319}
8320
8321/**
8322 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8323 * @ioa_cfg:    ioa cfg struct
8324 *
8325 * Return value:
8326 *      nothing
8327 **/
8328static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8329{
8330        struct ipr_hrr_queue *hrrq;
8331
8332        for_each_hrrq(hrrq, ioa_cfg) {
8333                spin_lock(&hrrq->_lock);
8334                memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8335
8336                /* Initialize Host RRQ pointers */
8337                hrrq->hrrq_start = hrrq->host_rrq;
8338                hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8339                hrrq->hrrq_curr = hrrq->hrrq_start;
8340                hrrq->toggle_bit = 1;
8341                spin_unlock(&hrrq->_lock);
8342        }
8343        wmb();
8344
8345        ioa_cfg->identify_hrrq_index = 0;
8346        if (ioa_cfg->hrrq_num == 1)
8347                atomic_set(&ioa_cfg->hrrq_index, 0);
8348        else
8349                atomic_set(&ioa_cfg->hrrq_index, 1);
8350
8351        /* Zero out config table */
8352        memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8353}
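
/*
 * The toggle bit primed above keeps host and adapter in step on the
 * circular host RRQ without a shared index: the adapter writes each
 * response word with the current toggle value and inverts it on every
 * wrap, so the consumer drains entries while an entry's toggle bit matches
 * hrrq->toggle_bit and flips its own copy when hrrq_curr wraps.
 */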
8354
8355/**
8356 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8357 * @ipr_cmd:    ipr command struct
8358 *
8359 * Return value:
8360 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8361 **/
8362static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8363{
8364        unsigned long stage, stage_time;
8365        u32 feedback;
8366        volatile u32 int_reg;
8367        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8368        u64 maskval = 0;
8369
8370        feedback = readl(ioa_cfg->regs.init_feedback_reg);
8371        stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8372        stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8373
8374        ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8375
8376        /* sanity check the stage_time value */
8377        if (stage_time == 0)
8378                stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8379        else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8380                stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8381        else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8382                stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8383
8384        if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8385                writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8386                int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8387                stage_time = ioa_cfg->transop_timeout;
8388                ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8389        } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8390                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8391                if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8392                        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8393                        maskval = IPR_PCII_IPL_STAGE_CHANGE;
8394                        maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8395                        writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8396                        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8397                        return IPR_RC_JOB_CONTINUE;
8398                }
8399        }
8400
8401        ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8402        ipr_cmd->timer.function = ipr_oper_timeout;
8403        ipr_cmd->done = ipr_reset_ioa_job;
8404        add_timer(&ipr_cmd->timer);
8405
8406        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8407
8408        return IPR_RC_JOB_RETURN;
8409}
8410
8411/**
8412 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8413 * @ipr_cmd:    ipr command struct
8414 *
8415 * This function reinitializes some control blocks and
8416 * enables destructive diagnostics on the adapter.
8417 *
8418 * Return value:
8419 *      IPR_RC_JOB_RETURN
8420 **/
8421static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8422{
8423        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8424        volatile u32 int_reg;
8425        volatile u64 maskval;
8426        int i;
8427
8428        ENTER;
8429        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8430        ipr_init_ioa_mem(ioa_cfg);
8431
8432        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8433                spin_lock(&ioa_cfg->hrrq[i]._lock);
8434                ioa_cfg->hrrq[i].allow_interrupts = 1;
8435                spin_unlock(&ioa_cfg->hrrq[i]._lock);
8436        }
8437        if (ioa_cfg->sis64) {
8438                /* Set the adapter to the correct endian mode. */
8439                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8440                int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8441        }
8442
8443        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8444
8445        if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8446                writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8447                       ioa_cfg->regs.clr_interrupt_mask_reg32);
8448                int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8449                return IPR_RC_JOB_CONTINUE;
8450        }
8451
8452        /* Enable destructive diagnostics on IOA */
8453        writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8454
8455        if (ioa_cfg->sis64) {
8456                maskval = IPR_PCII_IPL_STAGE_CHANGE;
8457                maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8458                writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8459        } else
8460                writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8461
8462        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8463
8464        dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8465
8466        if (ioa_cfg->sis64) {
8467                ipr_cmd->job_step = ipr_reset_next_stage;
8468                return IPR_RC_JOB_CONTINUE;
8469        }
8470
8471        ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8472        ipr_cmd->timer.function = ipr_oper_timeout;
8473        ipr_cmd->done = ipr_reset_ioa_job;
8474        add_timer(&ipr_cmd->timer);
8475        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8476
8477        LEAVE;
8478        return IPR_RC_JOB_RETURN;
8479}
8480
8481/**
8482 * ipr_reset_wait_for_dump - Wait for a dump to time out.
8483 * @ipr_cmd:    ipr command struct
8484 *
8485 * This function is invoked when an adapter dump has run out
8486 * of processing time.
8487 *
8488 * Return value:
8489 *      IPR_RC_JOB_CONTINUE
8490 **/
8491static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8492{
8493        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8494
8495        if (ioa_cfg->sdt_state == GET_DUMP)
8496                ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8497        else if (ioa_cfg->sdt_state == READ_DUMP)
8498                ioa_cfg->sdt_state = ABORT_DUMP;
8499
8500        ioa_cfg->dump_timeout = 1;
8501        ipr_cmd->job_step = ipr_reset_alert;
8502
8503        return IPR_RC_JOB_CONTINUE;
8504}
8505
8506/**
8507 * ipr_unit_check_no_data - Log a unit check/no data error
8508 * @ioa_cfg:            ioa config struct
8509 *
8510 * Logs an error indicating the adapter unit checked, but for some
8511 * reason, we were unable to fetch the unit check buffer.
8512 *
8513 * Return value:
8514 *      nothing
8515 **/
8516static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8517{
8518        ioa_cfg->errors_logged++;
8519        dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8520}
8521
8522/**
8523 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8524 * @ioa_cfg:            ioa config struct
8525 *
8526 * Fetches the unit check buffer from the adapter by clocking the data
8527 * through the mailbox register.
8528 *
8529 * Return value:
8530 *      nothing
8531 **/
8532static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8533{
8534        unsigned long mailbox;
8535        struct ipr_hostrcb *hostrcb;
8536        struct ipr_uc_sdt sdt;
8537        int rc, length;
8538        u32 ioasc;
8539
8540        mailbox = readl(ioa_cfg->ioa_mailbox);
8541
8542        if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8543                ipr_unit_check_no_data(ioa_cfg);
8544                return;
8545        }
8546
8547        memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8548        rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8549                                        (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8550
8551        if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8552            ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8553            (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8554                ipr_unit_check_no_data(ioa_cfg);
8555                return;
8556        }
8557
8558        /* Find length of the first sdt entry (UC buffer) */
8559        if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8560                length = be32_to_cpu(sdt.entry[0].end_token);
8561        else
8562                length = (be32_to_cpu(sdt.entry[0].end_token) -
8563                          be32_to_cpu(sdt.entry[0].start_token)) &
8564                          IPR_FMT2_MBX_ADDR_MASK;
8565
8566        hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8567                             struct ipr_hostrcb, queue);
8568        list_del_init(&hostrcb->queue);
8569        memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8570
8571        rc = ipr_get_ldump_data_section(ioa_cfg,
8572                                        be32_to_cpu(sdt.entry[0].start_token),
8573                                        (__be32 *)&hostrcb->hcam,
8574                                        min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8575
8576        if (!rc) {
8577                ipr_handle_log_data(ioa_cfg, hostrcb);
8578                ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8579                if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8580                    ioa_cfg->sdt_state == GET_DUMP)
8581                        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8582        } else
8583                ipr_unit_check_no_data(ioa_cfg);
8584
8585        list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8586}
8587
8588/**
8589 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8590 * @ipr_cmd:    ipr command struct
8591 *
8592 * Description: Fetches the unit check buffer, then schedules the alert step.
8593 *
8594 * Return value:
8595 *      IPR_RC_JOB_RETURN
8596 **/
8597static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8598{
8599        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8600
8601        ENTER;
8602        ioa_cfg->ioa_unit_checked = 0;
8603        ipr_get_unit_check_buffer(ioa_cfg);
8604        ipr_cmd->job_step = ipr_reset_alert;
8605        ipr_reset_start_timer(ipr_cmd, 0);
8606
8607        LEAVE;
8608        return IPR_RC_JOB_RETURN;
8609}
8610
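/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:    ipr command struct
 *
 * Description: On SIS-64 adapters, re-arm a short timer until the mailbox
 * register reads stable or the u.time_left budget expires, then move the
 * dump state machine to READ_DUMP and schedule the dump worker.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/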
8611static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8612{
8613        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8614
8615        ENTER;
8616
8617        if (ioa_cfg->sdt_state != GET_DUMP)
8618                return IPR_RC_JOB_RETURN;
8619
8620        if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8621            (readl(ioa_cfg->regs.sense_interrupt_reg) &
8622             IPR_PCII_MAILBOX_STABLE)) {
8623
8624                if (!ipr_cmd->u.time_left)
8625                        dev_err(&ioa_cfg->pdev->dev,
8626                                "Timed out waiting for Mailbox register.\n");
8627
8628                ioa_cfg->sdt_state = READ_DUMP;
8629                ioa_cfg->dump_timeout = 0;
8630                if (ioa_cfg->sis64)
8631                        ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8632                else
8633                        ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8634                ipr_cmd->job_step = ipr_reset_wait_for_dump;
8635                schedule_work(&ioa_cfg->work_q);
8636
8637        } else {
8638                ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8639                ipr_reset_start_timer(ipr_cmd,
8640                                      IPR_CHECK_FOR_RESET_TIMEOUT);
8641        }
8642
8643        LEAVE;
8644        return IPR_RC_JOB_RETURN;
8645}
8646
8647/**
8648 * ipr_reset_restore_cfg_space - Restore PCI config space.
8649 * @ipr_cmd:    ipr command struct
8650 *
8651 * Description: This function restores the saved PCI config space of
8652 * the adapter, fails all outstanding ops back to the callers, and
8653 * fetches the dump/unit check if applicable to this reset.
8654 *
8655 * Return value:
8656 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8657 **/
8658static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8659{
8660        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8661        u32 int_reg;
8662
8663        ENTER;
8664        ioa_cfg->pdev->state_saved = true;
8665        pci_restore_state(ioa_cfg->pdev);
8666
8667        if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8668                ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8669                return IPR_RC_JOB_CONTINUE;
8670        }
8671
8672        ipr_fail_all_ops(ioa_cfg);
8673
8674        if (ioa_cfg->sis64) {
8675                /* Set the adapter to the correct endian mode. */
8676                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8677                int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8678        }
8679
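    	/*
    	 * If the adapter unit checked, fetch the unit check buffer before
    	 * alerting the adapter again; SIS64 adapters are given
    	 * IPR_DUMP_DELAY_TIMEOUT before the buffer is read.
    	 */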
8680        if (ioa_cfg->ioa_unit_checked) {
8681                if (ioa_cfg->sis64) {
8682                        ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8683                        ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8684                        return IPR_RC_JOB_RETURN;
8685                } else {
8686                        ioa_cfg->ioa_unit_checked = 0;
8687                        ipr_get_unit_check_buffer(ioa_cfg);
8688                        ipr_cmd->job_step = ipr_reset_alert;
8689                        ipr_reset_start_timer(ipr_cmd, 0);
8690                        return IPR_RC_JOB_RETURN;
8691                }
8692        }
8693
8694        if (ioa_cfg->in_ioa_bringdown) {
8695                ipr_cmd->job_step = ipr_ioa_bringdown_done;
8696        } else if (ioa_cfg->sdt_state == GET_DUMP) {
8697                ipr_cmd->job_step = ipr_dump_mailbox_wait;
8698                ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8699        } else {
8700                ipr_cmd->job_step = ipr_reset_enable_ioa;
8701        }
8702
8703        LEAVE;
8704        return IPR_RC_JOB_CONTINUE;
8705}
8706
8707/**
8708 * ipr_reset_bist_done - BIST has completed on the adapter.
8709 * @ipr_cmd:    ipr command struct
8710 *
8711 * Description: Unblock config space and resume the reset process.
8712 *
8713 * Return value:
8714 *      IPR_RC_JOB_CONTINUE
8715 **/
8716static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8717{
8718        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8719
8720        ENTER;
8721        if (ioa_cfg->cfg_locked)
8722                pci_cfg_access_unlock(ioa_cfg->pdev);
8723        ioa_cfg->cfg_locked = 0;
8724        ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8725        LEAVE;
8726        return IPR_RC_JOB_CONTINUE;
8727}
8728
8729/**
8730 * ipr_reset_start_bist - Run BIST on the adapter.
8731 * @ipr_cmd:    ipr command struct
8732 *
8733 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8734 *
8735 * Return value:
8736 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8737 **/
8738static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8739{
8740        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8741        int rc = PCIBIOS_SUCCESSFUL;
8742
8743        ENTER;
8744        if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8745                writel(IPR_UPROCI_SIS64_START_BIST,
8746                       ioa_cfg->regs.set_uproc_interrupt_reg32);
8747        else
8748                rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8749
8750        if (rc == PCIBIOS_SUCCESSFUL) {
8751                ipr_cmd->job_step = ipr_reset_bist_done;
8752                ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8753                rc = IPR_RC_JOB_RETURN;
8754        } else {
8755                if (ioa_cfg->cfg_locked)
8756                        pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8757                ioa_cfg->cfg_locked = 0;
8758                ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8759                rc = IPR_RC_JOB_CONTINUE;
8760        }
8761
8762        LEAVE;
8763        return rc;
8764}
8765
8766/**
8767 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8768 * @ipr_cmd:    ipr command struct
8769 *
8770 * Description: This clears PCI reset to the adapter and delays two seconds.
8771 *
8772 * Return value:
8773 *      IPR_RC_JOB_RETURN
8774 **/
8775static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8776{
8777        ENTER;
8778        ipr_cmd->job_step = ipr_reset_bist_done;
8779        ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8780        LEAVE;
8781        return IPR_RC_JOB_RETURN;
8782}
8783
8784/**
8785 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8786 * @work:       work struct
8787 *
8788 * Description: This pulses a PCIe warm reset to the slot and then resumes the reset job.
8789 *
8790 **/
8791static void ipr_reset_reset_work(struct work_struct *work)
8792{
8793        struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8794        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8795        struct pci_dev *pdev = ioa_cfg->pdev;
8796        unsigned long lock_flags = 0;
8797
8798        ENTER;
8799        pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8800        msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8801        pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8802
8803        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8804        if (ioa_cfg->reset_cmd == ipr_cmd)
8805                ipr_reset_ioa_job(ipr_cmd);
8806        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8807        LEAVE;
8808}
8809
8810/**
8811 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8812 * @ipr_cmd:    ipr command struct
8813 *
8814 * Description: This asserts PCI reset to the adapter.
8815 *
8816 * Return value:
8817 *      IPR_RC_JOB_RETURN
8818 **/
8819static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8820{
8821        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8822
8823        ENTER;
8824        INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8825        queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8826        ipr_cmd->job_step = ipr_reset_slot_reset_done;
8827        LEAVE;
8828        return IPR_RC_JOB_RETURN;
8829}
8830
8831/**
8832 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8833 * @ipr_cmd:    ipr command struct
8834 *
8835 * Description: This attempts to block config access to the IOA.
8836 *
8837 * Return value:
8838 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8839 **/
8840static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8841{
8842        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8843        int rc = IPR_RC_JOB_CONTINUE;
8844
8845        if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8846                ioa_cfg->cfg_locked = 1;
8847                ipr_cmd->job_step = ioa_cfg->reset;
8848        } else {
8849                if (ipr_cmd->u.time_left) {
8850                        rc = IPR_RC_JOB_RETURN;
8851                        ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8852                        ipr_reset_start_timer(ipr_cmd,
8853                                              IPR_CHECK_FOR_RESET_TIMEOUT);
8854                } else {
8855                        ipr_cmd->job_step = ioa_cfg->reset;
8856                        dev_err(&ioa_cfg->pdev->dev,
8857                                "Timed out waiting to lock config access. Resetting anyway.\n");
8858                }
8859        }
8860
8861        return rc;
8862}
8863
8864/**
8865 * ipr_reset_block_config_access - Block config access to the IOA
8866 * @ipr_cmd:    ipr command struct
8867 *
8868 * Description: This attempts to block config access to the IOA
8869 *
8870 * Return value:
8871 *      IPR_RC_JOB_CONTINUE
8872 **/
8873static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8874{
8875        ipr_cmd->ioa_cfg->cfg_locked = 0;
8876        ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8877        ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8878        return IPR_RC_JOB_CONTINUE;
8879}
8880
8881/**
8882 * ipr_reset_allowed - Query whether or not IOA can be reset
8883 * @ioa_cfg:    ioa config struct
8884 *
8885 * Return value:
8886 *      0 if reset not allowed / non-zero if reset is allowed
8887 **/
8888static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8889{
8890        volatile u32 temp_reg;
8891
8892        temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8893        return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8894}
8895
8896/**
8897 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8898 * @ipr_cmd:    ipr command struct
8899 *
8900 * Description: This function waits for adapter permission to run BIST,
8901 * then runs BIST. If the adapter does not give permission after a
8902 * reasonable time, we will reset the adapter anyway. Resetting the
8903 * adapter without first warning it risks losing the persistent
8904 * error log on the adapter. If the adapter is reset while it is
8905 * writing to its flash, the flash segment will have bad ECC and
8906 * be zeroed.
8907 *
8908 * Return value:
8909 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8910 **/
8911static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8912{
8913        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8914        int rc = IPR_RC_JOB_RETURN;
8915
8916        if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8917                ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8918                ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8919        } else {
8920                ipr_cmd->job_step = ipr_reset_block_config_access;
8921                rc = IPR_RC_JOB_CONTINUE;
8922        }
8923
8924        return rc;
8925}
8926
8927/**
8928 * ipr_reset_alert - Alert the adapter of a pending reset
8929 * @ipr_cmd:    ipr command struct
8930 *
8931 * Description: This function alerts the adapter that it will be reset.
8932 * If memory space is not currently enabled, proceed directly
8933 * to running BIST on the adapter. The timer must always be started
8934 * so we guarantee we do not run BIST from ipr_isr.
8935 *
8936 * Return value:
8937 *      IPR_RC_JOB_RETURN
8938 **/
8939static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8940{
8941        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8942        u16 cmd_reg;
8943        int rc;
8944
8945        ENTER;
8946        rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8947
8948        if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8949                ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8950                writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8951                ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8952        } else {
8953                ipr_cmd->job_step = ipr_reset_block_config_access;
8954        }
8955
8956        ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8957        ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8958
8959        LEAVE;
8960        return IPR_RC_JOB_RETURN;
8961}
8962
8963/**
8964 * ipr_reset_quiesce_done - Complete IOA disconnect
8965 * @ipr_cmd:    ipr command struct
8966 *
8967 * Description: Freeze the adapter to complete quiesce processing
8968 *
8969 * Return value:
8970 *      IPR_RC_JOB_CONTINUE
8971 **/
8972static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8973{
8974        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8975
8976        ENTER;
8977        ipr_cmd->job_step = ipr_ioa_bringdown_done;
8978        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8979        LEAVE;
8980        return IPR_RC_JOB_CONTINUE;
8981}
8982
8983/**
8984 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8985 * @ipr_cmd:    ipr command struct
8986 *
8987 * Description: If nothing is outstanding to the IOA, proceed with
8988 * the IOA disconnect; otherwise, reset the IOA.
8989 *
8990 * Return value:
8991 *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8992 **/
8993static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8994{
8995        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8996        struct ipr_cmnd *loop_cmd;
8997        struct ipr_hrr_queue *hrrq;
8998        int rc = IPR_RC_JOB_CONTINUE;
8999        int count = 0;
9000
9001        ENTER;
9002        ipr_cmd->job_step = ipr_reset_quiesce_done;
9003
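    	/*
    	 * If any command is still outstanding on any HRRQ, a full adapter
    	 * reset is needed instead of a quiesce: kick off the reset, return
    	 * this command to the free queue, and bail out.
    	 */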
9004        for_each_hrrq(hrrq, ioa_cfg) {
9005                spin_lock(&hrrq->_lock);
9006                list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9007                        count++;
9008                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9009                        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9010                        rc = IPR_RC_JOB_RETURN;
9011                        break;
9012                }
9013                spin_unlock(&hrrq->_lock);
9014
9015                if (count)
9016                        break;
9017        }
9018
9019        LEAVE;
9020        return rc;
9021}
9022
9023/**
9024 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9025 * @ipr_cmd:    ipr command struct
9026 *
9027 * Description: Cancel any outstanding HCAMs to the IOA.
9028 *
9029 * Return value:
9030 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9031 **/
9032static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9033{
9034        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9035        int rc = IPR_RC_JOB_CONTINUE;
9036        struct ipr_cmd_pkt *cmd_pkt;
9037        struct ipr_cmnd *hcam_cmd;
9038        struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9039
9040        ENTER;
9041        ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9042
9043        if (!hrrq->ioa_is_dead) {
9044                if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9045                        list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9046                                if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9047                                        continue;
9048
9049                                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9050                                cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9051                                cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9053                                cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9054                                cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
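    				/*
    				 * Pack the 64-bit address of the HCAM's IOARCB into
    				 * the CDB, MSB first: bits 63:32 in bytes 10-13 and
    				 * bits 31:0 in bytes 2-5.
    				 */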
9055                                cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9056                                cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9057                                cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9058                                cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9059                                cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9060                                cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9061                                cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9062                                cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9063
9064                                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9065                                           IPR_CANCEL_TIMEOUT);
9066
9067                                rc = IPR_RC_JOB_RETURN;
9068                                ipr_cmd->job_step = ipr_reset_cancel_hcam;
9069                                break;
9070                        }
9071                }
9072        } else
9073                ipr_cmd->job_step = ipr_reset_alert;
9074
9075        LEAVE;
9076        return rc;
9077}
9078
9079/**
9080 * ipr_reset_ucode_download_done - Microcode download completion
9081 * @ipr_cmd:    ipr command struct
9082 *
9083 * Description: This function unmaps the microcode download buffer.
9084 *
9085 * Return value:
9086 *      IPR_RC_JOB_CONTINUE
9087 **/
9088static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9089{
9090        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9091        struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9092
9093        dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9094                     sglist->num_sg, DMA_TO_DEVICE);
9095
9096        ipr_cmd->job_step = ipr_reset_alert;
9097        return IPR_RC_JOB_CONTINUE;
9098}
9099
9100/**
9101 * ipr_reset_ucode_download - Download microcode to the adapter
9102 * @ipr_cmd:    ipr command struct
9103 *
9104 * Description: This function checks to see if there is microcode
9105 * to download to the adapter. If there is, a download is performed.
9106 *
9107 * Return value:
9108 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9109 **/
9110static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9111{
9112        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9113        struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9114
9115        ENTER;
9116        ipr_cmd->job_step = ipr_reset_alert;
9117
9118        if (!sglist)
9119                return IPR_RC_JOB_CONTINUE;
9120
9121        ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9122        ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9123        ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9124        ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
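    	/* WRITE BUFFER carries the image length as a 24-bit big-endian
    	 * field in CDB bytes 6-8 */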
9125        ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9126        ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9127        ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9128
9129        if (ioa_cfg->sis64)
9130                ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9131        else
9132                ipr_build_ucode_ioadl(ipr_cmd, sglist);
9133        ipr_cmd->job_step = ipr_reset_ucode_download_done;
9134
9135        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9136                   IPR_WRITE_BUFFER_TIMEOUT);
9137
9138        LEAVE;
9139        return IPR_RC_JOB_RETURN;
9140}
9141
9142/**
9143 * ipr_reset_shutdown_ioa - Shutdown the adapter
9144 * @ipr_cmd:    ipr command struct
9145 *
9146 * Description: This function issues an adapter shutdown of the
9147 * specified type to the specified adapter as part of the
9148 * adapter reset job.
9149 *
9150 * Return value:
9151 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9152 **/
9153static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9154{
9155        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9156        enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9157        unsigned long timeout;
9158        int rc = IPR_RC_JOB_CONTINUE;
9159
9160        ENTER;
9161        if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9162                ipr_cmd->job_step = ipr_reset_cancel_hcam;
9163        else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9164                        !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9165                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9166                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9167                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9168                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9169
9170                if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9171                        timeout = IPR_SHUTDOWN_TIMEOUT;
9172                else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9173                        timeout = IPR_INTERNAL_TIMEOUT;
9174                else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9175                        timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9176                else
9177                        timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9178
9179                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9180
9181                rc = IPR_RC_JOB_RETURN;
9182                ipr_cmd->job_step = ipr_reset_ucode_download;
9183        } else
9184                ipr_cmd->job_step = ipr_reset_alert;
9185
9186        LEAVE;
9187        return rc;
9188}
9189
9190/**
9191 * ipr_reset_ioa_job - Adapter reset job
9192 * @ipr_cmd:    ipr command struct
9193 *
9194 * Description: This function is the job router for the adapter reset job.
9195 *
9196 * Return value:
9197 *      none
9198 **/
9199static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9200{
9201        u32 rc, ioasc;
9202        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9203
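    	/*
    	 * Run successive job steps while they return IPR_RC_JOB_CONTINUE;
    	 * a step that returns IPR_RC_JOB_RETURN has arranged its own
    	 * continuation via a timer or command completion.
    	 */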
9204        do {
9205                ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9206
9207                if (ioa_cfg->reset_cmd != ipr_cmd) {
9208                        /*
9209                         * We are doing nested adapter resets and this is
9210                         * not the current reset job.
9211                         */
9212                        list_add_tail(&ipr_cmd->queue,
9213                                        &ipr_cmd->hrrq->hrrq_free_q);
9214                        return;
9215                }
9216
9217                if (IPR_IOASC_SENSE_KEY(ioasc)) {
9218                        rc = ipr_cmd->job_step_failed(ipr_cmd);
9219                        if (rc == IPR_RC_JOB_RETURN)
9220                                return;
9221                }
9222
9223                ipr_reinit_ipr_cmnd(ipr_cmd);
9224                ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9225                rc = ipr_cmd->job_step(ipr_cmd);
9226        } while (rc == IPR_RC_JOB_CONTINUE);
9227}
9228
9229/**
9230 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9231 * @ioa_cfg:            ioa config struct
9232 * @job_step:           first job step of reset job
9233 * @shutdown_type:      shutdown type
9234 *
9235 * Description: This function will initiate the reset of the given adapter
9236 * starting at the selected job step.
9237 * If the caller needs to wait on the completion of the reset,
9238 * the caller must sleep on the reset_wait_q.
9239 *
9240 * Return value:
9241 *      none
9242 **/
9243static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9244                                    int (*job_step) (struct ipr_cmnd *),
9245                                    enum ipr_shutdown_type shutdown_type)
9246{
9247        struct ipr_cmnd *ipr_cmd;
9248        int i;
9249
9250        ioa_cfg->in_reset_reload = 1;
9251        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9252                spin_lock(&ioa_cfg->hrrq[i]._lock);
9253                ioa_cfg->hrrq[i].allow_cmds = 0;
9254                spin_unlock(&ioa_cfg->hrrq[i]._lock);
9255        }
9256        wmb();
9257        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9258                ioa_cfg->scsi_unblock = 0;
9259                ioa_cfg->scsi_blocked = 1;
9260                scsi_block_requests(ioa_cfg->host);
9261        }
9262
9263        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9264        ioa_cfg->reset_cmd = ipr_cmd;
9265        ipr_cmd->job_step = job_step;
9266        ipr_cmd->u.shutdown_type = shutdown_type;
9267
9268        ipr_reset_ioa_job(ipr_cmd);
9269}
9270
9271/**
9272 * ipr_initiate_ioa_reset - Initiate an adapter reset
9273 * @ioa_cfg:            ioa config struct
9274 * @shutdown_type:      shutdown type
9275 *
9276 * Description: This function will initiate the reset of the given adapter.
9277 * If the caller needs to wait on the completion of the reset,
9278 * the caller must sleep on the reset_wait_q.
9279 *
9280 * Return value:
9281 *      none
9282 **/
9283static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9284                                   enum ipr_shutdown_type shutdown_type)
9285{
9286        int i;
9287
9288        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9289                return;
9290
9291        if (ioa_cfg->in_reset_reload) {
9292                if (ioa_cfg->sdt_state == GET_DUMP)
9293                        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9294                else if (ioa_cfg->sdt_state == READ_DUMP)
9295                        ioa_cfg->sdt_state = ABORT_DUMP;
9296        }
9297
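    	/*
    	 * Too many consecutive failed resets: take the IOA offline. If a
    	 * bringdown is already in progress, fail all outstanding ops and
    	 * wake up waiters; otherwise do one final bringdown reset.
    	 */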
9298        if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9299                dev_err(&ioa_cfg->pdev->dev,
9300                        "IOA taken offline - error recovery failed\n");
9301
9302                ioa_cfg->reset_retries = 0;
9303                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9304                        spin_lock(&ioa_cfg->hrrq[i]._lock);
9305                        ioa_cfg->hrrq[i].ioa_is_dead = 1;
9306                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
9307                }
9308                wmb();
9309
9310                if (ioa_cfg->in_ioa_bringdown) {
9311                        ioa_cfg->reset_cmd = NULL;
9312                        ioa_cfg->in_reset_reload = 0;
9313                        ipr_fail_all_ops(ioa_cfg);
9314                        wake_up_all(&ioa_cfg->reset_wait_q);
9315
9316                        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9317                                ioa_cfg->scsi_unblock = 1;
9318                                schedule_work(&ioa_cfg->work_q);
9319                        }
9320                        return;
9321                } else {
9322                        ioa_cfg->in_ioa_bringdown = 1;
9323                        shutdown_type = IPR_SHUTDOWN_NONE;
9324                }
9325        }
9326
9327        _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9328                                shutdown_type);
9329}
9330
9331/**
9332 * ipr_reset_freeze - Hold off all I/O activity
9333 * @ipr_cmd:    ipr command struct
9334 *
9335 * Description: If the PCI slot is frozen, hold off all I/O
9336 * activity; then, as soon as the slot is available again,
9337 * initiate an adapter reset.
9338 */
9339static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9340{
9341        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9342        int i;
9343
9344        /* Disallow new interrupts, avoid loop */
9345        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9346                spin_lock(&ioa_cfg->hrrq[i]._lock);
9347                ioa_cfg->hrrq[i].allow_interrupts = 0;
9348                spin_unlock(&ioa_cfg->hrrq[i]._lock);
9349        }
9350        wmb();
9351        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9352        ipr_cmd->done = ipr_reset_ioa_job;
9353        return IPR_RC_JOB_RETURN;
9354}
9355
9356/**
9357 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9358 * @pdev:       PCI device struct
9359 *
9360 * Description: This routine is called to tell us that the MMIO
9361 * access to the IOA has been restored
9362 */
9363static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9364{
9365        unsigned long flags = 0;
9366        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9367
9368        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9369        if (!ioa_cfg->probe_done)
9370                pci_save_state(pdev);
9371        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9372        return PCI_ERS_RESULT_NEED_RESET;
9373}
9374
9375/**
9376 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9377 * @pdev:       PCI device struct
9378 *
9379 * Description: This routine is called to tell us that the PCI bus
9380 * is down. Can't do anything here, except put the device driver
9381 * into a holding pattern, waiting for the PCI bus to come back.
9382 */
9383static void ipr_pci_frozen(struct pci_dev *pdev)
9384{
9385        unsigned long flags = 0;
9386        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9387
9388        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9389        if (ioa_cfg->probe_done)
9390                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9391        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9392}
9393
9394/**
9395 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9396 * @pdev:       PCI device struct
9397 *
9398 * Description: This routine is called by the pci error recovery
9399 * code after the PCI slot has been reset, just before we
9400 * should resume normal operations.
9401 */
9402static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9403{
9404        unsigned long flags = 0;
9405        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9406
9407        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9408        if (ioa_cfg->probe_done) {
9409                if (ioa_cfg->needs_warm_reset)
9410                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9411                else
9412                        _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9413                                                IPR_SHUTDOWN_NONE);
9414        } else
9415                wake_up_all(&ioa_cfg->eeh_wait_q);
9416        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9417        return PCI_ERS_RESULT_RECOVERED;
9418}
9419
9420/**
9421 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9422 * @pdev:       PCI device struct
9423 *
9424 * Description: This routine is called when the PCI bus has
9425 * permanently failed.
9426 */
9427static void ipr_pci_perm_failure(struct pci_dev *pdev)
9428{
9429        unsigned long flags = 0;
9430        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9431        int i;
9432
9433        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9434        if (ioa_cfg->probe_done) {
9435                if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9436                        ioa_cfg->sdt_state = ABORT_DUMP;
9437                ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9438                ioa_cfg->in_ioa_bringdown = 1;
9439                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9440                        spin_lock(&ioa_cfg->hrrq[i]._lock);
9441                        ioa_cfg->hrrq[i].allow_cmds = 0;
9442                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
9443                }
9444                wmb();
9445                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9446        } else
9447                wake_up_all(&ioa_cfg->eeh_wait_q);
9448        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9449}
9450
9451/**
9452 * ipr_pci_error_detected - Called when a PCI error is detected.
9453 * @pdev:       PCI device struct
9454 * @state:      PCI channel state
9455 *
9456 * Description: Called when a PCI error is detected.
9457 *
9458 * Return value:
9459 *      PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_NEED_RESET / PCI_ERS_RESULT_DISCONNECT
9460 */
9461static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9462                                               pci_channel_state_t state)
9463{
9464        switch (state) {
9465        case pci_channel_io_frozen:
9466                ipr_pci_frozen(pdev);
9467                return PCI_ERS_RESULT_CAN_RECOVER;
9468        case pci_channel_io_perm_failure:
9469                ipr_pci_perm_failure(pdev);
9470                return PCI_ERS_RESULT_DISCONNECT;
9472        default:
9473                break;
9474        }
9475        return PCI_ERS_RESULT_NEED_RESET;
9476}
9477
9478/**
9479 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9480 * @ioa_cfg:    ioa cfg struct
9481 *
9482 * Description: This is the second phase of adapter initialization.
9483 * This function takes care of initializing the adapter to the point
9484 * where it can accept new commands.
9485 *
9486 * Return value:
9487 *      0 on success / -EIO on failure
9488 **/
9489static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9490{
9491        int rc = 0;
9492        unsigned long host_lock_flags = 0;
9493
9494        ENTER;
9495        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9496        dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9497        ioa_cfg->probe_done = 1;
9498        if (ioa_cfg->needs_hard_reset) {
9499                ioa_cfg->needs_hard_reset = 0;
9500                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9501        } else
9502                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9503                                        IPR_SHUTDOWN_NONE);
9504        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9505
9506        LEAVE;
9507        return rc;
9508}
9509
9510/**
9511 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9512 * @ioa_cfg:    ioa config struct
9513 *
9514 * Return value:
9515 *      none
9516 **/
9517static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9518{
9519        int i;
9520
9521        if (ioa_cfg->ipr_cmnd_list) {
9522                for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9523                        if (ioa_cfg->ipr_cmnd_list[i])
9524                                dma_pool_free(ioa_cfg->ipr_cmd_pool,
9525                                              ioa_cfg->ipr_cmnd_list[i],
9526                                              ioa_cfg->ipr_cmnd_list_dma[i]);
9527
9528                        ioa_cfg->ipr_cmnd_list[i] = NULL;
9529                }
9530        }
9531
9532        dma_pool_destroy(ioa_cfg->ipr_cmd_pool);    /* NULL-safe */
9534
9535        kfree(ioa_cfg->ipr_cmnd_list);
9536        kfree(ioa_cfg->ipr_cmnd_list_dma);
9537        ioa_cfg->ipr_cmnd_list = NULL;
9538        ioa_cfg->ipr_cmnd_list_dma = NULL;
9539        ioa_cfg->ipr_cmd_pool = NULL;
9540}
9541
9542/**
9543 * ipr_free_mem - Frees memory allocated for an adapter
9544 * @ioa_cfg:    ioa cfg struct
9545 *
9546 * Return value:
9547 *      nothing
9548 **/
9549static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9550{
9551        int i;
9552
9553        kfree(ioa_cfg->res_entries);
9554        dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9555                          ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9556        ipr_free_cmd_blks(ioa_cfg);
9557
9558        for (i = 0; i < ioa_cfg->hrrq_num; i++)
9559                dma_free_coherent(&ioa_cfg->pdev->dev,
9560                                  sizeof(u32) * ioa_cfg->hrrq[i].size,
9561                                  ioa_cfg->hrrq[i].host_rrq,
9562                                  ioa_cfg->hrrq[i].host_rrq_dma);
9563
9564        dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9565                          ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9566
9567        for (i = 0; i < IPR_MAX_HCAMS; i++) {
9568                dma_free_coherent(&ioa_cfg->pdev->dev,
9569                                  sizeof(struct ipr_hostrcb),
9570                                  ioa_cfg->hostrcb[i],
9571                                  ioa_cfg->hostrcb_dma[i]);
9572        }
9573
9574        ipr_free_dump(ioa_cfg);
9575        kfree(ioa_cfg->trace);
9576}
9577
9578/**
9579 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9580 * @ioa_cfg:    ipr cfg struct
9581 *
9582 * This function frees all allocated IRQs for the
9583 * specified adapter.
9584 *
9585 * Return value:
9586 *      none
9587 **/
9588static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9589{
9590        struct pci_dev *pdev = ioa_cfg->pdev;
9591        int i;
9592
9593        for (i = 0; i < ioa_cfg->nvectors; i++)
9594                free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9595        pci_free_irq_vectors(pdev);
9596}
9597
9598/**
9599 * ipr_free_all_resources - Free all allocated resources for an adapter.
9600 * @ioa_cfg:    ioa config struct
9601 *
9602 * This function frees all allocated resources for the
9603 * specified adapter.
9604 *
9605 * Return value:
9606 *      none
9607 **/
9608static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9609{
9610        struct pci_dev *pdev = ioa_cfg->pdev;
9611
9612        ENTER;
9613        ipr_free_irqs(ioa_cfg);
9614        if (ioa_cfg->reset_work_q)
9615                destroy_workqueue(ioa_cfg->reset_work_q);
9616        iounmap(ioa_cfg->hdw_dma_regs);
9617        pci_release_regions(pdev);
9618        ipr_free_mem(ioa_cfg);
9619        scsi_host_put(ioa_cfg->host);
9620        pci_disable_device(pdev);
9621        LEAVE;
9622}
9623
9624/**
9625 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9626 * @ioa_cfg:    ioa config struct
9627 *
9628 * Return value:
9629 *      0 on success / -ENOMEM on allocation failure
9630 **/
9631static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9632{
9633        struct ipr_cmnd *ipr_cmd;
9634        struct ipr_ioarcb *ioarcb;
9635        dma_addr_t dma_addr;
9636        int i, entries_each_hrrq, hrrq_id = 0;
9637
9638        ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9639                                                sizeof(struct ipr_cmnd), 512, 0);
9640
9641        if (!ioa_cfg->ipr_cmd_pool)
9642                return -ENOMEM;
9643
9644        ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9645        ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9646
9647        if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9648                ipr_free_cmd_blks(ioa_cfg);
9649                return -ENOMEM;
9650        }
9651
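    	/*
    	 * Partition the command blocks among the HRRQs: with multiple
    	 * HRRQs, queue 0 gets IPR_NUM_INTERNAL_CMD_BLKS blocks and the
    	 * remaining queues split IPR_NUM_BASE_CMD_BLKS evenly; any
    	 * remainder from the division is folded into the last queue
    	 * after this loop.
    	 */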
9652        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9653                if (ioa_cfg->hrrq_num > 1) {
9654                        if (i == 0) {
9655                                entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9656                                ioa_cfg->hrrq[i].min_cmd_id = 0;
9657                                ioa_cfg->hrrq[i].max_cmd_id =
9658                                        (entries_each_hrrq - 1);
9659                        } else {
9660                                entries_each_hrrq =
9661                                        IPR_NUM_BASE_CMD_BLKS/
9662                                        (ioa_cfg->hrrq_num - 1);
9663                                ioa_cfg->hrrq[i].min_cmd_id =
9664                                        IPR_NUM_INTERNAL_CMD_BLKS +
9665                                        (i - 1) * entries_each_hrrq;
9666                                ioa_cfg->hrrq[i].max_cmd_id =
9667                                        (IPR_NUM_INTERNAL_CMD_BLKS +
9668                                        i * entries_each_hrrq - 1);
9669                        }
9670                } else {
9671                        entries_each_hrrq = IPR_NUM_CMD_BLKS;
9672                        ioa_cfg->hrrq[i].min_cmd_id = 0;
9673                        ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9674                }
9675                ioa_cfg->hrrq[i].size = entries_each_hrrq;
9676        }
9677
9678        BUG_ON(ioa_cfg->hrrq_num == 0);
9679
9680        i = IPR_NUM_CMD_BLKS -
9681                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9682        if (i > 0) {
9683                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9684                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9685        }
9686
9687        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9688                ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9689                                GFP_KERNEL, &dma_addr);
9690
9691                if (!ipr_cmd) {
9692                        ipr_free_cmd_blks(ioa_cfg);
9693                        return -ENOMEM;
9694                }
9695
9696                ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9697                ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9698
9699                ioarcb = &ipr_cmd->ioarcb;
9700                ipr_cmd->dma_addr = dma_addr;
9701                if (ioa_cfg->sis64)
9702                        ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9703                else
9704                        ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9705
9706                ioarcb->host_response_handle = cpu_to_be32(i << 2);
9707                if (ioa_cfg->sis64) {
9708                        ioarcb->u.sis64_addr_data.data_ioadl_addr =
9709                                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9710                        ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9711                                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9712                } else {
9713                        ioarcb->write_ioadl_addr =
9714                                cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9715                        ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9716                        ioarcb->ioasa_host_pci_addr =
9717                                cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9718                }
9719                ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9720                ipr_cmd->cmd_index = i;
9721                ipr_cmd->ioa_cfg = ioa_cfg;
9722                ipr_cmd->sense_buffer_dma = dma_addr +
9723                        offsetof(struct ipr_cmnd, sense_buffer);
9724
9725                ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9726                ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9727                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9728                if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9729                        hrrq_id++;
9730        }
9731
9732        return 0;
9733}
9734
9735/**
9736 * ipr_alloc_mem - Allocate memory for an adapter
9737 * @ioa_cfg:    ioa config struct
9738 *
9739 * Return value:
9740 *      0 on success / non-zero for error
9741 **/
9742static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9743{
9744        struct pci_dev *pdev = ioa_cfg->pdev;
9745        int i, rc = -ENOMEM;
9746
9747        ENTER;
9748        ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9749                                       sizeof(struct ipr_resource_entry),
9750                                       GFP_KERNEL);
9751
9752        if (!ioa_cfg->res_entries)
9753                goto out;
9754
9755        for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9756                list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9757                ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9758        }
9759
9760        ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9761                                              sizeof(struct ipr_misc_cbs),
9762                                              &ioa_cfg->vpd_cbs_dma,
9763                                              GFP_KERNEL);
9764
9765        if (!ioa_cfg->vpd_cbs)
9766                goto out_free_res_entries;
9767
9768        if (ipr_alloc_cmd_blks(ioa_cfg))
9769                goto out_free_vpd_cbs;
9770
9771        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9772                ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9773                                        sizeof(u32) * ioa_cfg->hrrq[i].size,
9774                                        &ioa_cfg->hrrq[i].host_rrq_dma,
9775                                        GFP_KERNEL);
9776
9777                if (!ioa_cfg->hrrq[i].host_rrq)  {
9778                        while (--i >= 0)
9779                                dma_free_coherent(&pdev->dev,
9780                                        sizeof(u32) * ioa_cfg->hrrq[i].size,
9781                                        ioa_cfg->hrrq[i].host_rrq,
9782                                        ioa_cfg->hrrq[i].host_rrq_dma);
9783                        goto out_ipr_free_cmd_blocks;
9784                }
9785                ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9786        }
9787
9788        ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9789                                                  ioa_cfg->cfg_table_size,
9790                                                  &ioa_cfg->cfg_table_dma,
9791                                                  GFP_KERNEL);
9792
9793        if (!ioa_cfg->u.cfg_table)
9794                goto out_free_host_rrq;
9795
9796        for (i = 0; i < IPR_MAX_HCAMS; i++) {
9797                ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9798                                                         sizeof(struct ipr_hostrcb),
9799                                                         &ioa_cfg->hostrcb_dma[i],
9800                                                         GFP_KERNEL);
9801
9802                if (!ioa_cfg->hostrcb[i])
9803                        goto out_free_hostrcb_dma;
9804
9805                ioa_cfg->hostrcb[i]->hostrcb_dma =
9806                        ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9807                ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9808                list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9809        }
9810
9811        ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9812                                 sizeof(struct ipr_trace_entry),
9813                                 GFP_KERNEL);
9814
9815        if (!ioa_cfg->trace)
9816                goto out_free_hostrcb_dma;
9817
9818        rc = 0;
9819out:
9820        LEAVE;
9821        return rc;
9822
9823out_free_hostrcb_dma:
9824        while (i-- > 0) {
9825                dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9826                                  ioa_cfg->hostrcb[i],
9827                                  ioa_cfg->hostrcb_dma[i]);
9828        }
9829        dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9830                          ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9831out_free_host_rrq:
9832        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9833                dma_free_coherent(&pdev->dev,
9834                                  sizeof(u32) * ioa_cfg->hrrq[i].size,
9835                                  ioa_cfg->hrrq[i].host_rrq,
9836                                  ioa_cfg->hrrq[i].host_rrq_dma);
9837        }
9838out_ipr_free_cmd_blocks:
9839        ipr_free_cmd_blks(ioa_cfg);
9840out_free_vpd_cbs:
9841        dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9842                          ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9843out_free_res_entries:
9844        kfree(ioa_cfg->res_entries);
9845        goto out;
9846}
9847
9848/**
9849 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9850 * @ioa_cfg:    ioa config struct
9851 *
9852 * Return value:
9853 *      none
9854 **/
9855static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9856{
9857        int i;
9858
9859        for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9860                ioa_cfg->bus_attr[i].bus = i;
9861                ioa_cfg->bus_attr[i].qas_enabled = 0;
9862                ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9863                if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9864                        ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9865                else
9866                        ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9867        }
9868}
9869
9870/**
9871 * ipr_init_regs - Initialize IOA registers
9872 * @ioa_cfg:    ioa config struct
9873 *
9874 * Return value:
9875 *      none
9876 **/
9877static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9878{
9879        const struct ipr_interrupt_offsets *p;
9880        struct ipr_interrupts *t;
9881        void __iomem *base;
9882
9883        p = &ioa_cfg->chip_cfg->regs;
9884        t = &ioa_cfg->regs;
9885        base = ioa_cfg->hdw_dma_regs;
9886
9887        t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9888        t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9889        t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9890        t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9891        t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9892        t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9893        t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9894        t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9895        t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9896        t->ioarrin_reg = base + p->ioarrin_reg;
9897        t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9898        t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9899        t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9900        t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9901        t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9902        t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9903
9904        if (ioa_cfg->sis64) {
9905                t->init_feedback_reg = base + p->init_feedback_reg;
9906                t->dump_addr_reg = base + p->dump_addr_reg;
9907                t->dump_data_reg = base + p->dump_data_reg;
9908                t->endian_swap_reg = base + p->endian_swap_reg;
9909        }
9910}
9911
9912/**
9913 * ipr_init_ioa_cfg - Initialize IOA config struct
9914 * @ioa_cfg:    ioa config struct
9915 * @host:               scsi host struct
9916 * @pdev:               PCI dev struct
9917 *
9918 * Return value:
9919 *      none
9920 **/
9921static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9922                             struct Scsi_Host *host, struct pci_dev *pdev)
9923{
9924        int i;
9925
9926        ioa_cfg->host = host;
9927        ioa_cfg->pdev = pdev;
9928        ioa_cfg->log_level = ipr_log_level;
9929        ioa_cfg->doorbell = IPR_DOORBELL;
9930        sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9931        sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9932        sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9933        sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9934        sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9935        sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9936
9937        INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9938        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9939        INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9940        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9941        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9942        INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9943        INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9944        init_waitqueue_head(&ioa_cfg->reset_wait_q);
9945        init_waitqueue_head(&ioa_cfg->msi_wait_q);
9946        init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9947        ioa_cfg->sdt_state = INACTIVE;
9948
9949        ipr_initialize_bus_attr(ioa_cfg);
9950        ioa_cfg->max_devs_supported = ipr_max_devs;
9951
9952        if (ioa_cfg->sis64) {
9953                host->max_channel = IPR_MAX_SIS64_BUSES;
9954                host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9955                host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9956                if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9957                        ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9958                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9959                                           + ((sizeof(struct ipr_config_table_entry64)
9960                                               * ioa_cfg->max_devs_supported)));
9961        } else {
9962                host->max_channel = IPR_VSET_BUS;
9963                host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9964                host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9965                if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9966                        ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9967                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9968                                           + ((sizeof(struct ipr_config_table_entry)
9969                                               * ioa_cfg->max_devs_supported)));
9970        }
9971
9972        host->unique_id = host->host_no;
9973        host->max_cmd_len = IPR_MAX_CDB_LEN;
9974        host->can_queue = ioa_cfg->max_cmds;
9975        pci_set_drvdata(pdev, ioa_cfg);
9976
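    	/* HRRQ 0 shares the SCSI host lock; each additional HRRQ uses
    	 * its own per-queue lock */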
9977        for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9978                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9979                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9980                spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9981                if (i == 0)
9982                        ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9983                else
9984                        ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9985        }
9986}
9987
9988/**
9989 * ipr_get_chip_info - Find adapter chip information
9990 * @dev_id:             PCI device id struct
9991 *
9992 * Return value:
9993 *      ptr to chip information on success / NULL on failure
9994 **/
9995static const struct ipr_chip_t *
9996ipr_get_chip_info(const struct pci_device_id *dev_id)
9997{
9998        int i;
9999
10000        for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10001                if (ipr_chip[i].vendor == dev_id->vendor &&
10002                    ipr_chip[i].device == dev_id->device)
10003                        return &ipr_chip[i];
10004        return NULL;
10005}
10006
10007/**
10008 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10009 *                                              during probe time
10010 * @ioa_cfg:    ioa config struct
10011 *
10012 * Return value:
10013 *      None
10014 **/
10015static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10016{
10017        struct pci_dev *pdev = ioa_cfg->pdev;
10018
10019        if (pci_channel_offline(pdev)) {
10020                wait_event_timeout(ioa_cfg->eeh_wait_q,
10021                                   !pci_channel_offline(pdev),
10022                                   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10023                pci_restore_state(pdev);
10024        }
10025}
10026
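     /**
      * name_msi_vectors - Name the interrupt vectors for an adapter
      * @ioa_cfg:    ioa config struct
      *
      * Description: Builds a "host<N>-<vector>" description string for each
      * interrupt vector; the strings are later passed to request_irq() and
      * appear in /proc/interrupts.
      *
      * Return value:
      *      none
      **/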
10027static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10028{
10029        int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10030
10031        for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10032                snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10033                         "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10036        }
10037}
10038
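     /**
      * ipr_request_other_msi_irqs - Request IRQs for the remaining vectors
      * @ioa_cfg:    ioa config struct
      * @pdev:       PCI device struct
      *
      * Description: Requests an IRQ for each vector beyond vector 0, which
      * is requested separately; on failure, frees any vectors this function
      * already requested.
      *
      * Return value:
      *      0 on success / non-zero on failure
      **/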
10039static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10040                struct pci_dev *pdev)
10041{
10042        int i, rc;
10043
10044        for (i = 1; i < ioa_cfg->nvectors; i++) {
10045                rc = request_irq(pci_irq_vector(pdev, i),
10046                        ipr_isr_mhrrq,
10047                        0,
10048                        ioa_cfg->vectors_info[i].desc,
10049                        &ioa_cfg->hrrq[i]);
10050                if (rc) {
10051                        while (--i >= 0)
10052                                free_irq(pci_irq_vector(pdev, i),
10053                                        &ioa_cfg->hrrq[i]);
10054                        return rc;
10055                }
10056        }
10057        return 0;
10058}
10059
10060/**
10061 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10062 * @irq:                interrupt number
10063 * @devp:               pointer to ioa config struct
10064 * Description: Simply set the msi_received flag to 1 indicating that
10065 * Message Signaled Interrupts are supported.
10066 *
10067 * Return value:
10068 *      IRQ_HANDLED
10069 **/
10070static irqreturn_t ipr_test_intr(int irq, void *devp)
10071{
10072        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10073        unsigned long lock_flags = 0;
10074        irqreturn_t rc = IRQ_HANDLED;
10075
10076        dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10077        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10078
10079        ioa_cfg->msi_received = 1;
10080        wake_up(&ioa_cfg->msi_wait_q);
10081
10082        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10083        return rc;
10084}
10085
10086/**
10087 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10088 * @ioa_cfg:            ioa config struct
10089 * @pdev:               PCI device struct
10090 * Description: This routine sets up and initiates a test interrupt to determine
10091 * if the interrupt is received via the ipr_test_intr() service routine.
10092 * If the test fails, the driver will fall back to LSI.
10093 *
10094 * Return value:
10095 *      0 on success / non-zero on failure
10096 **/
10097static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10098{
10099        int rc;
10100        volatile u32 int_reg;
10101        unsigned long lock_flags = 0;
10102        int irq = pci_irq_vector(pdev, 0);
10103
10104        ENTER;
10105
10106        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10107        init_waitqueue_head(&ioa_cfg->msi_wait_q);
10108        ioa_cfg->msi_received = 0;
10109        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
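        /* Unmask the IO debug acknowledge interrupt; the readl below flushes the posted write */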
10110        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10111        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10112        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10113
10114        rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10115        if (rc) {
10116                dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10117                return rc;
10118        } else if (ipr_debug)
10119                dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10120
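        /* Raise the test interrupt, then give ipr_test_intr() up to one second (HZ) to observe it */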
10121        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10122        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10123        wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10124        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10125        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10126
10127        if (!ioa_cfg->msi_received) {
10128                /* MSI test failed */
10129                dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10130                rc = -EOPNOTSUPP;
10131        } else if (ipr_debug)
10132                dev_info(&pdev->dev, "MSI test succeeded.\n");
10133
10134        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10135
10136        free_irq(irq, ioa_cfg);
10137
10138        LEAVE;
10139
10140        return rc;
10141}
10142
10143/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10144 * @pdev:               PCI device struct
10145 * @dev_id:             PCI device id struct
10146 *
10147 * Return value:
10148 *      0 on success / non-zero on failure
10149 **/
10150static int ipr_probe_ioa(struct pci_dev *pdev,
10151                         const struct pci_device_id *dev_id)
10152{
10153        struct ipr_ioa_cfg *ioa_cfg;
10154        struct Scsi_Host *host;
10155        unsigned long ipr_regs_pci;
10156        void __iomem *ipr_regs;
10157        int rc = PCIBIOS_SUCCESSFUL;
10158        volatile u32 mask, uproc, interrupts;
10159        unsigned long lock_flags, driver_lock_flags;
10160        unsigned int irq_flag;
10161
10162        ENTER;
10163
10164        dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10165        host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10166
10167        if (!host) {
10168                dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10169                rc = -ENOMEM;
10170                goto out;
10171        }
10172
10173        ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10174        memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10175        ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10176
10177        ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10178
10179        if (!ioa_cfg->ipr_chip) {
10180                dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10181                        dev_id->vendor, dev_id->device);
10182                goto out_scsi_host_put;
10183        }
10184
10185        /* set SIS 32 or SIS 64 */
10186        ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10187        ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10188        ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10189        ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10190
10191        if (ipr_transop_timeout)
10192                ioa_cfg->transop_timeout = ipr_transop_timeout;
10193        else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10194                ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10195        else
10196                ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10197
10198        ioa_cfg->revid = pdev->revision;
10199
10200        ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10201
10202        ipr_regs_pci = pci_resource_start(pdev, 0);
10203
10204        rc = pci_request_regions(pdev, IPR_NAME);
10205        if (rc < 0) {
10206                dev_err(&pdev->dev,
10207                        "Couldn't register memory range of registers\n");
10208                goto out_scsi_host_put;
10209        }
10210
10211        rc = pci_enable_device(pdev);
10212
10213        if (rc || pci_channel_offline(pdev)) {
10214                if (pci_channel_offline(pdev)) {
10215                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10216                        rc = pci_enable_device(pdev);
10217                }
10218
10219                if (rc) {
10220                        dev_err(&pdev->dev, "Cannot enable adapter\n");
10221                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10222                        goto out_release_regions;
10223                }
10224        }
10225
10226        ipr_regs = pci_ioremap_bar(pdev, 0);
10227
10228        if (!ipr_regs) {
10229                dev_err(&pdev->dev,
10230                        "Couldn't map memory range of registers\n");
10231                rc = -ENOMEM;
10232                goto out_disable;
10233        }
10234
10235        ioa_cfg->hdw_dma_regs = ipr_regs;
10236        ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10237        ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10238
10239        ipr_init_regs(ioa_cfg);
10240
10241        if (ioa_cfg->sis64) {
10242                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10243                if (rc < 0) {
10244                        dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10245                        rc = dma_set_mask_and_coherent(&pdev->dev,
10246                                                       DMA_BIT_MASK(32));
10247                }
10248        } else
10249                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10250
10251        if (rc < 0) {
10252                dev_err(&pdev->dev, "Failed to set DMA mask\n");
10253                goto cleanup_nomem;
10254        }
10255
10256        rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10257                                   ioa_cfg->chip_cfg->cache_line_size);
10258
10259        if (rc != PCIBIOS_SUCCESSFUL) {
10260                dev_err(&pdev->dev, "Write of cache line size failed\n");
10261                ipr_wait_for_pci_err_recovery(ioa_cfg);
10262                rc = -EIO;
10263                goto cleanup_nomem;
10264        }
10265
10266        /* Issue MMIO read to ensure card is not in EEH */
10267        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10268        ipr_wait_for_pci_err_recovery(ioa_cfg);
10269
10270        if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10271                dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10272                        IPR_MAX_MSIX_VECTORS);
10273                ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10274        }
10275
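        /*
         * Allocate 1..ipr_number_of_msix vectors, preferring MSI-X/MSI where
         * the chip supports them; legacy INTx is the fallback.
         */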
10276        irq_flag = PCI_IRQ_LEGACY;
10277        if (ioa_cfg->ipr_chip->has_msi)
10278                irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10279        rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10280        if (rc < 0) {
10281                ipr_wait_for_pci_err_recovery(ioa_cfg);
10282                goto cleanup_nomem;
10283        }
10284        ioa_cfg->nvectors = rc;
10285
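        /* Without MSI/MSI-X the ISR must always explicitly clear the interrupt register */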
10286        if (!pdev->msi_enabled && !pdev->msix_enabled)
10287                ioa_cfg->clear_isr = 1;
10288
10289        pci_set_master(pdev);
10290
10291        if (pci_channel_offline(pdev)) {
10292                ipr_wait_for_pci_err_recovery(ioa_cfg);
10293                pci_set_master(pdev);
10294                if (pci_channel_offline(pdev)) {
10295                        rc = -EIO;
10296                        goto out_msi_disable;
10297                }
10298        }
10299
10300        if (pdev->msi_enabled || pdev->msix_enabled) {
10301                rc = ipr_test_msi(ioa_cfg, pdev);
10302                switch (rc) {
10303                case 0:
10304                        dev_info(&pdev->dev,
10305                                "Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10306                                pdev->msix_enabled ? "-X" : "");
10307                        break;
10308                case -EOPNOTSUPP:
10309                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10310                        pci_free_irq_vectors(pdev);
10311
10312                        ioa_cfg->nvectors = 1;
10313                        ioa_cfg->clear_isr = 1;
10314                        break;
10315                default:
10316                        goto out_msi_disable;
10317                }
10318        }
10319
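        /* Use one HRRQ per vector, capped by the online CPU count and the driver maximum */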
10320        ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10321                                (unsigned int)num_online_cpus(),
10322                                (unsigned int)IPR_MAX_HRRQ_NUM);
10323
10324        if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10325                goto out_msi_disable;
10326
10327        if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10328                goto out_msi_disable;
10329
10330        rc = ipr_alloc_mem(ioa_cfg);
10331        if (rc < 0) {
10332                dev_err(&pdev->dev,
10333                        "Couldn't allocate enough memory for device driver!\n");
10334                goto out_msi_disable;
10335        }
10336
10337        /* Save away PCI config space for use following IOA reset */
10338        rc = pci_save_state(pdev);
10339
10340        if (rc) {
10341                dev_err(&pdev->dev, "Failed to save PCI config space\n");
10342                rc = -EIO;
10343                goto cleanup_nolog;
10344        }
10345
10346        /*
10347         * If HRRQ updated interrupt is not masked, or reset alert is set,
10348         * the card is in an unknown state and needs a hard reset
10349         */
10350        mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10351        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10352        uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10353        if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10354                ioa_cfg->needs_hard_reset = 1;
10355        if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10356                ioa_cfg->needs_hard_reset = 1;
10357        if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10358                ioa_cfg->ioa_unit_checked = 1;
10359
10360        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10361        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10362        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10363
10364        if (pdev->msi_enabled || pdev->msix_enabled) {
10365                name_msi_vectors(ioa_cfg);
10366                rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10367                        ioa_cfg->vectors_info[0].desc,
10368                        &ioa_cfg->hrrq[0]);
10369                if (!rc)
10370                        rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10371        } else {
10372                rc = request_irq(pdev->irq, ipr_isr,
10373                         IRQF_SHARED,
10374                         IPR_NAME, &ioa_cfg->hrrq[0]);
10375        }
10376        if (rc) {
10377                dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10378                        pdev->irq, rc);
10379                goto cleanup_nolog;
10380        }
10381
10382        if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10383            (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10384                ioa_cfg->needs_warm_reset = 1;
10385                ioa_cfg->reset = ipr_reset_slot_reset;
10386
10387                ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10388                                                                WQ_MEM_RECLAIM, host->host_no);
10389
10390                if (!ioa_cfg->reset_work_q) {
10391                        dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10392                        rc = -ENOMEM;
10393                        goto out_free_irq;
10394                }
10395        } else
10396                ioa_cfg->reset = ipr_reset_start_bist;
10397
10398        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10399        list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10400        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10401
10402        LEAVE;
10403out:
10404        return rc;
10405
10406out_free_irq:
10407        ipr_free_irqs(ioa_cfg);
10408cleanup_nolog:
10409        ipr_free_mem(ioa_cfg);
10410out_msi_disable:
10411        ipr_wait_for_pci_err_recovery(ioa_cfg);
10412        pci_free_irq_vectors(pdev);
10413cleanup_nomem:
10414        iounmap(ipr_regs);
10415out_disable:
10416        pci_disable_device(pdev);
10417out_release_regions:
10418        pci_release_regions(pdev);
10419out_scsi_host_put:
10420        scsi_host_put(host);
10421        goto out;
10422}
10423
10424/**
10425 * ipr_initiate_ioa_bringdown - Bring down an adapter
10426 * @ioa_cfg:            ioa config struct
10427 * @shutdown_type:      shutdown type
10428 *
10429 * Description: This function will initiate bringing down the adapter.
10430 * This consists of issuing an IOA shutdown to the adapter
10431 * to flush the cache, and running BIST.
10432 * If the caller needs to wait on the completion of the reset,
10433 * the caller must sleep on the reset_wait_q.
10434 *
10435 * Return value:
10436 *      none
10437 **/
10438static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10439                                       enum ipr_shutdown_type shutdown_type)
10440{
10441        ENTER;
10442        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10443                ioa_cfg->sdt_state = ABORT_DUMP;
10444        ioa_cfg->reset_retries = 0;
10445        ioa_cfg->in_ioa_bringdown = 1;
10446        ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10447        LEAVE;
10448}
10449
10450/**
10451 * __ipr_remove - Remove a single adapter
10452 * @pdev:       pci device struct
10453 *
10454 * Adapter hot plug remove entry point.
10455 *
10456 * Return value:
10457 *      none
10458 **/
10459static void __ipr_remove(struct pci_dev *pdev)
10460{
10461        unsigned long host_lock_flags = 0;
10462        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10463        int i;
10464        unsigned long driver_lock_flags;
10465        ENTER;
10466
10467        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10468        while (ioa_cfg->in_reset_reload) {
10469                spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10470                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10471                spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10472        }
10473
10474        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10475                spin_lock(&ioa_cfg->hrrq[i]._lock);
10476                ioa_cfg->hrrq[i].removing_ioa = 1;
10477                spin_unlock(&ioa_cfg->hrrq[i]._lock);
10478        }
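        /* Make the removing_ioa flags visible to the ISRs before starting the bringdown */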
10479        wmb();
10480        ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10481
10482        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10483        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10484        flush_work(&ioa_cfg->work_q);
10485        if (ioa_cfg->reset_work_q)
10486                flush_workqueue(ioa_cfg->reset_work_q);
10487        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10488        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10489
10490        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10491        list_del(&ioa_cfg->queue);
10492        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10493
10494        if (ioa_cfg->sdt_state == ABORT_DUMP)
10495                ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10496        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10497
10498        ipr_free_all_resources(ioa_cfg);
10499
10500        LEAVE;
10501}
10502
10503/**
10504 * ipr_remove - IOA hot plug remove entry point
10505 * @pdev:       pci device struct
10506 *
10507 * Adapter hot plug remove entry point.
10508 *
10509 * Return value:
10510 *      none
10511 **/
10512static void ipr_remove(struct pci_dev *pdev)
10513{
10514        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10515
10516        ENTER;
10517
10518        ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10519                              &ipr_trace_attr);
10520        ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10521                             &ipr_dump_attr);
10522        sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10523                        &ipr_ioa_async_err_log);
10524        scsi_remove_host(ioa_cfg->host);
10525
10526        __ipr_remove(pdev);
10527
10528        LEAVE;
10529}
10530
10531/**
10532 * ipr_probe - Adapter hot plug add entry point
10533 * @pdev:       pci device struct
 * @dev_id:     pci device id struct
 *
10534 * Return value:
10535 *      0 on success / non-zero on failure
10536 **/
10537static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10538{
10539        struct ipr_ioa_cfg *ioa_cfg;
10540        unsigned long flags;
10541        int rc, i;
10542
10543        rc = ipr_probe_ioa(pdev, dev_id);
10544
10545        if (rc)
10546                return rc;
10547
10548        ioa_cfg = pci_get_drvdata(pdev);
10549        rc = ipr_probe_ioa_part2(ioa_cfg);
10550
10551        if (rc) {
10552                __ipr_remove(pdev);
10553                return rc;
10554        }
10555
10556        rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10557
10558        if (rc) {
10559                __ipr_remove(pdev);
10560                return rc;
10561        }
10562
10563        rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10564                                   &ipr_trace_attr);
10565
10566        if (rc) {
10567                scsi_remove_host(ioa_cfg->host);
10568                __ipr_remove(pdev);
10569                return rc;
10570        }
10571
10572        rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10573                        &ipr_ioa_async_err_log);
10574
10575        if (rc) {
10578                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10579                                &ipr_trace_attr);
10580                scsi_remove_host(ioa_cfg->host);
10581                __ipr_remove(pdev);
10582                return rc;
10583        }
10584
10585        rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10586                                   &ipr_dump_attr);
10587
10588        if (rc) {
10589                sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10590                                      &ipr_ioa_async_err_log);
10591                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10592                                      &ipr_trace_attr);
10593                scsi_remove_host(ioa_cfg->host);
10594                __ipr_remove(pdev);
10595                return rc;
10596        }
10597        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10598        ioa_cfg->scan_enabled = 1;
10599        schedule_work(&ioa_cfg->work_q);
10600        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10601
10602        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10603
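        /* On SIS-64 with multiple vectors, service the secondary HRRQs via irq_poll (softirq polling) */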
10604        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10605                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10606                        irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10607                                        ioa_cfg->iopoll_weight, ipr_iopoll);
10608                }
10609        }
10610
10611        scsi_scan_host(ioa_cfg->host);
10612
10613        return 0;
10614}
10615
10616/**
10617 * ipr_shutdown - Shutdown handler.
10618 * @pdev:       pci device struct
10619 *
10620 * This function is invoked upon system shutdown/reboot. It will issue
10621 * an adapter shutdown to the adapter to flush the write cache.
10622 *
10623 * Return value:
10624 *      none
10625 **/
10626static void ipr_shutdown(struct pci_dev *pdev)
10627{
10628        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10629        unsigned long lock_flags = 0;
10630        enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10631        int i;
10632
10633        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10634        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10635                ioa_cfg->iopoll_weight = 0;
10636                for (i = 1; i < ioa_cfg->hrrq_num; i++)
10637                        irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10638        }
10639
10640        while (ioa_cfg->in_reset_reload) {
10641                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10642                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10643                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10644        }
10645
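        /* A fast reboot on SIS-64 only needs a quiesce, not a full cache-flush shutdown */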
10646        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10647                shutdown_type = IPR_SHUTDOWN_QUIESCE;
10648
10649        ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10650        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10651        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10652        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10653                ipr_free_irqs(ioa_cfg);
10654                pci_disable_device(ioa_cfg->pdev);
10655        }
10656}
10657
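/*
 * PCI IDs claimed by this driver. The positional fields follow struct
 * pci_device_id: vendor, device, subvendor, subdevice, class, class_mask,
 * driver_data (the last carries IPR_USE_* behavior flags).
 */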
10658static const struct pci_device_id ipr_pci_table[] = {
10659        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10660                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10661        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10662                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10663        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10664                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10665        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10666                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10667        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10668                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10669        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10670                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10671        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10672                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10673        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10674                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10675                IPR_USE_LONG_TRANSOP_TIMEOUT },
10676        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10677              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10678        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10679              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10680              IPR_USE_LONG_TRANSOP_TIMEOUT },
10681        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10682              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10683              IPR_USE_LONG_TRANSOP_TIMEOUT },
10684        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10685              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10686        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10687              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10688              IPR_USE_LONG_TRANSOP_TIMEOUT },
10689        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10690              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10691              IPR_USE_LONG_TRANSOP_TIMEOUT },
10692        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10693              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10694              IPR_USE_LONG_TRANSOP_TIMEOUT },
10695        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10696              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10697        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10698              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10699        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10700              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10701              IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10702        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10703                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10704        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10705                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10706        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10707                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10708                IPR_USE_LONG_TRANSOP_TIMEOUT },
10709        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10710                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10711                IPR_USE_LONG_TRANSOP_TIMEOUT },
10712        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10713                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10714        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10715                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10716        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10717                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10718        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10719                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10720        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10721                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10722        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10723                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10724        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10725                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10726        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10727                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10728        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10729                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10730        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10731                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10732        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10733                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10734        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10735                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10736        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10737                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10738        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10739                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10740        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10741                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10742        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10743                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10744        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10745                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10746        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10747                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10748        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10749                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10750        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10751                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10752        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10753                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10754        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10755                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10756        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10757                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10758        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10759                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10760        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10761                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10762        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10763                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10764        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10765                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10766        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10767                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10768        { }
10769};
10770MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10771
10772static const struct pci_error_handlers ipr_err_handler = {
10773        .error_detected = ipr_pci_error_detected,
10774        .mmio_enabled = ipr_pci_mmio_enabled,
10775        .slot_reset = ipr_pci_slot_reset,
10776};
10777
10778static struct pci_driver ipr_driver = {
10779        .name = IPR_NAME,
10780        .id_table = ipr_pci_table,
10781        .probe = ipr_probe,
10782        .remove = ipr_remove,
10783        .shutdown = ipr_shutdown,
10784        .err_handler = &ipr_err_handler,
10785};
10786
10787/**
10788 * ipr_halt_done - Shutdown prepare completion
10789 *
10790 * Return value:
10791 *      none
10792 **/
10793static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10794{
10795        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10796}
10797
10798/**
10799 * ipr_halt - Issue shutdown prepare to all adapters
10800 * @nb:         notifier block
 * @event:      reboot notifier event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:        unused
 *
10801 * Return value:
10802 *      NOTIFY_OK on success / NOTIFY_DONE on failure
10803 **/
10804static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10805{
10806        struct ipr_cmnd *ipr_cmd;
10807        struct ipr_ioa_cfg *ioa_cfg;
10808        unsigned long flags = 0, driver_lock_flags;
10809
10810        if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10811                return NOTIFY_DONE;
10812
10813        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10814
10815        list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10816                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
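                /*
                 * Skip adapters that cannot accept commands, and SIS-64
                 * adapters that the fast reboot path quiesces in
                 * ipr_shutdown() instead.
                 */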
10817                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10818                    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10819                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10820                        continue;
10821                }
10822
10823                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10824                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10825                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10826                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10827                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10828
10829                ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10830                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10831        }
10832        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10833
10834        return NOTIFY_OK;
10835}
10836
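/* Reboot notifier registered in ipr_init(); lets ipr_halt() prepare each adapter for shutdown */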
10837static struct notifier_block ipr_notifier = {
10838        .notifier_call = ipr_halt,
10839};
10840
10841/**
10842 * ipr_init - Module entry point
10843 *
10844 * Return value:
10845 *      0 on success / negative value on failure
10846 **/
10847static int __init ipr_init(void)
10848{
10849        ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10850                 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10851
10852        register_reboot_notifier(&ipr_notifier);
10853        return pci_register_driver(&ipr_driver);
10854}
10855
10856/**
10857 * ipr_exit - Module unload
10858 *
10859 * Module unload entry point.
10860 *
10861 * Return value:
10862 *      none
10863 **/
10864static void __exit ipr_exit(void)
10865{
10866        unregister_reboot_notifier(&ipr_notifier);
10867        pci_unregister_driver(&ipr_driver);
10868}
10869
10870module_init(ipr_init);
10871module_exit(ipr_exit);
10872