// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
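
/*
 * Illustrative sketch (not the driver's verbatim code): during probe, the
 * driver walks ipr_chip[] for the entry whose PCI vendor/device IDs match
 * the adapter being brought up, which in turn selects the register offsets
 * in ipr_chip_cfg[] above:
 *
 *	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *		if (ipr_chip[i].vendor == pdev->vendor &&
 *		    ipr_chip[i].device == pdev->device)
 *			return &ipr_chip[i];
 */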

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
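
/*
 * Example usage (illustrative): the parameters above are given at load
 * time, e.g. "modprobe ipr max_speed=2 number_of_msix=8 log_level=2".
 * Only fastfail, debug, and fast_reboot are registered with
 * S_IRUGO | S_IWUSR and so remain writable through sysfs after load; the
 * rest use permission 0 and are read at module init only.
 */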

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
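
/*
 * A sketch of how the compare mask above is interpreted (assuming the
 * matching logic found elsewhere in this driver): for each byte of the
 * enclosure product ID, an 'X' in the second string means "this byte must
 * match", while any other character (such as '*') makes that byte a
 * wildcard:
 *
 *	if (ste->compare_product_id_byte[j] == 'X') {
 *		if (vpids->product_id[j] != ste->product_id[j])
 *			break;	// mismatch, try the next table entry
 *	}
 */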

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
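
/*
 * Note on the trace hook above: the atomically incremented trace_index is
 * wrapped with IPR_TRACE_INDEX_MASK, so the trace behaves as a lock-free
 * ring buffer (this assumes the trace length is a power of two and the
 * mask is length - 1, per the definitions in ipr.h). When
 * CONFIG_SCSI_IPR_TRACE is not set, the hook compiles away entirely.
 */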

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
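
/*
 * Note: the reinit above deliberately saves and restores hrrq_id around
 * the memset of the command packet, since the command block remains
 * associated with the same host RRQ across reuse; everything else in the
 * IOARCB is rebuilt from scratch before the block is handed out again.
 */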

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
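
/*
 * The trailing readl() of the sense interrupt register above acts as a
 * flushing read: it forces the posted mask/clear writes out to the adapter
 * before the caller proceeds, which is why the value is read into a
 * volatile local and otherwise ignored.
 */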

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
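
/*
 * Worked example for the sis64 path above: the low-order bits of the
 * IOARCB address written to IOARRIN encode its size. Bit 0 is always set
 * (256 byte default), and bit 2 is OR'd in as well once the scatter/gather
 * list no longer fits: with 16 entries, and assuming the 16 byte
 * ipr_ioadl64_desc defined in ipr.h, 16 * 16 = 256 bytes > 128, so the
 * 512 byte IOARCB format is requested.
 */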

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
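
/*
 * Locking note: ipr_send_blocking_cmd() must be entered with the host lock
 * held; it drops the lock while sleeping on the completion and retakes it
 * before returning, so callers see the same locking state on both sides of
 * the call.
 */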

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
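
/*
 * Example of the distribution above: with hrrq_num == 8, successive calls
 * return 1, 2, ... 7, 1, 2, ... -- queue 0 (IPR_INIT_HRRQ) is reserved for
 * internal and initialization commands, and is only returned when a single
 * queue is configured.
 */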

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
1286
1287/**
1288 * __ipr_format_res_path - Format the resource path for printing.
1289 * @res_path:   resource path
1290 * @buf:        buffer
1291 * @len:        length of buffer provided
1292 *
1293 * Return value:
1294 *      pointer to buffer
1295 **/
1296static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1297{
1298        int i;
1299        char *p = buffer;
1300
1301        *p = '\0';
1302        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1303        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1304                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1305
1306        return buffer;
1307}
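
    /*
     * Example (illustrative, not from the original author): a res_path of
     * { 0x00, 0x02, 0xff, ... } formats as "00-02"; the 0xff sentinel ends
     * the walk and the (i * 3) < len test guards against overrunning the
     * caller's buffer.
     */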
1308
1309/**
1310 * ipr_format_res_path - Format the resource path for printing.
1311 * @ioa_cfg:    ioa config struct
1312 * @res_path:   resource path
1313 * @buffer:     buffer
1314 * @len:        length of buffer provided
1315 *
1316 * Return value:
1317 *      pointer to buffer
1318 **/
1319static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1320                                 u8 *res_path, char *buffer, int len)
1321{
1322        char *p = buffer;
1323
1324        *p = '\0';
1325        p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1326        __ipr_format_res_path(res_path, p, len - (p - buffer));
1327        return buffer;
1328}
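
    /*
     * Example (illustrative): with host_no 2 and the res_path above, the
     * result is "2/00-02", i.e. the SCSI host number prefixed to the raw
     * resource path string.
     */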
1329
1330/**
1331 * ipr_update_res_entry - Update the resource entry.
1332 * @res:        resource entry struct
1333 * @cfgtew:     config table entry wrapper struct
1334 *
1335 * Return value:
1336 *      none
1337 **/
1338static void ipr_update_res_entry(struct ipr_resource_entry *res,
1339                                 struct ipr_config_table_entry_wrapper *cfgtew)
1340{
1341        char buffer[IPR_MAX_RES_PATH_LENGTH];
1342        unsigned int proto;
1343        int new_path = 0;
1344
1345        if (res->ioa_cfg->sis64) {
1346                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1347                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1348                res->type = cfgtew->u.cfgte64->res_type;
1349
1350                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1351                        sizeof(struct ipr_std_inq_data));
1352
1353                res->qmodel = IPR_QUEUEING_MODEL64(res);
1354                proto = cfgtew->u.cfgte64->proto;
1355                res->res_handle = cfgtew->u.cfgte64->res_handle;
1356                res->dev_id = cfgtew->u.cfgte64->dev_id;
1357
1358                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1359                        sizeof(res->dev_lun.scsi_lun));
1360
1361                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1362                                        sizeof(res->res_path))) {
1363                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1364                                sizeof(res->res_path));
1365                        new_path = 1;
1366                }
1367
1368                if (res->sdev && new_path)
1369                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1370                                    ipr_format_res_path(res->ioa_cfg,
1371                                        res->res_path, buffer, sizeof(buffer)));
1372        } else {
1373                res->flags = cfgtew->u.cfgte->flags;
1374                if (res->flags & IPR_IS_IOA_RESOURCE)
1375                        res->type = IPR_RES_TYPE_IOAFP;
1376                else
1377                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1378
1379                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1380                        sizeof(struct ipr_std_inq_data));
1381
1382                res->qmodel = IPR_QUEUEING_MODEL(res);
1383                proto = cfgtew->u.cfgte->proto;
1384                res->res_handle = cfgtew->u.cfgte->res_handle;
1385        }
1386
1387        ipr_update_ata_class(res, proto);
1388}
1389
1390/**
1391 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1392 *                        for the resource.
1393 * @res:        resource entry struct
1395 *
1396 * Return value:
1397 *      none
1398 **/
1399static void ipr_clear_res_target(struct ipr_resource_entry *res)
1400{
1401        struct ipr_resource_entry *gscsi_res = NULL;
1402        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1403
1404        if (!ioa_cfg->sis64)
1405                return;
1406
1407        if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1408                clear_bit(res->target, ioa_cfg->array_ids);
1409        else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1410                clear_bit(res->target, ioa_cfg->vset_ids);
1411        else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1412                list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1413                        if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1414                                return;
1415                clear_bit(res->target, ioa_cfg->target_ids);
1416
1417        } else if (res->bus == 0)
1418                clear_bit(res->target, ioa_cfg->target_ids);
1419}
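
    /*
     * Note (illustrative reading of the code above): for generic SCSI
     * resources on bus 0, multiple LUNs of the same device share one target
     * id keyed by dev_id, so the loop clears the target bit only when the
     * last resource with that dev_id is being removed.
     */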
1420
1421/**
1422 * ipr_handle_config_change - Handle a config change from the adapter
1423 * @ioa_cfg:    ioa config struct
1424 * @hostrcb:    hostrcb struct
1425 *
1426 * Return value:
1427 *      none
1428 **/
1429static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1430                                     struct ipr_hostrcb *hostrcb)
1431{
1432        struct ipr_resource_entry *res = NULL;
1433        struct ipr_config_table_entry_wrapper cfgtew;
1434        __be32 cc_res_handle;
1436        u32 is_ndn = 1;
1437
1438        if (ioa_cfg->sis64) {
1439                cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1440                cc_res_handle = cfgtew.u.cfgte64->res_handle;
1441        } else {
1442                cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1443                cc_res_handle = cfgtew.u.cfgte->res_handle;
1444        }
1445
1446        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1447                if (res->res_handle == cc_res_handle) {
1448                        is_ndn = 0;
1449                        break;
1450                }
1451        }
1452
1453        if (is_ndn) {
1454                if (list_empty(&ioa_cfg->free_res_q)) {
1455                        ipr_send_hcam(ioa_cfg,
1456                                      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1457                                      hostrcb);
1458                        return;
1459                }
1460
1461                res = list_entry(ioa_cfg->free_res_q.next,
1462                                 struct ipr_resource_entry, queue);
1463
1464                list_del(&res->queue);
1465                ipr_init_res_entry(res, &cfgtew);
1466                list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1467        }
1468
1469        ipr_update_res_entry(res, &cfgtew);
1470
1471        if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1472                if (res->sdev) {
1473                        res->del_from_ml = 1;
1474                        res->res_handle = IPR_INVALID_RES_HANDLE;
1475                        schedule_work(&ioa_cfg->work_q);
1476                } else {
1477                        ipr_clear_res_target(res);
1478                        list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1479                }
1480        } else if (!res->sdev || res->del_from_ml) {
1481                res->add_to_ml = 1;
1482                schedule_work(&ioa_cfg->work_q);
1483        }
1484
1485        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1486}
1487
1488/**
1489 * ipr_process_ccn - Op done function for a CCN.
1490 * @ipr_cmd:    ipr command struct
1491 *
1492 * This function is the op done function for a configuration
1493 * change notification host controlled async message (HCAM) from the adapter.
1494 *
1495 * Return value:
1496 *      none
1497 **/
1498static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1499{
1500        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1501        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1502        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1503
1504        list_del_init(&hostrcb->queue);
1505        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1506
1507        if (ioasc) {
1508                if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1509                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1510                        dev_err(&ioa_cfg->pdev->dev,
1511                                "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1512
1513                ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1514        } else {
1515                ipr_handle_config_change(ioa_cfg, hostrcb);
1516        }
1517}
1518
1519/**
1520 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1521 * @i:          index into buffer
1522 * @buf:                string to modify
1523 *
1524 * This function will strip all trailing whitespace, pad the end
1525 * of the string with a single space, and NULL terminate the string.
1526 *
1527 * Return value:
1528 *      new length of string
1529 **/
1530static int strip_and_pad_whitespace(int i, char *buf)
1531{
1532        while (i && buf[i] == ' ')
1533                i--;
1534        buf[i+1] = ' ';
1535        buf[i+2] = '\0';
1536        return i + 2;
1537}
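
    /*
     * Worked example (illustrative):
     *
     *   char buf[] = "IBM   ";
     *   int end = strip_and_pad_whitespace(5, buf);
     *
     * buf becomes "IBM " (one pad space retained) and end is 4, the index
     * of the new NUL terminator.
     */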
1538
1539/**
1540 * ipr_log_vpd_compact - Log the passed VPD compactly.
1541 * @prefix:             string to print at start of printk
1542 * @hostrcb:    hostrcb pointer
1543 * @vpd:                vendor/product id/sn struct
1544 *
1545 * Return value:
1546 *      none
1547 **/
1548static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1549                                struct ipr_vpd *vpd)
1550{
1551        char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1552        int i = 0;
1553
1554        memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1555        i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1556
1557        memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1558        i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1559
1560        memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1561        buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1562
1563        ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1564}
1565
1566/**
1567 * ipr_log_vpd - Log the passed VPD to the error log.
1568 * @vpd:                vendor/product id/sn struct
1569 *
1570 * Return value:
1571 *      none
1572 **/
1573static void ipr_log_vpd(struct ipr_vpd *vpd)
1574{
1575        char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1576                    + IPR_SERIAL_NUM_LEN];
1577
1578        memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1579        memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1580               IPR_PROD_ID_LEN);
1581        buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1582        ipr_err("Vendor/Product ID: %s\n", buffer);
1583
1584        memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1585        buffer[IPR_SERIAL_NUM_LEN] = '\0';
1586        ipr_err("    Serial Number: %s\n", buffer);
1587}
1588
1589/**
1590 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1591 * @prefix:             string to print at start of printk
1592 * @hostrcb:    hostrcb pointer
1593 * @vpd:                vendor/product id/sn/wwn struct
1594 *
1595 * Return value:
1596 *      none
1597 **/
1598static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1599                                    struct ipr_ext_vpd *vpd)
1600{
1601        ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1602        ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1603                     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1604}
1605
1606/**
1607 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1608 * @vpd:                vendor/product id/sn/wwn struct
1609 *
1610 * Return value:
1611 *      none
1612 **/
1613static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1614{
1615        ipr_log_vpd(&vpd->vpd);
1616        ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1617                be32_to_cpu(vpd->wwid[1]));
1618}
1619
1620/**
1621 * ipr_log_enhanced_cache_error - Log a cache error.
1622 * @ioa_cfg:    ioa config struct
1623 * @hostrcb:    hostrcb struct
1624 *
1625 * Return value:
1626 *      none
1627 **/
1628static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1629                                         struct ipr_hostrcb *hostrcb)
1630{
1631        struct ipr_hostrcb_type_12_error *error;
1632
1633        if (ioa_cfg->sis64)
1634                error = &hostrcb->hcam.u.error64.u.type_12_error;
1635        else
1636                error = &hostrcb->hcam.u.error.u.type_12_error;
1637
1638        ipr_err("-----Current Configuration-----\n");
1639        ipr_err("Cache Directory Card Information:\n");
1640        ipr_log_ext_vpd(&error->ioa_vpd);
1641        ipr_err("Adapter Card Information:\n");
1642        ipr_log_ext_vpd(&error->cfc_vpd);
1643
1644        ipr_err("-----Expected Configuration-----\n");
1645        ipr_err("Cache Directory Card Information:\n");
1646        ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1647        ipr_err("Adapter Card Information:\n");
1648        ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1649
1650        ipr_err("Additional IOA Data: %08X %08X %08X\n",
1651                     be32_to_cpu(error->ioa_data[0]),
1652                     be32_to_cpu(error->ioa_data[1]),
1653                     be32_to_cpu(error->ioa_data[2]));
1654}
1655
1656/**
1657 * ipr_log_cache_error - Log a cache error.
1658 * @ioa_cfg:    ioa config struct
1659 * @hostrcb:    hostrcb struct
1660 *
1661 * Return value:
1662 *      none
1663 **/
1664static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1665                                struct ipr_hostrcb *hostrcb)
1666{
1667        struct ipr_hostrcb_type_02_error *error =
1668                &hostrcb->hcam.u.error.u.type_02_error;
1669
1670        ipr_err("-----Current Configuration-----\n");
1671        ipr_err("Cache Directory Card Information:\n");
1672        ipr_log_vpd(&error->ioa_vpd);
1673        ipr_err("Adapter Card Information:\n");
1674        ipr_log_vpd(&error->cfc_vpd);
1675
1676        ipr_err("-----Expected Configuration-----\n");
1677        ipr_err("Cache Directory Card Information:\n");
1678        ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1679        ipr_err("Adapter Card Information:\n");
1680        ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1681
1682        ipr_err("Additional IOA Data: %08X %08X %08X\n",
1683                     be32_to_cpu(error->ioa_data[0]),
1684                     be32_to_cpu(error->ioa_data[1]),
1685                     be32_to_cpu(error->ioa_data[2]));
1686}
1687
1688/**
1689 * ipr_log_enhanced_config_error - Log a configuration error.
1690 * @ioa_cfg:    ioa config struct
1691 * @hostrcb:    hostrcb struct
1692 *
1693 * Return value:
1694 *      none
1695 **/
1696static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1697                                          struct ipr_hostrcb *hostrcb)
1698{
1699        int errors_logged, i;
1700        struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1701        struct ipr_hostrcb_type_13_error *error;
1702
1703        error = &hostrcb->hcam.u.error.u.type_13_error;
1704        errors_logged = be32_to_cpu(error->errors_logged);
1705
1706        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1707                be32_to_cpu(error->errors_detected), errors_logged);
1708
1709        dev_entry = error->dev;
1710
1711        for (i = 0; i < errors_logged; i++, dev_entry++) {
1712                ipr_err_separator;
1713
1714                ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1715                ipr_log_ext_vpd(&dev_entry->vpd);
1716
1717                ipr_err("-----New Device Information-----\n");
1718                ipr_log_ext_vpd(&dev_entry->new_vpd);
1719
1720                ipr_err("Cache Directory Card Information:\n");
1721                ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1722
1723                ipr_err("Adapter Card Information:\n");
1724                ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1725        }
1726}
1727
1728/**
1729 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1730 * @ioa_cfg:    ioa config struct
1731 * @hostrcb:    hostrcb struct
1732 *
1733 * Return value:
1734 *      none
1735 **/
1736static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1737                                       struct ipr_hostrcb *hostrcb)
1738{
1739        int errors_logged, i;
1740        struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1741        struct ipr_hostrcb_type_23_error *error;
1742        char buffer[IPR_MAX_RES_PATH_LENGTH];
1743
1744        error = &hostrcb->hcam.u.error64.u.type_23_error;
1745        errors_logged = be32_to_cpu(error->errors_logged);
1746
1747        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1748                be32_to_cpu(error->errors_detected), errors_logged);
1749
1750        dev_entry = error->dev;
1751
1752        for (i = 0; i < errors_logged; i++, dev_entry++) {
1753                ipr_err_separator;
1754
1755                ipr_err("Device %d : %s", i + 1,
1756                        __ipr_format_res_path(dev_entry->res_path,
1757                                              buffer, sizeof(buffer)));
1758                ipr_log_ext_vpd(&dev_entry->vpd);
1759
1760                ipr_err("-----New Device Information-----\n");
1761                ipr_log_ext_vpd(&dev_entry->new_vpd);
1762
1763                ipr_err("Cache Directory Card Information:\n");
1764                ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1765
1766                ipr_err("Adapter Card Information:\n");
1767                ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1768        }
1769}
1770
1771/**
1772 * ipr_log_config_error - Log a configuration error.
1773 * @ioa_cfg:    ioa config struct
1774 * @hostrcb:    hostrcb struct
1775 *
1776 * Return value:
1777 *      none
1778 **/
1779static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1780                                 struct ipr_hostrcb *hostrcb)
1781{
1782        int errors_logged, i;
1783        struct ipr_hostrcb_device_data_entry *dev_entry;
1784        struct ipr_hostrcb_type_03_error *error;
1785
1786        error = &hostrcb->hcam.u.error.u.type_03_error;
1787        errors_logged = be32_to_cpu(error->errors_logged);
1788
1789        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1790                be32_to_cpu(error->errors_detected), errors_logged);
1791
1792        dev_entry = error->dev;
1793
1794        for (i = 0; i < errors_logged; i++, dev_entry++) {
1795                ipr_err_separator;
1796
1797                ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1798                ipr_log_vpd(&dev_entry->vpd);
1799
1800                ipr_err("-----New Device Information-----\n");
1801                ipr_log_vpd(&dev_entry->new_vpd);
1802
1803                ipr_err("Cache Directory Card Information:\n");
1804                ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1805
1806                ipr_err("Adapter Card Information:\n");
1807                ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1808
1809                ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1810                        be32_to_cpu(dev_entry->ioa_data[0]),
1811                        be32_to_cpu(dev_entry->ioa_data[1]),
1812                        be32_to_cpu(dev_entry->ioa_data[2]),
1813                        be32_to_cpu(dev_entry->ioa_data[3]),
1814                        be32_to_cpu(dev_entry->ioa_data[4]));
1815        }
1816}
1817
1818/**
1819 * ipr_log_enhanced_array_error - Log an array configuration error.
1820 * @ioa_cfg:    ioa config struct
1821 * @hostrcb:    hostrcb struct
1822 *
1823 * Return value:
1824 *      none
1825 **/
1826static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1827                                         struct ipr_hostrcb *hostrcb)
1828{
1829        int i, num_entries;
1830        struct ipr_hostrcb_type_14_error *error;
1831        struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1832        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1833
1834        error = &hostrcb->hcam.u.error.u.type_14_error;
1835
1836        ipr_err_separator;
1837
1838        ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1839                error->protection_level,
1840                ioa_cfg->host->host_no,
1841                error->last_func_vset_res_addr.bus,
1842                error->last_func_vset_res_addr.target,
1843                error->last_func_vset_res_addr.lun);
1844
1845        ipr_err_separator;
1846
1847        array_entry = error->array_member;
1848        num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1849                            ARRAY_SIZE(error->array_member));
1850
1851        for (i = 0; i < num_entries; i++, array_entry++) {
1852                if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1853                        continue;
1854
1855                if (be32_to_cpu(error->exposed_mode_adn) == i)
1856                        ipr_err("Exposed Array Member %d:\n", i);
1857                else
1858                        ipr_err("Array Member %d:\n", i);
1859
1860                ipr_log_ext_vpd(&array_entry->vpd);
1861                ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1862                ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1863                                 "Expected Location");
1864
1865                ipr_err_separator;
1866        }
1867}
1868
1869/**
1870 * ipr_log_array_error - Log an array configuration error.
1871 * @ioa_cfg:    ioa config struct
1872 * @hostrcb:    hostrcb struct
1873 *
1874 * Return value:
1875 *      none
1876 **/
1877static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1878                                struct ipr_hostrcb *hostrcb)
1879{
1880        int i;
1881        struct ipr_hostrcb_type_04_error *error;
1882        struct ipr_hostrcb_array_data_entry *array_entry;
1883        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1884
1885        error = &hostrcb->hcam.u.error.u.type_04_error;
1886
1887        ipr_err_separator;
1888
1889        ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1890                error->protection_level,
1891                ioa_cfg->host->host_no,
1892                error->last_func_vset_res_addr.bus,
1893                error->last_func_vset_res_addr.target,
1894                error->last_func_vset_res_addr.lun);
1895
1896        ipr_err_separator;
1897
1898        array_entry = error->array_member;
1899
1900        for (i = 0; i < 18; i++) {
1901                if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1902                        continue;
1903
1904                if (be32_to_cpu(error->exposed_mode_adn) == i)
1905                        ipr_err("Exposed Array Member %d:\n", i);
1906                else
1907                        ipr_err("Array Member %d:\n", i);
1908
1909                ipr_log_vpd(&array_entry->vpd);
1910
1911                ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1912                ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1913                                 "Expected Location");
1914
1915                ipr_err_separator;
1916
1917                if (i == 9)
1918                        array_entry = error->array_member2;
1919                else
1920                        array_entry++;
1921        }
1922}
1923
1924/**
1925 * ipr_log_hex_data - Log additional hex IOA error data.
1926 * @ioa_cfg:    ioa config struct
1927 * @data:               IOA error data
1928 * @len:                data length
1929 *
1930 * Return value:
1931 *      none
1932 **/
1933static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1934{
1935        int i;
1936
1937        if (len == 0)
1938                return;
1939
1940        if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1941                len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1942
1943        for (i = 0; i < len / 4; i += 4) {
1944                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1945                        be32_to_cpu(data[i]),
1946                        be32_to_cpu(data[i+1]),
1947                        be32_to_cpu(data[i+2]),
1948                        be32_to_cpu(data[i+3]));
1949        }
1950}
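
    /*
     * Example output (illustrative values):
     *
     *   00000000: DEADBEEF 00000001 00000002 00000003
     *   00000010: ...
     *
     * i indexes 32-bit words and advances four at a time, so i*4 is the
     * byte offset printed at the start of each row.
     */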
1951
1952/**
1953 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1954 * @ioa_cfg:    ioa config struct
1955 * @hostrcb:    hostrcb struct
1956 *
1957 * Return value:
1958 *      none
1959 **/
1960static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1961                                            struct ipr_hostrcb *hostrcb)
1962{
1963        struct ipr_hostrcb_type_17_error *error;
1964
1965        if (ioa_cfg->sis64)
1966                error = &hostrcb->hcam.u.error64.u.type_17_error;
1967        else
1968                error = &hostrcb->hcam.u.error.u.type_17_error;
1969
1970        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1971        strim(error->failure_reason);
1972
1973        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1974                     be32_to_cpu(hostrcb->hcam.u.error.prc));
1975        ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1976        ipr_log_hex_data(ioa_cfg, error->data,
1977                         be32_to_cpu(hostrcb->hcam.length) -
1978                         (offsetof(struct ipr_hostrcb_error, u) +
1979                          offsetof(struct ipr_hostrcb_type_17_error, data)));
1980}
1981
1982/**
1983 * ipr_log_dual_ioa_error - Log a dual adapter error.
1984 * @ioa_cfg:    ioa config struct
1985 * @hostrcb:    hostrcb struct
1986 *
1987 * Return value:
1988 *      none
1989 **/
1990static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1991                                   struct ipr_hostrcb *hostrcb)
1992{
1993        struct ipr_hostrcb_type_07_error *error;
1994
1995        error = &hostrcb->hcam.u.error.u.type_07_error;
1996        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1997        strim(error->failure_reason);
1998
1999        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2000                     be32_to_cpu(hostrcb->hcam.u.error.prc));
2001        ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2002        ipr_log_hex_data(ioa_cfg, error->data,
2003                         be32_to_cpu(hostrcb->hcam.length) -
2004                         (offsetof(struct ipr_hostrcb_error, u) +
2005                          offsetof(struct ipr_hostrcb_type_07_error, data)));
2006}
2007
2008static const struct {
2009        u8 active;
2010        char *desc;
2011} path_active_desc[] = {
2012        { IPR_PATH_NO_INFO, "Path" },
2013        { IPR_PATH_ACTIVE, "Active path" },
2014        { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2015};
2016
2017static const struct {
2018        u8 state;
2019        char *desc;
2020} path_state_desc[] = {
2021        { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2022        { IPR_PATH_HEALTHY, "is healthy" },
2023        { IPR_PATH_DEGRADED, "is degraded" },
2024        { IPR_PATH_FAILED, "is failed" }
2025};
2026
2027/**
2028 * ipr_log_fabric_path - Log a fabric path error
2029 * @hostrcb:    hostrcb struct
2030 * @fabric:             fabric descriptor
2031 *
2032 * Return value:
2033 *      none
2034 **/
2035static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2036                                struct ipr_hostrcb_fabric_desc *fabric)
2037{
2038        int i, j;
2039        u8 path_state = fabric->path_state;
2040        u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2041        u8 state = path_state & IPR_PATH_STATE_MASK;
2042
2043        for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2044                if (path_active_desc[i].active != active)
2045                        continue;
2046
2047                for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2048                        if (path_state_desc[j].state != state)
2049                                continue;
2050
2051                        if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2052                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2053                                             path_active_desc[i].desc, path_state_desc[j].desc,
2054                                             fabric->ioa_port);
2055                        } else if (fabric->cascaded_expander == 0xff) {
2056                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2057                                             path_active_desc[i].desc, path_state_desc[j].desc,
2058                                             fabric->ioa_port, fabric->phy);
2059                        } else if (fabric->phy == 0xff) {
2060                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2061                                             path_active_desc[i].desc, path_state_desc[j].desc,
2062                                             fabric->ioa_port, fabric->cascaded_expander);
2063                        } else {
2064                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2065                                             path_active_desc[i].desc, path_state_desc[j].desc,
2066                                             fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2067                        }
2068                        return;
2069                }
2070        }
2071
2072        ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2073                fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2074}
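
    /*
     * Note (illustrative reading): a cascaded_expander or phy value of 0xff
     * means "not present", which is why the branches above omit those fields
     * from the log line instead of printing 255.
     */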
2075
2076/**
2077 * ipr_log64_fabric_path - Log a fabric path error
2078 * @hostrcb:    hostrcb struct
2079 * @fabric:             fabric descriptor
2080 *
2081 * Return value:
2082 *      none
2083 **/
2084static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2085                                  struct ipr_hostrcb64_fabric_desc *fabric)
2086{
2087        int i, j;
2088        u8 path_state = fabric->path_state;
2089        u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2090        u8 state = path_state & IPR_PATH_STATE_MASK;
2091        char buffer[IPR_MAX_RES_PATH_LENGTH];
2092
2093        for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2094                if (path_active_desc[i].active != active)
2095                        continue;
2096
2097                for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2098                        if (path_state_desc[j].state != state)
2099                                continue;
2100
2101                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2102                                     path_active_desc[i].desc, path_state_desc[j].desc,
2103                                     ipr_format_res_path(hostrcb->ioa_cfg,
2104                                                fabric->res_path,
2105                                                buffer, sizeof(buffer)));
2106                        return;
2107                }
2108        }
2109
2110        ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2111                ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2112                                    buffer, sizeof(buffer)));
2113}
2114
2115static const struct {
2116        u8 type;
2117        char *desc;
2118} path_type_desc[] = {
2119        { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2120        { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2121        { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2122        { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2123};
2124
2125static const struct {
2126        u8 status;
2127        char *desc;
2128} path_status_desc[] = {
2129        { IPR_PATH_CFG_NO_PROB, "Functional" },
2130        { IPR_PATH_CFG_DEGRADED, "Degraded" },
2131        { IPR_PATH_CFG_FAILED, "Failed" },
2132        { IPR_PATH_CFG_SUSPECT, "Suspect" },
2133        { IPR_PATH_NOT_DETECTED, "Missing" },
2134        { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2135};
2136
2137static const char *link_rate[] = {
2138        "unknown",
2139        "disabled",
2140        "phy reset problem",
2141        "spinup hold",
2142        "port selector",
2143        "unknown",
2144        "unknown",
2145        "unknown",
2146        "1.5Gbps",
2147        "3.0Gbps",
2148        "unknown",
2149        "unknown",
2150        "unknown",
2151        "unknown",
2152        "unknown",
2153        "unknown"
2154};
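
    /*
     * Note (illustrative reading): the index into link_rate[] is the 4-bit
     * link rate code masked from cfg->link_rate with IPR_PHY_LINK_RATE_MASK;
     * codes 8 and 9 match the SAS 1.5Gbps and 3.0Gbps rates, the lower codes
     * are status indications, and the rest are unknown/reserved.
     */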
2155
2156/**
2157 * ipr_log_path_elem - Log a fabric path element.
2158 * @hostrcb:    hostrcb struct
2159 * @cfg:                fabric path element struct
2160 *
2161 * Return value:
2162 *      none
2163 **/
2164static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2165                              struct ipr_hostrcb_config_element *cfg)
2166{
2167        int i, j;
2168        u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2169        u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2170
2171        if (type == IPR_PATH_CFG_NOT_EXIST)
2172                return;
2173
2174        for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2175                if (path_type_desc[i].type != type)
2176                        continue;
2177
2178                for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2179                        if (path_status_desc[j].status != status)
2180                                continue;
2181
2182                        if (type == IPR_PATH_CFG_IOA_PORT) {
2183                                ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2184                                             path_status_desc[j].desc, path_type_desc[i].desc,
2185                                             cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2186                                             be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2187                        } else {
2188                                if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2189                                        ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2190                                                     path_status_desc[j].desc, path_type_desc[i].desc,
2191                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2192                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2193                                } else if (cfg->cascaded_expander == 0xff) {
2194                                        ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2195                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2196                                                     path_type_desc[i].desc, cfg->phy,
2197                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2198                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2199                                } else if (cfg->phy == 0xff) {
2200                                        ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2201                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2202                                                     path_type_desc[i].desc, cfg->cascaded_expander,
2203                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2204                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2205                                } else {
2206                                        ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2207                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2208                                                     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2209                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2210                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2211                                }
2212                        }
2213                        return;
2214                }
2215        }
2216
2217        ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2218                     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2219                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2220                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2221}
2222
2223/**
2224 * ipr_log64_path_elem - Log a fabric path element.
2225 * @hostrcb:    hostrcb struct
2226 * @cfg:                fabric path element struct
2227 *
2228 * Return value:
2229 *      none
2230 **/
2231static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2232                                struct ipr_hostrcb64_config_element *cfg)
2233{
2234        int i, j;
2235        u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2236        u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2237        u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2238        char buffer[IPR_MAX_RES_PATH_LENGTH];
2239
2240        if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2241                return;
2242
2243        for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2244                if (path_type_desc[i].type != type)
2245                        continue;
2246
2247                for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2248                        if (path_status_desc[j].status != status)
2249                                continue;
2250
2251                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2252                                     path_status_desc[j].desc, path_type_desc[i].desc,
2253                                     ipr_format_res_path(hostrcb->ioa_cfg,
2254                                        cfg->res_path, buffer, sizeof(buffer)),
2255                                        link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2256                                        be32_to_cpu(cfg->wwid[0]),
2257                                        be32_to_cpu(cfg->wwid[1]));
2258                        return;
2259                }
2260        }
2261        ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2262                     "WWN=%08X%08X\n", cfg->type_status,
2263                     ipr_format_res_path(hostrcb->ioa_cfg,
2264                        cfg->res_path, buffer, sizeof(buffer)),
2265                        link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2266                        be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2267}
2268
2269/**
2270 * ipr_log_fabric_error - Log a fabric error.
2271 * @ioa_cfg:    ioa config struct
2272 * @hostrcb:    hostrcb struct
2273 *
2274 * Return value:
2275 *      none
2276 **/
2277static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2278                                 struct ipr_hostrcb *hostrcb)
2279{
2280        struct ipr_hostrcb_type_20_error *error;
2281        struct ipr_hostrcb_fabric_desc *fabric;
2282        struct ipr_hostrcb_config_element *cfg;
2283        int i, add_len;
2284
2285        error = &hostrcb->hcam.u.error.u.type_20_error;
2286        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2287        ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2288
2289        add_len = be32_to_cpu(hostrcb->hcam.length) -
2290                (offsetof(struct ipr_hostrcb_error, u) +
2291                 offsetof(struct ipr_hostrcb_type_20_error, desc));
2292
2293        for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2294                ipr_log_fabric_path(hostrcb, fabric);
2295                for_each_fabric_cfg(fabric, cfg)
2296                        ipr_log_path_elem(hostrcb, cfg);
2297
2298                add_len -= be16_to_cpu(fabric->length);
2299                fabric = (struct ipr_hostrcb_fabric_desc *)
2300                        ((unsigned long)fabric + be16_to_cpu(fabric->length));
2301        }
2302
2303        ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2304}
2305
2306/**
2307 * ipr_log_sis64_array_error - Log a sis64 array error.
2308 * @ioa_cfg:    ioa config struct
2309 * @hostrcb:    hostrcb struct
2310 *
2311 * Return value:
2312 *      none
2313 **/
2314static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2315                                      struct ipr_hostrcb *hostrcb)
2316{
2317        int i, num_entries;
2318        struct ipr_hostrcb_type_24_error *error;
2319        struct ipr_hostrcb64_array_data_entry *array_entry;
2320        char buffer[IPR_MAX_RES_PATH_LENGTH];
2321        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2322
2323        error = &hostrcb->hcam.u.error64.u.type_24_error;
2324
2325        ipr_err_separator;
2326
2327        ipr_err("RAID %s Array Configuration: %s\n",
2328                error->protection_level,
2329                ipr_format_res_path(ioa_cfg, error->last_res_path,
2330                        buffer, sizeof(buffer)));
2331
2332        ipr_err_separator;
2333
2334        array_entry = error->array_member;
2335        num_entries = min_t(u32, error->num_entries,
2336                            ARRAY_SIZE(error->array_member));
2337
2338        for (i = 0; i < num_entries; i++, array_entry++) {
2340                if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2341                        continue;
2342
2343                if (error->exposed_mode_adn == i)
2344                        ipr_err("Exposed Array Member %d:\n", i);
2345                else
2346                        ipr_err("Array Member %d:\n", i);
2347
2348                ipr_err("Array Member %d:\n", i);
2349                ipr_log_ext_vpd(&array_entry->vpd);
2350                ipr_err("Current Location: %s\n",
2351                         ipr_format_res_path(ioa_cfg, array_entry->res_path,
2352                                buffer, sizeof(buffer)));
2353                ipr_err("Expected Location: %s\n",
2354                         ipr_format_res_path(ioa_cfg,
2355                                array_entry->expected_res_path,
2356                                buffer, sizeof(buffer)));
2357
2358                ipr_err_separator;
2359        }
2360}
2361
2362/**
2363 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2364 * @ioa_cfg:    ioa config struct
2365 * @hostrcb:    hostrcb struct
2366 *
2367 * Return value:
2368 *      none
2369 **/
2370static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2371                                       struct ipr_hostrcb *hostrcb)
2372{
2373        struct ipr_hostrcb_type_30_error *error;
2374        struct ipr_hostrcb64_fabric_desc *fabric;
2375        struct ipr_hostrcb64_config_element *cfg;
2376        int i, add_len;
2377
2378        error = &hostrcb->hcam.u.error64.u.type_30_error;
2379
2380        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2381        ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2382
2383        add_len = be32_to_cpu(hostrcb->hcam.length) -
2384                (offsetof(struct ipr_hostrcb64_error, u) +
2385                 offsetof(struct ipr_hostrcb_type_30_error, desc));
2386
2387        for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2388                ipr_log64_fabric_path(hostrcb, fabric);
2389                for_each_fabric_cfg(fabric, cfg)
2390                        ipr_log64_path_elem(hostrcb, cfg);
2391
2392                add_len -= be16_to_cpu(fabric->length);
2393                fabric = (struct ipr_hostrcb64_fabric_desc *)
2394                        ((unsigned long)fabric + be16_to_cpu(fabric->length));
2395        }
2396
2397        ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2398}
2399
2400/**
2401 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2402 * @ioa_cfg:    ioa config struct
2403 * @hostrcb:    hostrcb struct
2404 *
2405 * Return value:
2406 *      none
2407 **/
2408static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2409                                       struct ipr_hostrcb *hostrcb)
2410{
2411        struct ipr_hostrcb_type_41_error *error;
2412
2413        error = &hostrcb->hcam.u.error64.u.type_41_error;
2414
2415        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2416        ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2417        ipr_log_hex_data(ioa_cfg, error->data,
2418                         be32_to_cpu(hostrcb->hcam.length) -
2419                         (offsetof(struct ipr_hostrcb_error, u) +
2420                          offsetof(struct ipr_hostrcb_type_41_error, data)));
2421}

2422/**
2423 * ipr_log_generic_error - Log an adapter error.
2424 * @ioa_cfg:    ioa config struct
2425 * @hostrcb:    hostrcb struct
2426 *
2427 * Return value:
2428 *      none
2429 **/
2430static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2431                                  struct ipr_hostrcb *hostrcb)
2432{
2433        ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2434                         be32_to_cpu(hostrcb->hcam.length));
2435}
2436
2437/**
2438 * ipr_log_sis64_device_error - Log a sis64 device error.
2439 * @ioa_cfg:    ioa config struct
2440 * @hostrcb:    hostrcb struct
2441 *
2442 * Return value:
2443 *      none
2444 **/
2445static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2446                                         struct ipr_hostrcb *hostrcb)
2447{
2448        struct ipr_hostrcb_type_21_error *error;
2449        char buffer[IPR_MAX_RES_PATH_LENGTH];
2450
2451        error = &hostrcb->hcam.u.error64.u.type_21_error;
2452
2453        ipr_err("-----Failing Device Information-----\n");
2454        ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2455                be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2456                be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2457        ipr_err("Device Resource Path: %s\n",
2458                __ipr_format_res_path(error->res_path,
2459                                      buffer, sizeof(buffer)));
2460        error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2461        error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2462        ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2463        ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2464        ipr_err("SCSI Sense Data:\n");
2465        ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2466        ipr_err("SCSI Command Descriptor Block: \n");
2467        ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2468
2469        ipr_err("Additional IOA Data:\n");
2470        ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2471}
2472
2473/**
2474 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2475 * @ioasc:      IOASC
2476 *
2477 * This function will return the index into the ipr_error_table
2478 * for the specified IOASC. If the IOASC is not in the table,
2479 * 0 will be returned, which points to the entry used for unknown errors.
2480 *
2481 * Return value:
2482 *      index into the ipr_error_table
2483 **/
2484static u32 ipr_get_error(u32 ioasc)
2485{
2486        int i;
2487
2488        for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2489                if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2490                        return i;
2491
2492        return 0;
2493}
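
    /*
     * Usage sketch (illustrative): the function applies IPR_IOASC_IOASC_MASK
     * itself, and index 0 doubles as the "unknown error" entry, so the
     * result can index ipr_error_table unconditionally:
     *
     *   error_index = ipr_get_error(ioasc);
     *   ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
     */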
2494
2495/**
2496 * ipr_handle_log_data - Log an adapter error.
2497 * @ioa_cfg:    ioa config struct
2498 * @hostrcb:    hostrcb struct
2499 *
2500 * This function logs an adapter error to the system.
2501 *
2502 * Return value:
2503 *      none
2504 **/
2505static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2506                                struct ipr_hostrcb *hostrcb)
2507{
2508        u32 ioasc;
2509        int error_index;
2510        struct ipr_hostrcb_type_21_error *error;
2511
2512        if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2513                return;
2514
2515        if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2516                dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2517
2518        if (ioa_cfg->sis64)
2519                ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2520        else
2521                ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2522
2523        if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2524            ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2525                /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2526                scsi_report_bus_reset(ioa_cfg->host,
2527                                      hostrcb->hcam.u.error.fd_res_addr.bus);
2528        }
2529
2530        error_index = ipr_get_error(ioasc);
2531
2532        if (!ipr_error_table[error_index].log_hcam)
2533                return;
2534
2535        if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2536            hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2537                error = &hostrcb->hcam.u.error64.u.type_21_error;
2538
2539                if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2540                    ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2541                        return;
2542        }
2543
2544        ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2545
2546        /* Set indication we have logged an error */
2547        ioa_cfg->errors_logged++;
2548
2549        if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2550                return;
2551        if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2552                hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2553
2554        switch (hostrcb->hcam.overlay_id) {
2555        case IPR_HOST_RCB_OVERLAY_ID_2:
2556                ipr_log_cache_error(ioa_cfg, hostrcb);
2557                break;
2558        case IPR_HOST_RCB_OVERLAY_ID_3:
2559                ipr_log_config_error(ioa_cfg, hostrcb);
2560                break;
2561        case IPR_HOST_RCB_OVERLAY_ID_4:
2562        case IPR_HOST_RCB_OVERLAY_ID_6:
2563                ipr_log_array_error(ioa_cfg, hostrcb);
2564                break;
2565        case IPR_HOST_RCB_OVERLAY_ID_7:
2566                ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2567                break;
2568        case IPR_HOST_RCB_OVERLAY_ID_12:
2569                ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2570                break;
2571        case IPR_HOST_RCB_OVERLAY_ID_13:
2572                ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2573                break;
2574        case IPR_HOST_RCB_OVERLAY_ID_14:
2575        case IPR_HOST_RCB_OVERLAY_ID_16:
2576                ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2577                break;
2578        case IPR_HOST_RCB_OVERLAY_ID_17:
2579                ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2580                break;
2581        case IPR_HOST_RCB_OVERLAY_ID_20:
2582                ipr_log_fabric_error(ioa_cfg, hostrcb);
2583                break;
2584        case IPR_HOST_RCB_OVERLAY_ID_21:
2585                ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2586                break;
2587        case IPR_HOST_RCB_OVERLAY_ID_23:
2588                ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2589                break;
2590        case IPR_HOST_RCB_OVERLAY_ID_24:
2591        case IPR_HOST_RCB_OVERLAY_ID_26:
2592                ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2593                break;
2594        case IPR_HOST_RCB_OVERLAY_ID_30:
2595                ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2596                break;
2597        case IPR_HOST_RCB_OVERLAY_ID_41:
2598                ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2599                break;
2600        case IPR_HOST_RCB_OVERLAY_ID_1:
2601        case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2602        default:
2603                ipr_log_generic_error(ioa_cfg, hostrcb);
2604                break;
2605        }
2606}
2607
2608static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2609{
2610        struct ipr_hostrcb *hostrcb;
2611
2612        hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2613                                        struct ipr_hostrcb, queue);
2614
2615        if (unlikely(!hostrcb)) {
2616                dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2617                hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2618                                                struct ipr_hostrcb, queue);
2619        }
2620
2621        list_del_init(&hostrcb->queue);
2622        return hostrcb;
2623}
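
    /*
     * Note (illustrative reading): the reclaim path above appears to assume
     * hostrcb_report_q is never empty when the free list is exhausted; if
     * both lists were empty, the list_del_init() would dereference a NULL
     * hostrcb.
     */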
2624
2625/**
2626 * ipr_process_error - Op done function for an adapter error log.
2627 * @ipr_cmd:    ipr command struct
2628 *
2629 * This function is the op done function for an error log host
2630 * controlled async message (HCAM) from the adapter. It will log the error and
2631 * send the HCAM back to the adapter.
2632 *
2633 * Return value:
2634 *      none
2635 **/
2636static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2637{
2638        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2639        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2640        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2641        u32 fd_ioasc;
2642
2643        if (ioa_cfg->sis64)
2644                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2645        else
2646                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2647
2648        list_del_init(&hostrcb->queue);
2649        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2650
2651        if (!ioasc) {
2652                ipr_handle_log_data(ioa_cfg, hostrcb);
2653                if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2654                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2655        } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2656                   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2657                dev_err(&ioa_cfg->pdev->dev,
2658                        "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2659        }
2660
2661        list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2662        schedule_work(&ioa_cfg->work_q);
2663        hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2664
2665        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2666}
2667
2668/**
2669 * ipr_timeout - An internally generated op has timed out.
2670 * @t:          Timer context used to fetch ipr command struct
2671 *
2672 * This function blocks host requests and initiates an
2673 * adapter reset.
2674 *
2675 * Return value:
2676 *      none
2677 **/
2678static void ipr_timeout(struct timer_list *t)
2679{
2680        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2681        unsigned long lock_flags = 0;
2682        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2683
2684        ENTER;
2685        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2686
2687        ioa_cfg->errors_logged++;
2688        dev_err(&ioa_cfg->pdev->dev,
2689                "Adapter being reset due to command timeout.\n");
2690
2691        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2692                ioa_cfg->sdt_state = GET_DUMP;
2693
2694        if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2695                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2696
2697        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2698        LEAVE;
2699}
2700
2701/**
2702 * ipr_oper_timeout - Adapter timed out transitioning to operational
2703 * @t:          Timer context used to fetch the ipr command struct
2704 *
2705 * This function blocks host requests and initiates an
2706 * adapter reset.
2707 *
2708 * Return value:
2709 *      none
2710 **/
2711static void ipr_oper_timeout(struct timer_list *t)
2712{
2713        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2714        unsigned long lock_flags = 0;
2715        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2716
2717        ENTER;
2718        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2719
2720        ioa_cfg->errors_logged++;
2721        dev_err(&ioa_cfg->pdev->dev,
2722                "Adapter timed out transitioning to operational.\n");
2723
2724        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2725                ioa_cfg->sdt_state = GET_DUMP;
2726
2727        if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2728                if (ipr_fastfail)
2729                        ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2730                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2731        }
2732
2733        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734        LEAVE;
2735}
2736
2737/**
2738 * ipr_find_ses_entry - Find matching SES in SES table
2739 * @res:        resource entry struct of SES
2740 *
2741 * Return value:
2742 *      pointer to SES table entry / NULL on failure
2743 **/
2744static const struct ipr_ses_table_entry *
2745ipr_find_ses_entry(struct ipr_resource_entry *res)
2746{
2747        int i, j, matches;
2748        struct ipr_std_inq_vpids *vpids;
2749        const struct ipr_ses_table_entry *ste = ipr_ses_table;
2750
2751        for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2752                for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
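                            /* An 'X' in the compare mask marks a product ID
                             * byte that must match exactly; any other byte
                             * position is treated as a wildcard.
                             */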
2753                        if (ste->compare_product_id_byte[j] == 'X') {
2754                                vpids = &res->std_inq_data.vpids;
2755                                if (vpids->product_id[j] == ste->product_id[j])
2756                                        matches++;
2757                                else
2758                                        break;
2759                        } else
2760                                matches++;
2761                }
2762
2763                if (matches == IPR_PROD_ID_LEN)
2764                        return ste;
2765        }
2766
2767        return NULL;
2768}
2769
2770/**
2771 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2772 * @ioa_cfg:    ioa config struct
2773 * @bus:        SCSI bus
2774 * @bus_width:  bus width
2775 *
2776 * Return value:
2777 *      SCSI bus speed in units of 100 kHz (e.g. 1600 is 160 MHz).
2778 *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2779 *      MB/sec is twice the bus speed (e.g. a wide enabled bus at
2780 *      160 MHz moves up to 320 MB/sec).
2781 **/
2782static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2783{
2784        struct ipr_resource_entry *res;
2785        const struct ipr_ses_table_entry *ste;
2786        u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2787
2788        /* Loop through each config table entry in the config table buffer */
2789        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2790                if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2791                        continue;
2792
2793                if (bus != res->bus)
2794                        continue;
2795
2796                if (!(ste = ipr_find_ses_entry(res)))
2797                        continue;
2798
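                    /* Convert the table's transfer-rate limit into a bus
                     * speed in 100 kHz units; a 2-byte wide bus carries twice
                     * the data per clock, so its clock limit is halved.
                     */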
2799                max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2800        }
2801
2802        return max_xfer_rate;
2803}
2804
2805/**
2806 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2807 * @ioa_cfg:            ioa config struct
2808 * @max_delay:          max delay in micro-seconds to wait
2809 *
2810 * Busy-waits for an IODEBUG ACK from the IOA.
2811 *
2812 * Return value:
2813 *      0 on success / other on failure
2814 **/
2815static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2816{
2817        volatile u32 pcii_reg;
2818        int delay = 1;
2819
2820        /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2821        while (delay < max_delay) {
2822                pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2823
2824                if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2825                        return 0;
2826
2827                /* udelay cannot be used if delay is more than a few milliseconds */
2828                if ((delay / 1000) > MAX_UDELAY_MS)
2829                        mdelay(delay / 1000);
2830                else
2831                        udelay(delay);
2832
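                    /* Exponential backoff: double the wait each iteration. */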
2833                delay += delay;
2834        }
2835        return -EIO;
2836}
2837
2838/**
2839 * ipr_get_sis64_dump_data_section - Dump IOA memory
2840 * @ioa_cfg:                    ioa config struct
2841 * @start_addr:                 adapter address to dump
2842 * @dest:                       destination kernel buffer
2843 * @length_in_words:            length to dump in 4 byte words
2844 *
2845 * Return value:
2846 *      0 on success
2847 **/
2848static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2849                                           u32 start_addr,
2850                                           __be32 *dest, u32 length_in_words)
2851{
2852        int i;
2853
2854        for (i = 0; i < length_in_words; i++) {
2855                writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2856                *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2857                dest++;
2858        }
2859
2860        return 0;
2861}
2862
2863/**
2864 * ipr_get_ldump_data_section - Dump IOA memory
2865 * @ioa_cfg:                    ioa config struct
2866 * @start_addr:                 adapter address to dump
2867 * @dest:                       destination kernel buffer
2868 * @length_in_words:            length to dump in 4 byte words
2869 *
2870 * Return value:
2871 *      0 on success / -EIO on failure
2872 **/
2873static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2874                                      u32 start_addr,
2875                                      __be32 *dest, u32 length_in_words)
2876{
2877        volatile u32 temp_pcii_reg;
2878        int i, delay = 0;
2879
2880        if (ioa_cfg->sis64)
2881                return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2882                                                       dest, length_in_words);
2883
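            /* 32-bit adapters use a mailbox handshake: raise the reset and
             * IO debug alerts, wait for an IO debug ack, then read one word
             * per ack from the mailbox until the requested length has been
             * transferred.
             */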
2884        /* Write IOA interrupt reg starting LDUMP state  */
2885        writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2886               ioa_cfg->regs.set_uproc_interrupt_reg32);
2887
2888        /* Wait for IO debug acknowledge */
2889        if (ipr_wait_iodbg_ack(ioa_cfg,
2890                               IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2891                dev_err(&ioa_cfg->pdev->dev,
2892                        "IOA dump long data transfer timeout\n");
2893                return -EIO;
2894        }
2895
2896        /* Signal LDUMP interlocked - clear IO debug ack */
2897        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2898               ioa_cfg->regs.clr_interrupt_reg);
2899
2900        /* Write Mailbox with starting address */
2901        writel(start_addr, ioa_cfg->ioa_mailbox);
2902
2903        /* Signal address valid - clear IOA Reset alert */
2904        writel(IPR_UPROCI_RESET_ALERT,
2905               ioa_cfg->regs.clr_uproc_interrupt_reg32);
2906
2907        for (i = 0; i < length_in_words; i++) {
2908                /* Wait for IO debug acknowledge */
2909                if (ipr_wait_iodbg_ack(ioa_cfg,
2910                                       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2911                        dev_err(&ioa_cfg->pdev->dev,
2912                                "IOA dump short data transfer timeout\n");
2913                        return -EIO;
2914                }
2915
2916                /* Read data from mailbox and increment destination pointer */
2917                *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2918                dest++;
2919
2920                /* For all but the last word of data, signal data received */
2921                if (i < (length_in_words - 1)) {
2922                        /* Signal dump data received - Clear IO debug Ack */
2923                        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2924                               ioa_cfg->regs.clr_interrupt_reg);
2925                }
2926        }
2927
2928        /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2929        writel(IPR_UPROCI_RESET_ALERT,
2930               ioa_cfg->regs.set_uproc_interrupt_reg32);
2931
2932        writel(IPR_UPROCI_IO_DEBUG_ALERT,
2933               ioa_cfg->regs.clr_uproc_interrupt_reg32);
2934
2935        /* Signal dump data received - Clear IO debug Ack */
2936        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2937               ioa_cfg->regs.clr_interrupt_reg);
2938
2939        /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2940        while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2941                temp_pcii_reg =
2942                    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2943
2944                if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2945                        return 0;
2946
2947                udelay(10);
2948                delay += 10;
2949        }
2950
2951        return 0;
2952}
2953
2954#ifdef CONFIG_SCSI_IPR_DUMP
2955/**
2956 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2957 * @ioa_cfg:            ioa config struct
2958 * @pci_address:        adapter address
2959 * @length:             length of data to copy
2960 *
2961 * Copy data from PCI adapter to kernel buffer.
2962 * Note: length MUST be a 4 byte multiple
2963 * Return value:
2964 *      0 on success / other on failure
2965 **/
2966static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2967                        unsigned long pci_address, u32 length)
2968{
2969        int bytes_copied = 0;
2970        int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2971        __be32 *page;
2972        unsigned long lock_flags = 0;
2973        struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2974
2975        if (ioa_cfg->sis64)
2976                max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2977        else
2978                max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2979
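            /* Copy the dump in page-sized chunks, allocating dump pages as
             * needed; the host lock is only held while fetching each chunk,
             * so an abort can be noticed and other work can run in between.
             */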
2980        while (bytes_copied < length &&
2981               (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2982                if (ioa_dump->page_offset >= PAGE_SIZE ||
2983                    ioa_dump->page_offset == 0) {
2984                        page = (__be32 *)__get_free_page(GFP_ATOMIC);
2985
2986                        if (!page) {
2987                                ipr_trace;
2988                                return bytes_copied;
2989                        }
2990
2991                        ioa_dump->page_offset = 0;
2992                        ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2993                        ioa_dump->next_page_index++;
2994                } else
2995                        page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2996
2997                rem_len = length - bytes_copied;
2998                rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2999                cur_len = min(rem_len, rem_page_len);
3000
3001                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3002                if (ioa_cfg->sdt_state == ABORT_DUMP) {
3003                        rc = -EIO;
3004                } else {
3005                        rc = ipr_get_ldump_data_section(ioa_cfg,
3006                                                        pci_address + bytes_copied,
3007                                                        &page[ioa_dump->page_offset / 4],
3008                                                        (cur_len / sizeof(u32)));
3009                }
3010                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3011
3012                if (!rc) {
3013                        ioa_dump->page_offset += cur_len;
3014                        bytes_copied += cur_len;
3015                } else {
3016                        ipr_trace;
3017                        break;
3018                }
3019                schedule();
3020        }
3021
3022        return bytes_copied;
3023}
3024
3025/**
3026 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3027 * @hdr:        dump entry header struct
3028 *
3029 * Return value:
3030 *      nothing
3031 **/
3032static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3033{
3034        hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3035        hdr->num_elems = 1;
3036        hdr->offset = sizeof(*hdr);
3037        hdr->status = IPR_DUMP_STATUS_SUCCESS;
3038}
3039
3040/**
3041 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3042 * @ioa_cfg:    ioa config struct
3043 * @driver_dump:        driver dump struct
3044 *
3045 * Return value:
3046 *      nothing
3047 **/
3048static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3049                                   struct ipr_driver_dump *driver_dump)
3050{
3051        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3052
3053        ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3054        driver_dump->ioa_type_entry.hdr.len =
3055                sizeof(struct ipr_dump_ioa_type_entry) -
3056                sizeof(struct ipr_dump_entry_header);
3057        driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3058        driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3059        driver_dump->ioa_type_entry.type = ioa_cfg->type;
3060        driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3061                (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3062                ucode_vpd->minor_release[1];
3063        driver_dump->hdr.num_entries++;
3064}
3065
3066/**
3067 * ipr_dump_version_data - Fill in the driver version in the dump.
3068 * @ioa_cfg:    ioa config struct
3069 * @driver_dump:        driver dump struct
3070 *
3071 * Return value:
3072 *      nothing
3073 **/
3074static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3075                                  struct ipr_driver_dump *driver_dump)
3076{
3077        ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3078        driver_dump->version_entry.hdr.len =
3079                sizeof(struct ipr_dump_version_entry) -
3080                sizeof(struct ipr_dump_entry_header);
3081        driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3082        driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3083        strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3084        driver_dump->hdr.num_entries++;
3085}
3086
3087/**
3088 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3089 * @ioa_cfg:    ioa config struct
3090 * @driver_dump:        driver dump struct
3091 *
3092 * Return value:
3093 *      nothing
3094 **/
3095static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3096                                   struct ipr_driver_dump *driver_dump)
3097{
3098        ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3099        driver_dump->trace_entry.hdr.len =
3100                sizeof(struct ipr_dump_trace_entry) -
3101                sizeof(struct ipr_dump_entry_header);
3102        driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3103        driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3104        memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3105        driver_dump->hdr.num_entries++;
3106}
3107
3108/**
3109 * ipr_dump_location_data - Fill in the IOA location in the dump.
3110 * @ioa_cfg:    ioa config struct
3111 * @driver_dump:        driver dump struct
3112 *
3113 * Return value:
3114 *      nothing
3115 **/
3116static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3117                                   struct ipr_driver_dump *driver_dump)
3118{
3119        ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3120        driver_dump->location_entry.hdr.len =
3121                sizeof(struct ipr_dump_location_entry) -
3122                sizeof(struct ipr_dump_entry_header);
3123        driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3124        driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3125        strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3126        driver_dump->hdr.num_entries++;
3127}
3128
3129/**
3130 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3131 * @ioa_cfg:    ioa config struct
3132 * @dump:       dump struct
3133 *
3134 * Return value:
3135 *      nothing
3136 **/
3137static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3138{
3139        unsigned long start_addr, sdt_word;
3140        unsigned long lock_flags = 0;
3141        struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3142        struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3143        u32 num_entries, max_num_entries, start_off, end_off;
3144        u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3145        struct ipr_sdt *sdt;
3146        int valid = 1;
3147        int i;
3148
3149        ENTER;
3150
3151        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3152
3153        if (ioa_cfg->sdt_state != READ_DUMP) {
3154                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3155                return;
3156        }
3157
3158        if (ioa_cfg->sis64) {
3159                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160                ssleep(IPR_DUMP_DELAY_SECONDS);
3161                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3162        }
3163
3164        start_addr = readl(ioa_cfg->ioa_mailbox);
3165
3166        if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3167                dev_err(&ioa_cfg->pdev->dev,
3168                        "Invalid dump table format: %lx\n", start_addr);
3169                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3170                return;
3171        }
3172
3173        dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3174
3175        driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3176
3177        /* Initialize the overall dump header */
3178        driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3179        driver_dump->hdr.num_entries = 1;
3180        driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3181        driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3182        driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3183        driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3184
3185        ipr_dump_version_data(ioa_cfg, driver_dump);
3186        ipr_dump_location_data(ioa_cfg, driver_dump);
3187        ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3188        ipr_dump_trace_data(ioa_cfg, driver_dump);
3189
3190        /* Update dump_header */
3191        driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3192
3193        /* IOA Dump entry */
3194        ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3195        ioa_dump->hdr.len = 0;
3196        ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3197        ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3198
3199        /* First entries in sdt are actually a list of dump addresses and
3200         * lengths to gather the real dump data.  sdt represents the pointer
3201         * to the ioa generated dump table.  Dump data will be extracted based
3202         * on entries in this table. */
3203        sdt = &ioa_dump->sdt;
3204
3205        if (ioa_cfg->sis64) {
3206                max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3207                max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3208        } else {
3209                max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3210                max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3211        }
3212
3213        bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3214                        (max_num_entries * sizeof(struct ipr_sdt_entry));
3215        rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3216                                        bytes_to_copy / sizeof(__be32));
3217
3218        /* Smart Dump table is ready to use and the first entry is valid */
3219        if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3220            (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3221                dev_err(&ioa_cfg->pdev->dev,
3222                        "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3223                        rc, be32_to_cpu(sdt->hdr.state));
3224                driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3225                ioa_cfg->sdt_state = DUMP_OBTAINED;
3226                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227                return;
3228        }
3229
3230        num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3231
3232        if (num_entries > max_num_entries)
3233                num_entries = max_num_entries;
3234
3235        /* Update dump length to the actual data to be copied */
3236        dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3237        if (ioa_cfg->sis64)
3238                dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3239        else
3240                dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3241
3242        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3243
3244        for (i = 0; i < num_entries; i++) {
3245                if (ioa_dump->hdr.len > max_dump_size) {
3246                        driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3247                        break;
3248                }
3249
3250                if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
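                            /* sis64 entries carry the byte count directly in
                             * the end token; fmt2 entries encode start/end
                             * offsets that must be masked and subtracted.
                             */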
3251                        sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3252                        if (ioa_cfg->sis64)
3253                                bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3254                        else {
3255                                start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3256                                end_off = be32_to_cpu(sdt->entry[i].end_token);
3257
3258                                if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3259                                        bytes_to_copy = end_off - start_off;
3260                                else
3261                                        valid = 0;
3262                        }
3263                        if (valid) {
3264                                if (bytes_to_copy > max_dump_size) {
3265                                        sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3266                                        continue;
3267                                }
3268
3269                                /* Copy data from adapter to driver buffers */
3270                                bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3271                                                            bytes_to_copy);
3272
3273                                ioa_dump->hdr.len += bytes_copied;
3274
3275                                if (bytes_copied != bytes_to_copy) {
3276                                        driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3277                                        break;
3278                                }
3279                        }
3280                }
3281        }
3282
3283        dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3284
3285        /* Update dump_header */
3286        driver_dump->hdr.len += ioa_dump->hdr.len;
3287        wmb();
3288        ioa_cfg->sdt_state = DUMP_OBTAINED;
3289        LEAVE;
3290}
3291
3292#else
3293#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3294#endif
3295
3296/**
3297 * ipr_release_dump - Free adapter dump memory
3298 * @kref:       kref struct
3299 *
3300 * Return value:
3301 *      nothing
3302 **/
3303static void ipr_release_dump(struct kref *kref)
3304{
3305        struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3306        struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3307        unsigned long lock_flags = 0;
3308        int i;
3309
3310        ENTER;
3311        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3312        ioa_cfg->dump = NULL;
3313        ioa_cfg->sdt_state = INACTIVE;
3314        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315
3316        for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3317                free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3318
3319        vfree(dump->ioa_dump.ioa_data);
3320        kfree(dump);
3321        LEAVE;
3322}
3323
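    /**
     * ipr_add_remove_thread - Mid-layer device add/remove worker
     * @work:       work struct
     *
     * Return value:
     *      nothing
     **/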
3324static void ipr_add_remove_thread(struct work_struct *work)
3325{
3326        unsigned long lock_flags;
3327        struct ipr_resource_entry *res;
3328        struct scsi_device *sdev;
3329        struct ipr_ioa_cfg *ioa_cfg =
3330                container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3331        u8 bus, target, lun;
3332        int did_work;
3333
3334        ENTER;
3335        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336
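            /* scsi_remove_device()/scsi_add_device() can sleep, so the host
             * lock is dropped around each call and the resource list is
             * rescanned from the start afterwards.
             */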
3337restart:
3338        do {
3339                did_work = 0;
3340                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3341                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3342                        return;
3343                }
3344
3345                list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3346                        if (res->del_from_ml && res->sdev) {
3347                                did_work = 1;
3348                                sdev = res->sdev;
3349                                if (!scsi_device_get(sdev)) {
3350                                        if (!res->add_to_ml)
3351                                                list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3352                                        else
3353                                                res->del_from_ml = 0;
3354                                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3355                                        scsi_remove_device(sdev);
3356                                        scsi_device_put(sdev);
3357                                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3358                                }
3359                                break;
3360                        }
3361                }
3362        } while (did_work);
3363
3364        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3365                if (res->add_to_ml) {
3366                        bus = res->bus;
3367                        target = res->target;
3368                        lun = res->lun;
3369                        res->add_to_ml = 0;
3370                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3371                        scsi_add_device(ioa_cfg->host, bus, target, lun);
3372                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3373                        goto restart;
3374                }
3375        }
3376
3377        ioa_cfg->scan_done = 1;
3378        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3379        kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3380        LEAVE;
3381}
3382
3383/**
3384 * ipr_worker_thread - Worker thread
3385 * @work:               ioa config struct
3386 *
3387 * Called at task level from a work thread. This function takes care
3388 * of adding and removing devices from the mid-layer as configuration
3389 * changes are detected by the adapter.
3390 *
3391 * Return value:
3392 *      nothing
3393 **/
3394static void ipr_worker_thread(struct work_struct *work)
3395{
3396        unsigned long lock_flags;
3397        struct ipr_dump *dump;
3398        struct ipr_ioa_cfg *ioa_cfg =
3399                container_of(work, struct ipr_ioa_cfg, work_q);
3400
3401        ENTER;
3402        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3403
3404        if (ioa_cfg->sdt_state == READ_DUMP) {
3405                dump = ioa_cfg->dump;
3406                if (!dump) {
3407                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3408                        return;
3409                }
3410                kref_get(&dump->kref);
3411                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3412                ipr_get_ioa_dump(ioa_cfg, dump);
3413                kref_put(&dump->kref, ipr_release_dump);
3414
3415                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3416                if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3417                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3418                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3419                return;
3420        }
3421
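            /* The host lock is dropped across scsi_unblock_requests(); if a
             * reset re-blocked the host in that window, scsi_blocked is set
             * again and the block is re-applied once the lock is reacquired.
             */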
3422        if (ioa_cfg->scsi_unblock) {
3423                ioa_cfg->scsi_unblock = 0;
3424                ioa_cfg->scsi_blocked = 0;
3425                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3426                scsi_unblock_requests(ioa_cfg->host);
3427                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3428                if (ioa_cfg->scsi_blocked)
3429                        scsi_block_requests(ioa_cfg->host);
3430        }
3431
3432        if (!ioa_cfg->scan_enabled) {
3433                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3434                return;
3435        }
3436
3437        schedule_work(&ioa_cfg->scsi_add_work_q);
3438
3439        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440        LEAVE;
3441}
3442
3443#ifdef CONFIG_SCSI_IPR_TRACE
3444/**
3445 * ipr_read_trace - Dump the adapter trace
3446 * @filp:               open sysfs file
3447 * @kobj:               kobject struct
3448 * @bin_attr:           bin_attribute struct
3449 * @buf:                buffer
3450 * @off:                offset
3451 * @count:              buffer size
3452 *
3453 * Return value:
3454 *      number of bytes printed to buffer
3455 **/
3456static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3457                              struct bin_attribute *bin_attr,
3458                              char *buf, loff_t off, size_t count)
3459{
3460        struct device *dev = container_of(kobj, struct device, kobj);
3461        struct Scsi_Host *shost = class_to_shost(dev);
3462        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463        unsigned long lock_flags = 0;
3464        ssize_t ret;
3465
3466        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3467        ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3468                                IPR_TRACE_SIZE);
3469        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3470
3471        return ret;
3472}
3473
3474static struct bin_attribute ipr_trace_attr = {
3475        .attr = {
3476                .name = "trace",
3477                .mode = S_IRUGO,
3478        },
3479        .size = 0,
3480        .read = ipr_read_trace,
3481};
3482#endif
3483
3484/**
3485 * ipr_show_fw_version - Show the firmware version
3486 * @dev:        class device struct
3487 * @buf:        buffer
3488 *
3489 * Return value:
3490 *      number of bytes printed to buffer
3491 **/
3492static ssize_t ipr_show_fw_version(struct device *dev,
3493                                   struct device_attribute *attr, char *buf)
3494{
3495        struct Scsi_Host *shost = class_to_shost(dev);
3496        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3497        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3498        unsigned long lock_flags = 0;
3499        int len;
3500
3501        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3502        len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3503                       ucode_vpd->major_release, ucode_vpd->card_type,
3504                       ucode_vpd->minor_release[0],
3505                       ucode_vpd->minor_release[1]);
3506        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3507        return len;
3508}
3509
3510static struct device_attribute ipr_fw_version_attr = {
3511        .attr = {
3512                .name =         "fw_version",
3513                .mode =         S_IRUGO,
3514        },
3515        .show = ipr_show_fw_version,
3516};
3517
3518/**
3519 * ipr_show_log_level - Show the adapter's error logging level
3520 * @dev:        class device struct
3521 * @buf:        buffer
3522 *
3523 * Return value:
3524 *      number of bytes printed to buffer
3525 **/
3526static ssize_t ipr_show_log_level(struct device *dev,
3527                                   struct device_attribute *attr, char *buf)
3528{
3529        struct Scsi_Host *shost = class_to_shost(dev);
3530        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3531        unsigned long lock_flags = 0;
3532        int len;
3533
3534        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3535        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3536        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3537        return len;
3538}
3539
3540/**
3541 * ipr_store_log_level - Change the adapter's error logging level
3542 * @dev:        class device struct
3543 * @buf:        buffer
3544 *
3545 * Return value:
3546 *      number of bytes consumed from buffer
3547 **/
3548static ssize_t ipr_store_log_level(struct device *dev,
3549                                   struct device_attribute *attr,
3550                                   const char *buf, size_t count)
3551{
3552        struct Scsi_Host *shost = class_to_shost(dev);
3553        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3554        unsigned long lock_flags = 0;
3555
3556        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557        ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3558        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3559        return strlen(buf);
3560}
3561
3562static struct device_attribute ipr_log_level_attr = {
3563        .attr = {
3564                .name =         "log_level",
3565                .mode =         S_IRUGO | S_IWUSR,
3566        },
3567        .show = ipr_show_log_level,
3568        .store = ipr_store_log_level
3569};
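
    /* Example usage (sysfs path is illustrative):
     *   echo 4 > /sys/class/scsi_host/host0/log_level
     */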
3570
3571/**
3572 * ipr_store_diagnostics - IOA Diagnostics interface
3573 * @dev:        device struct
3574 * @buf:        buffer
3575 * @count:      buffer size
3576 *
3577 * This function will reset the adapter and wait a reasonable
3578 * amount of time for any errors that the adapter might log.
3579 *
3580 * Return value:
3581 *      count on success / other on failure
3582 **/
3583static ssize_t ipr_store_diagnostics(struct device *dev,
3584                                     struct device_attribute *attr,
3585                                     const char *buf, size_t count)
3586{
3587        struct Scsi_Host *shost = class_to_shost(dev);
3588        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3589        unsigned long lock_flags = 0;
3590        int rc = count;
3591
3592        if (!capable(CAP_SYS_ADMIN))
3593                return -EACCES;
3594
3595        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3596        while (ioa_cfg->in_reset_reload) {
3597                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3598                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3599                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3600        }
3601
3602        ioa_cfg->errors_logged = 0;
3603        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3604
3605        if (ioa_cfg->in_reset_reload) {
3606                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3607                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3608
3609                /* Wait for a second for any errors to be logged */
3610                msleep(1000);
3611        } else {
3612                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3613                return -EIO;
3614        }
3615
3616        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3617        if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3618                rc = -EIO;
3619        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3620
3621        return rc;
3622}
3623
3624static struct device_attribute ipr_diagnostics_attr = {
3625        .attr = {
3626                .name =         "run_diagnostics",
3627                .mode =         S_IWUSR,
3628        },
3629        .store = ipr_store_diagnostics
3630};
3631
3632/**
3633 * ipr_show_adapter_state - Show the adapter's state
3634 * @dev:        device struct
3635 * @buf:        buffer
3636 *
3637 * Return value:
3638 *      number of bytes printed to buffer
3639 **/
3640static ssize_t ipr_show_adapter_state(struct device *dev,
3641                                      struct device_attribute *attr, char *buf)
3642{
3643        struct Scsi_Host *shost = class_to_shost(dev);
3644        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3645        unsigned long lock_flags = 0;
3646        int len;
3647
3648        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3649        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3650                len = snprintf(buf, PAGE_SIZE, "offline\n");
3651        else
3652                len = snprintf(buf, PAGE_SIZE, "online\n");
3653        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3654        return len;
3655}
3656
3657/**
3658 * ipr_store_adapter_state - Change adapter state
3659 * @dev:        device struct
3660 * @buf:        buffer
3661 * @count:      buffer size
3662 *
3663 * This function will change the adapter's state.
3664 *
3665 * Return value:
3666 *      count on success / other on failure
3667 **/
3668static ssize_t ipr_store_adapter_state(struct device *dev,
3669                                       struct device_attribute *attr,
3670                                       const char *buf, size_t count)
3671{
3672        struct Scsi_Host *shost = class_to_shost(dev);
3673        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3674        unsigned long lock_flags;
3675        int result = count, i;
3676
3677        if (!capable(CAP_SYS_ADMIN))
3678                return -EACCES;
3679
3680        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3681        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3682            !strncmp(buf, "online", 6)) {
3683                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3684                        spin_lock(&ioa_cfg->hrrq[i]._lock);
3685                        ioa_cfg->hrrq[i].ioa_is_dead = 0;
3686                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
3687                }
3688                wmb();
3689                ioa_cfg->reset_retries = 0;
3690                ioa_cfg->in_ioa_bringdown = 0;
3691                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3692        }
3693        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3694        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3695
3696        return result;
3697}
3698
3699static struct device_attribute ipr_ioa_state_attr = {
3700        .attr = {
3701                .name =         "online_state",
3702                .mode =         S_IRUGO | S_IWUSR,
3703        },
3704        .show = ipr_show_adapter_state,
3705        .store = ipr_store_adapter_state
3706};
3707
3708/**
3709 * ipr_store_reset_adapter - Reset the adapter
3710 * @dev:        device struct
3711 * @buf:        buffer
3712 * @count:      buffer size
3713 *
3714 * This function will reset the adapter.
3715 *
3716 * Return value:
3717 *      count on success / other on failure
3718 **/
3719static ssize_t ipr_store_reset_adapter(struct device *dev,
3720                                       struct device_attribute *attr,
3721                                       const char *buf, size_t count)
3722{
3723        struct Scsi_Host *shost = class_to_shost(dev);
3724        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3725        unsigned long lock_flags;
3726        int result = count;
3727
3728        if (!capable(CAP_SYS_ADMIN))
3729                return -EACCES;
3730
3731        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3732        if (!ioa_cfg->in_reset_reload)
3733                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3734        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3735        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3736
3737        return result;
3738}
3739
3740static struct device_attribute ipr_ioa_reset_attr = {
3741        .attr = {
3742                .name =         "reset_host",
3743                .mode =         S_IWUSR,
3744        },
3745        .store = ipr_store_reset_adapter
3746};
3747
3748static int ipr_iopoll(struct irq_poll *iop, int budget);
3749/**
3750 * ipr_show_iopoll_weight - Show ipr polling mode
3751 * @dev:        class device struct
3752 * @buf:        buffer
3753 *
3754 * Return value:
3755 *      number of bytes printed to buffer
3756 **/
3757static ssize_t ipr_show_iopoll_weight(struct device *dev,
3758                                   struct device_attribute *attr, char *buf)
3759{
3760        struct Scsi_Host *shost = class_to_shost(dev);
3761        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3762        unsigned long lock_flags = 0;
3763        int len;
3764
3765        spin_lock_irqsave(shost->host_lock, lock_flags);
3766        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3767        spin_unlock_irqrestore(shost->host_lock, lock_flags);
3768
3769        return len;
3770}
3771
3772/**
3773 * ipr_store_iopoll_weight - Change the adapter's polling mode
3774 * @dev:        class device struct
3775 * @buf:        buffer
3776 *
3777 * Return value:
3778 *      number of bytes consumed from buffer
3779 **/
3780static ssize_t ipr_store_iopoll_weight(struct device *dev,
3781                                        struct device_attribute *attr,
3782                                        const char *buf, size_t count)
3783{
3784        struct Scsi_Host *shost = class_to_shost(dev);
3785        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3786        unsigned long user_iopoll_weight;
3787        unsigned long lock_flags = 0;
3788        int i;
3789
3790        if (!ioa_cfg->sis64) {
3791                dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3792                return -EINVAL;
3793        }
3794        if (kstrtoul(buf, 10, &user_iopoll_weight))
3795                return -EINVAL;
3796
3797        if (user_iopoll_weight > 256) {
3798                dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3799                return -EINVAL;
3800        }
3801
3802        if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3803                dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight matches the current weight\n");
3804                return strlen(buf);
3805        }
3806
3807        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3808                for (i = 1; i < ioa_cfg->hrrq_num; i++)
3809                        irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3810        }
3811
3812        spin_lock_irqsave(shost->host_lock, lock_flags);
3813        ioa_cfg->iopoll_weight = user_iopoll_weight;
3814        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3815                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3816                        irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3817                                        ioa_cfg->iopoll_weight, ipr_iopoll);
3818                }
3819        }
3820        spin_unlock_irqrestore(shost->host_lock, lock_flags);
3821
3822        return strlen(buf);
3823}
3824
3825static struct device_attribute ipr_iopoll_weight_attr = {
3826        .attr = {
3827                .name =         "iopoll_weight",
3828                .mode =         S_IRUGO | S_IWUSR,
3829        },
3830        .show = ipr_show_iopoll_weight,
3831        .store = ipr_store_iopoll_weight
3832};
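
    /* Example usage (sysfs path is illustrative; a weight of 0 disables
     * irq_poll mode):
     *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
     */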
3833
3834/**
3835 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3836 * @buf_len:            buffer length
3837 *
3838 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3839 * list to use for microcode download
3840 *
3841 * Return value:
3842 *      pointer to sglist / NULL on failure
3843 **/
3844static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3845{
3846        int sg_size, order;
3847        struct ipr_sglist *sglist;
3848
3849        /* Get the minimum size per scatter/gather element */
3850        sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3851
3852        /* Get the actual size per element */
3853        order = get_order(sg_size);
3854
3855        /* Allocate a scatter/gather list for the DMA */
3856        sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3857        if (sglist == NULL) {
3858                ipr_trace;
3859                return NULL;
3860        }
3861        sglist->order = order;
3862        sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3863                                              &sglist->num_sg);
3864        if (!sglist->scatterlist) {
3865                kfree(sglist);
3866                return NULL;
3867        }
3868
3869        return sglist;
3870}
3871
3872/**
3873 * ipr_free_ucode_buffer - Frees a microcode download buffer
3874 * @sglist:             scatter/gather list pointer
3875 *
3876 * Free a DMA'able ucode download buffer previously allocated with
3877 * ipr_alloc_ucode_buffer
3878 *
3879 * Return value:
3880 *      nothing
3881 **/
3882static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3883{
3884        sgl_free_order(sglist->scatterlist, sglist->order);
3885        kfree(sglist);
3886}
3887
3888/**
3889 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3890 * @sglist:             scatter/gather list pointer
3891 * @buffer:             buffer pointer
3892 * @len:                buffer length
3893 *
3894 * Copy a microcode image from a user buffer into a buffer allocated by
3895 * ipr_alloc_ucode_buffer
3896 *
3897 * Return value:
3898 *      0 on success / other on failure
3899 **/
3900static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3901                                 u8 *buffer, u32 len)
3902{
3903        int bsize_elem, i, result = 0;
3904        struct scatterlist *sg;
3905        void *kaddr;
3906
3907        /* Determine the actual number of bytes per element */
3908        bsize_elem = PAGE_SIZE * (1 << sglist->order);
3909
3910        sg = sglist->scatterlist;
3911
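            /* Copy whole bsize_elem-sized chunks first; any tail remainder
             * is copied after the loop.
             */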
3912        for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3913                        buffer += bsize_elem) {
3914                struct page *page = sg_page(sg);
3915
3916                kaddr = kmap(page);
3917                memcpy(kaddr, buffer, bsize_elem);
3918                kunmap(page);
3919
3920                sg->length = bsize_elem;
3921
3922                if (result != 0) {
3923                        ipr_trace;
3924                        return result;
3925                }
3926        }
3927
3928        if (len % bsize_elem) {
3929                struct page *page = sg_page(sg);
3930
3931                kaddr = kmap(page);
3932                memcpy(kaddr, buffer, len % bsize_elem);
3933                kunmap(page);
3934
3935                sg->length = len % bsize_elem;
3936        }
3937
3938        sglist->buffer_len = len;
3939        return result;
3940}
3941
3942/**
3943 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3944 * @ipr_cmd:            ipr command struct
3945 * @sglist:             scatter/gather list
3946 *
3947 * Builds a microcode download IOA data list (IOADL).
3948 *
3949 **/
3950static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3951                                    struct ipr_sglist *sglist)
3952{
3953        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3954        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3955        struct scatterlist *scatterlist = sglist->scatterlist;
3956        struct scatterlist *sg;
3957        int i;
3958
3959        ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3960        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3961        ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3962
3963        ioarcb->ioadl_len =
3964                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3965        for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3966                ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3967                ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3968                ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3969        }
3970
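            /* Flag the final descriptor so the adapter knows where the list
             * ends.
             */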
3971        ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3972}
3973
3974/**
3975 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3976 * @ipr_cmd:    ipr command struct
3977 * @sglist:             scatter/gather list
3978 *
3979 * Builds a microcode download IOA data list (IOADL).
3980 *
3981 **/
3982static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3983                                  struct ipr_sglist *sglist)
3984{
3985        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3986        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3987        struct scatterlist *scatterlist = sglist->scatterlist;
3988        struct scatterlist *sg;
3989        int i;
3990
3991        ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3992        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3993        ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3994
3995        ioarcb->ioadl_len =
3996                cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3997
3998        for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3999                ioadl[i].flags_and_data_len =
4000                        cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
4001                ioadl[i].address =
4002                        cpu_to_be32(sg_dma_address(sg));
4003        }
4004
4005        ioadl[i-1].flags_and_data_len |=
4006                cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4007}
4008
4009/**
4010 * ipr_update_ioa_ucode - Update IOA's microcode
4011 * @ioa_cfg:    ioa config struct
4012 * @sglist:             scatter/gather list
4013 *
4014 * Initiate an adapter reset to update the IOA's microcode
4015 *
4016 * Return value:
4017 *      0 on success / -EIO on failure
4018 **/
4019static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4020                                struct ipr_sglist *sglist)
4021{
4022        unsigned long lock_flags;
4023
4024        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4025        while (ioa_cfg->in_reset_reload) {
4026                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4027                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4028                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4029        }
4030
4031        if (ioa_cfg->ucode_sglist) {
4032                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4033                dev_err(&ioa_cfg->pdev->dev,
4034                        "Microcode download already in progress\n");
4035                return -EIO;
4036        }
4037
4038        sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4039                                        sglist->scatterlist, sglist->num_sg,
4040                                        DMA_TO_DEVICE);
4041
4042        if (!sglist->num_dma_sg) {
4043                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044                dev_err(&ioa_cfg->pdev->dev,
4045                        "Failed to map microcode download buffer!\n");
4046                return -EIO;
4047        }
4048
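            /* Publish the mapped list and trigger a normal-shutdown reset;
             * the reset job performs the actual microcode download to the
             * adapter.
             */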
4049        ioa_cfg->ucode_sglist = sglist;
4050        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4051        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4052        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4053
4054        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4055        ioa_cfg->ucode_sglist = NULL;
4056        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4057        return 0;
4058}
4059
4060/**
4061 * ipr_store_update_fw - Update the firmware on the adapter
4062 * @dev:        device struct
4063 * @buf:        buffer
4064 * @count:      buffer size
4065 *
4066 * This function will update the firmware on the adapter.
4067 *
4068 * Return value:
4069 *      count on success / other on failure
4070 **/
4071static ssize_t ipr_store_update_fw(struct device *dev,
4072                                   struct device_attribute *attr,
4073                                   const char *buf, size_t count)
4074{
4075        struct Scsi_Host *shost = class_to_shost(dev);
4076        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4077        struct ipr_ucode_image_header *image_hdr;
4078        const struct firmware *fw_entry;
4079        struct ipr_sglist *sglist;
4080        char fname[100];
4081        char *src;
4082        char *endline;
4083        int result, dnld_size;
4084
4085        if (!capable(CAP_SYS_ADMIN))
4086                return -EACCES;
4087
4088        snprintf(fname, sizeof(fname), "%s", buf);
4089
4090        endline = strchr(fname, '\n');
4091        if (endline)
4092                *endline = '\0';
4093
4094        if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4095                dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4096                return -EIO;
4097        }
4098
4099        image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4100
4101        src = (char *)image_hdr + be32_to_cpu(image_hdr->header_length);
4102        dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4103        sglist = ipr_alloc_ucode_buffer(dnld_size);
4104
4105        if (!sglist) {
4106                dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4107                release_firmware(fw_entry);
4108                return -ENOMEM;
4109        }
4110
4111        result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4112
4113        if (result) {
4114                dev_err(&ioa_cfg->pdev->dev,
4115                        "Microcode buffer copy to DMA buffer failed\n");
4116                goto out;
4117        }
4118
4119        ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4120
4121        result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4122
4123        if (!result)
4124                result = count;
4125out:
4126        ipr_free_ucode_buffer(sglist);
4127        release_firmware(fw_entry);
4128        return result;
4129}
4130
4131static struct device_attribute ipr_update_fw_attr = {
4132        .attr = {
4133                .name =         "update_fw",
4134                .mode =         S_IWUSR,
4135        },
4136        .store = ipr_store_update_fw
4137};
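
    /* Example usage (file name is illustrative; it is resolved by
     * request_firmware(), typically from /lib/firmware):
     *   echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
     */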
4138
4139/**
4140 * ipr_show_fw_type - Show the adapter's firmware type.
4141 * @dev:        class device struct
4142 * @buf:        buffer
4143 *
4144 * Return value:
4145 *      number of bytes printed to buffer
4146 **/
4147static ssize_t ipr_show_fw_type(struct device *dev,
4148                                struct device_attribute *attr, char *buf)
4149{
4150        struct Scsi_Host *shost = class_to_shost(dev);
4151        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4152        unsigned long lock_flags = 0;
4153        int len;
4154
4155        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4156        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4157        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4158        return len;
4159}
4160
4161static struct device_attribute ipr_ioa_fw_type_attr = {
4162        .attr = {
4163                .name =         "fw_type",
4164                .mode =         S_IRUGO,
4165        },
4166        .show = ipr_show_fw_type
4167};
4168
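    /**
     * ipr_read_async_err_log - Read the oldest pending async error log entry
     * @filep:      open sysfs file
     * @kobj:       kobject struct
     * @bin_attr:   bin_attribute struct
     * @buf:        buffer
     * @off:        offset
     * @count:      buffer size
     *
     * Return value:
     *      number of bytes read / 0 if no entry is pending
     **/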
4169static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4170                                struct bin_attribute *bin_attr, char *buf,
4171                                loff_t off, size_t count)
4172{
4173        struct device *cdev = container_of(kobj, struct device, kobj);
4174        struct Scsi_Host *shost = class_to_shost(cdev);
4175        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4176        struct ipr_hostrcb *hostrcb;
4177        unsigned long lock_flags = 0;
4178        int ret;
4179
4180        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4181        hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4182                                        struct ipr_hostrcb, queue);
4183        if (!hostrcb) {
4184                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4185                return 0;
4186        }
4187        ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4188                                sizeof(hostrcb->hcam));
4189        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4190        return ret;
4191}
4192
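    /**
     * ipr_next_async_err_log - Retire the oldest pending async error log entry
     * @filep:      open sysfs file
     * @kobj:       kobject struct
     * @bin_attr:   bin_attribute struct
     * @buf:        buffer (ignored)
     * @off:        offset (ignored)
     * @count:      buffer size
     *
     * Return value:
     *      count
     **/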
4193static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4194                                struct bin_attribute *bin_attr, char *buf,
4195                                loff_t off, size_t count)
4196{
4197        struct device *cdev = container_of(kobj, struct device, kobj);
4198        struct Scsi_Host *shost = class_to_shost(cdev);
4199        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4200        struct ipr_hostrcb *hostrcb;
4201        unsigned long lock_flags = 0;
4202
4203        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4204        hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4205                                        struct ipr_hostrcb, queue);
4206        if (!hostrcb) {
4207                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4208                return count;
4209        }
4210
4211        /* Reclaim hostrcb before exit */
4212        list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4213        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4214        return count;
4215}
4216
4217static struct bin_attribute ipr_ioa_async_err_log = {
4218        .attr = {
4219                .name =         "async_err_log",
4220                .mode =         S_IRUGO | S_IWUSR,
4221        },
4222        .size = 0,
4223        .read = ipr_read_async_err_log,
4224        .write = ipr_next_async_err_log
4225};
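    /*
     * Protocol sketch for "async_err_log" (path illustrative): each read
     * returns the oldest HCAM buffer still on hostrcb_report_q, and any
     * write retires that buffer so the next read sees the following one:
     *
     *   # cat /sys/class/scsi_host/host0/async_err_log
     *   # echo > /sys/class/scsi_host/host0/async_err_log
     */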
4226
4227static struct device_attribute *ipr_ioa_attrs[] = {
4228        &ipr_fw_version_attr,
4229        &ipr_log_level_attr,
4230        &ipr_diagnostics_attr,
4231        &ipr_ioa_state_attr,
4232        &ipr_ioa_reset_attr,
4233        &ipr_update_fw_attr,
4234        &ipr_ioa_fw_type_attr,
4235        &ipr_iopoll_weight_attr,
4236        NULL,
4237};
4238
4239#ifdef CONFIG_SCSI_IPR_DUMP
4240/**
4241 * ipr_read_dump - Dump the adapter
4242 * @filp:               open sysfs file
4243 * @kobj:               kobject struct
4244 * @bin_attr:           bin_attribute struct
4245 * @buf:                buffer
4246 * @off:                offset
4247 * @count:              buffer size
4248 *
4249 * Return value:
4250 *      number of bytes copied to buffer
4251 **/
4252static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4253                             struct bin_attribute *bin_attr,
4254                             char *buf, loff_t off, size_t count)
4255{
4256        struct device *cdev = container_of(kobj, struct device, kobj);
4257        struct Scsi_Host *shost = class_to_shost(cdev);
4258        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4259        struct ipr_dump *dump;
4260        unsigned long lock_flags = 0;
4261        char *src;
4262        int len, sdt_end;
4263        size_t rc = count;
4264
4265        if (!capable(CAP_SYS_ADMIN))
4266                return -EACCES;
4267
4268        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4269        dump = ioa_cfg->dump;
4270
4271        if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4272                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4273                return 0;
4274        }
4275        kref_get(&dump->kref);
4276        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4277
4278        if (off > dump->driver_dump.hdr.len) {
4279                kref_put(&dump->kref, ipr_release_dump);
4280                return 0;
4281        }
4282
4283        if (off + count > dump->driver_dump.hdr.len) {
4284                count = dump->driver_dump.hdr.len - off;
4285                rc = count;
4286        }
4287
4288        if (count && off < sizeof(dump->driver_dump)) {
4289                if (off + count > sizeof(dump->driver_dump))
4290                        len = sizeof(dump->driver_dump) - off;
4291                else
4292                        len = count;
4293                src = (u8 *)&dump->driver_dump + off;
4294                memcpy(buf, src, len);
4295                buf += len;
4296                off += len;
4297                count -= len;
4298        }
4299
4300        off -= sizeof(dump->driver_dump);
4301
4302        if (ioa_cfg->sis64)
4303                sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4304                          (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4305                           sizeof(struct ipr_sdt_entry));
4306        else
4307                sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4308                          (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4309
4310        if (count && off < sdt_end) {
4311                if (off + count > sdt_end)
4312                        len = sdt_end - off;
4313                else
4314                        len = count;
4315                src = (u8 *)&dump->ioa_dump + off;
4316                memcpy(buf, src, len);
4317                buf += len;
4318                off += len;
4319                count -= len;
4320        }
4321
4322        off -= sdt_end;
4323
4324        while (count) {
4325                if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4326                        len = PAGE_ALIGN(off) - off;
4327                else
4328                        len = count;
4329                src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4330                src += off & ~PAGE_MASK;
4331                memcpy(buf, src, len);
4332                buf += len;
4333                off += len;
4334                count -= len;
4335        }
4336
4337        kref_put(&dump->kref, ipr_release_dump);
4338        return rc;
4339}
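    /*
     * Note on ipr_read_dump() above: the dump is presented to user space
     * as three concatenated regions -- the driver dump header, the SDT
     * (smart dump table) portion of the IOA dump, and finally the
     * captured IOA data, which lives in an array of separately allocated
     * pages and is therefore copied out one page fragment at a time.
     */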
4340
4341/**
4342 * ipr_alloc_dump - Prepare for adapter dump
4343 * @ioa_cfg:    ioa config struct
4344 *
4345 * Return value:
4346 *      0 on success / other on failure
4347 **/
4348static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4349{
4350        struct ipr_dump *dump;
4351        __be32 **ioa_data;
4352        unsigned long lock_flags = 0;
4353
4354        dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4355
4356        if (!dump) {
4357                ipr_err("Dump memory allocation failed\n");
4358                return -ENOMEM;
4359        }
4360
4361        if (ioa_cfg->sis64)
4362                ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4363                                              sizeof(__be32 *)));
4364        else
4365                ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4366                                              sizeof(__be32 *)));
4367
4368        if (!ioa_data) {
4369                ipr_err("Dump memory allocation failed\n");
4370                kfree(dump);
4371                return -ENOMEM;
4372        }
4373
4374        dump->ioa_dump.ioa_data = ioa_data;
4375
4376        kref_init(&dump->kref);
4377        dump->ioa_cfg = ioa_cfg;
4378
4379        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4380
4381        if (INACTIVE != ioa_cfg->sdt_state) {
4382                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4383                vfree(dump->ioa_dump.ioa_data);
4384                kfree(dump);
4385                return 0;
4386        }
4387
4388        ioa_cfg->dump = dump;
4389        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4390        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4391                ioa_cfg->dump_taken = 1;
4392                schedule_work(&ioa_cfg->work_q);
4393        }
4394        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4395
4396        return 0;
4397}
4398
4399/**
4400 * ipr_free_dump - Free adapter dump memory
4401 * @ioa_cfg:    ioa config struct
4402 *
4403 * Return value:
4404 *      0 on success / other on failure
4405 **/
4406static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4407{
4408        struct ipr_dump *dump;
4409        unsigned long lock_flags = 0;
4410
4411        ENTER;
4412
4413        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4414        dump = ioa_cfg->dump;
4415        if (!dump) {
4416                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4417                return 0;
4418        }
4419
4420        ioa_cfg->dump = NULL;
4421        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4422
4423        kref_put(&dump->kref, ipr_release_dump);
4424
4425        LEAVE;
4426        return 0;
4427}
4428
4429/**
4430 * ipr_write_dump - Setup dump state of adapter
4431 * @filp:               open sysfs file
4432 * @kobj:               kobject struct
4433 * @bin_attr:           bin_attribute struct
4434 * @buf:                buffer
4435 * @off:                offset
4436 * @count:              buffer size
4437 *
4438 * Return value:
4439 *      count on success / error code on failure
4440 **/
4441static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4442                              struct bin_attribute *bin_attr,
4443                              char *buf, loff_t off, size_t count)
4444{
4445        struct device *cdev = container_of(kobj, struct device, kobj);
4446        struct Scsi_Host *shost = class_to_shost(cdev);
4447        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4448        int rc;
4449
4450        if (!capable(CAP_SYS_ADMIN))
4451                return -EACCES;
4452
4453        if (buf[0] == '1')
4454                rc = ipr_alloc_dump(ioa_cfg);
4455        else if (buf[0] == '0')
4456                rc = ipr_free_dump(ioa_cfg);
4457        else
4458                return -EINVAL;
4459
4460        if (rc)
4461                return rc;
4462        else
4463                return count;
4464}
4465
4466static struct bin_attribute ipr_dump_attr = {
4467        .attr = {
4468                .name = "dump",
4469                .mode = S_IRUSR | S_IWUSR,
4470        },
4471        .size = 0,
4472        .read = ipr_read_dump,
4473        .write = ipr_write_dump
4474};
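    /*
     * Usage sketch for the "dump" attribute (paths illustrative): writing
     * '1' allocates dump buffers and arms dump collection, writing '0'
     * releases a previously taken dump, and reading streams the dump out
     * once it has been obtained:
     *
     *   # echo 1 > /sys/class/scsi_host/host0/dump
     *   # dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr.dump
     *   # echo 0 > /sys/class/scsi_host/host0/dump
     */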
4475#else
4476static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4477#endif
4478
4479/**
4480 * ipr_change_queue_depth - Change the device's queue depth
4481 * @sdev:       scsi device struct
4482 * @qdepth:     depth to set
4484 *
4485 * Return value:
4486 *      actual depth set
4487 **/
4488static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4489{
4490        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4491        struct ipr_resource_entry *res;
4492        unsigned long lock_flags = 0;
4493
4494        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4495        res = (struct ipr_resource_entry *)sdev->hostdata;
4496
4497        if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4498                qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4499        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4500
4501        scsi_change_queue_depth(sdev, qdepth);
4502        return sdev->queue_depth;
4503}
4504
4505/**
4506 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4507 * @dev:        device struct
4508 * @attr:       device attribute structure
4509 * @buf:        buffer
4510 *
4511 * Return value:
4512 *      number of bytes printed to buffer
4513 **/
4514static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4515{
4516        struct scsi_device *sdev = to_scsi_device(dev);
4517        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4518        struct ipr_resource_entry *res;
4519        unsigned long lock_flags = 0;
4520        ssize_t len = -ENXIO;
4521
4522        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4523        res = (struct ipr_resource_entry *)sdev->hostdata;
4524        if (res)
4525                len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4526        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4527        return len;
4528}
4529
4530static struct device_attribute ipr_adapter_handle_attr = {
4531        .attr = {
4532                .name =         "adapter_handle",
4533                .mode =         S_IRUSR,
4534        },
4535        .show = ipr_show_adapter_handle
4536};
4537
4538/**
4539 * ipr_show_resource_path - Show the resource path or the resource address for
4540 *                          this device.
4541 * @dev:        device struct
4542 * @attr:       device attribute structure
4543 * @buf:        buffer
4544 *
4545 * Return value:
4546 *      number of bytes printed to buffer
4547 **/
4548static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4549{
4550        struct scsi_device *sdev = to_scsi_device(dev);
4551        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4552        struct ipr_resource_entry *res;
4553        unsigned long lock_flags = 0;
4554        ssize_t len = -ENXIO;
4555        char buffer[IPR_MAX_RES_PATH_LENGTH];
4556
4557        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4558        res = (struct ipr_resource_entry *)sdev->hostdata;
4559        if (res && ioa_cfg->sis64)
4560                len = snprintf(buf, PAGE_SIZE, "%s\n",
4561                               __ipr_format_res_path(res->res_path, buffer,
4562                                                     sizeof(buffer)));
4563        else if (res)
4564                len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4565                               res->bus, res->target, res->lun);
4566
4567        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4568        return len;
4569}
4570
4571static struct device_attribute ipr_resource_path_attr = {
4572        .attr = {
4573                .name =         "resource_path",
4574                .mode =         S_IRUGO,
4575        },
4576        .show = ipr_show_resource_path
4577};
4578
4579/**
4580 * ipr_show_device_id - Show the device_id for this device.
4581 * @dev:        device struct
4582 * @attr:       device attribute structure
4583 * @buf:        buffer
4584 *
4585 * Return value:
4586 *      number of bytes printed to buffer
4587 **/
4588static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4589{
4590        struct scsi_device *sdev = to_scsi_device(dev);
4591        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4592        struct ipr_resource_entry *res;
4593        unsigned long lock_flags = 0;
4594        ssize_t len = -ENXIO;
4595
4596        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4597        res = (struct ipr_resource_entry *)sdev->hostdata;
4598        if (res && ioa_cfg->sis64)
4599                len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4600        else if (res)
4601                len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4602
4603        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4604        return len;
4605}
4606
4607static struct device_attribute ipr_device_id_attr = {
4608        .attr = {
4609                .name =         "device_id",
4610                .mode =         S_IRUGO,
4611        },
4612        .show = ipr_show_device_id
4613};
4614
4615/**
4616 * ipr_show_resource_type - Show the resource type for this device.
4617 * @dev:        device struct
4618 * @attr:       device attribute structure
4619 * @buf:        buffer
4620 *
4621 * Return value:
4622 *      number of bytes printed to buffer
4623 **/
4624static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4625{
4626        struct scsi_device *sdev = to_scsi_device(dev);
4627        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4628        struct ipr_resource_entry *res;
4629        unsigned long lock_flags = 0;
4630        ssize_t len = -ENXIO;
4631
4632        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4633        res = (struct ipr_resource_entry *)sdev->hostdata;
4634
4635        if (res)
4636                len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4637
4638        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4639        return len;
4640}
4641
4642static struct device_attribute ipr_resource_type_attr = {
4643        .attr = {
4644                .name =         "resource_type",
4645                .mode =         S_IRUGO,
4646        },
4647        .show = ipr_show_resource_type
4648};
4649
4650/**
4651 * ipr_show_raw_mode - Show the device's raw mode
4652 * @dev:        class device struct
     * @attr:       device attribute (unused)
4653 * @buf:        buffer
4654 *
4655 * Return value:
4656 *      number of bytes printed to buffer
4657 **/
4658static ssize_t ipr_show_raw_mode(struct device *dev,
4659                                 struct device_attribute *attr, char *buf)
4660{
4661        struct scsi_device *sdev = to_scsi_device(dev);
4662        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4663        struct ipr_resource_entry *res;
4664        unsigned long lock_flags = 0;
4665        ssize_t len;
4666
4667        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4668        res = (struct ipr_resource_entry *)sdev->hostdata;
4669        if (res)
4670                len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4671        else
4672                len = -ENXIO;
4673        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4674        return len;
4675}
4676
4677/**
4678 * ipr_store_raw_mode - Change the device's raw mode
4679 * @dev:        class device struct
     * @attr:       device attribute (unused)
4680 * @buf:        buffer
     * @count:      buffer size
4681 *
4682 * Return value:
4683 *      number of bytes consumed from buffer on success / error code on failure
4684 **/
4685static ssize_t ipr_store_raw_mode(struct device *dev,
4686                                  struct device_attribute *attr,
4687                                  const char *buf, size_t count)
4688{
4689        struct scsi_device *sdev = to_scsi_device(dev);
4690        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4691        struct ipr_resource_entry *res;
4692        unsigned long lock_flags = 0;
4693        ssize_t len;
4694
4695        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4696        res = (struct ipr_resource_entry *)sdev->hostdata;
4697        if (res) {
4698                if (ipr_is_af_dasd_device(res)) {
4699                        res->raw_mode = simple_strtoul(buf, NULL, 10);
4700                        len = strlen(buf);
4701                        if (res->sdev)
4702                                sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4703                                        res->raw_mode ? "enabled" : "disabled");
4704                } else
4705                        len = -EINVAL;
4706        } else
4707                len = -ENXIO;
4708        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4709        return len;
4710}
4711
4712static struct device_attribute ipr_raw_mode_attr = {
4713        .attr = {
4714                .name =         "raw_mode",
4715                .mode =         S_IRUGO | S_IWUSR,
4716        },
4717        .show = ipr_show_raw_mode,
4718        .store = ipr_store_raw_mode
4719};
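    /*
     * Example (device path illustrative): raw mode can only be toggled on
     * AF DASD resources; writing to any other device type fails with
     * -EINVAL:
     *
     *   # echo 1 > /sys/class/scsi_device/0:0:1:0/device/raw_mode
     */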
4720
4721static struct device_attribute *ipr_dev_attrs[] = {
4722        &ipr_adapter_handle_attr,
4723        &ipr_resource_path_attr,
4724        &ipr_device_id_attr,
4725        &ipr_resource_type_attr,
4726        &ipr_raw_mode_attr,
4727        NULL,
4728};
4729
4730/**
4731 * ipr_biosparam - Return the HSC mapping
4732 * @sdev:                       scsi device struct
4733 * @block_device:       block device pointer
4734 * @capacity:           capacity of the device
4735 * @parm:                       Array containing returned HSC values.
4736 *
4737 * This function generates the HSC parms that fdisk uses.
4738 * We want to make sure we return something that places partitions
4739 * on 4k boundaries for best performance with the IOA.
4740 *
4741 * Return value:
4742 *      0 on success
4743 **/
4744static int ipr_biosparam(struct scsi_device *sdev,
4745                         struct block_device *block_device,
4746                         sector_t capacity, int *parm)
4747{
4748        int heads, sectors;
4749        sector_t cylinders;
4750
4751        heads = 128;
4752        sectors = 32;
4753
4754        cylinders = capacity;
4755        sector_div(cylinders, (128 * 32));
4756
4757        /* return result */
4758        parm[0] = heads;
4759        parm[1] = sectors;
4760        parm[2] = cylinders;
4761
4762        return 0;
4763}
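    /*
     * Worked example for ipr_biosparam() above: with 128 heads and 32
     * sectors/track, one cylinder is 128 * 32 = 4096 blocks. At the usual
     * 512-byte block size that is 2 MiB per cylinder, so any partition
     * that fdisk places on a cylinder boundary is also aligned on a 4k
     * boundary, which is what the IOA wants.
     */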
4764
4765/**
4766 * ipr_find_starget - Find target based on bus/target.
4767 * @starget:    scsi target struct
4768 *
4769 * Return value:
4770 *      resource entry pointer if found / NULL if not found
4771 **/
4772static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4773{
4774        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4775        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4776        struct ipr_resource_entry *res;
4777
4778        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4779                if ((res->bus == starget->channel) &&
4780                    (res->target == starget->id)) {
4781                        return res;
4782                }
4783        }
4784
4785        return NULL;
4786}
4787
4788static struct ata_port_info sata_port_info;
4789
4790/**
4791 * ipr_target_alloc - Prepare for commands to a SCSI target
4792 * @starget:    scsi target struct
4793 *
4794 * If the device is a SATA device, this function allocates an
4795 * ATA port with libata, else it does nothing.
4796 *
4797 * Return value:
4798 *      0 on success / non-0 on failure
4799 **/
4800static int ipr_target_alloc(struct scsi_target *starget)
4801{
4802        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4803        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4804        struct ipr_sata_port *sata_port;
4805        struct ata_port *ap;
4806        struct ipr_resource_entry *res;
4807        unsigned long lock_flags;
4808
4809        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4810        res = ipr_find_starget(starget);
4811        starget->hostdata = NULL;
4812
4813        if (res && ipr_is_gata(res)) {
4814                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4815                sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4816                if (!sata_port)
4817                        return -ENOMEM;
4818
4819                ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4820                if (ap) {
4821                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4822                        sata_port->ioa_cfg = ioa_cfg;
4823                        sata_port->ap = ap;
4824                        sata_port->res = res;
4825
4826                        res->sata_port = sata_port;
4827                        ap->private_data = sata_port;
4828                        starget->hostdata = sata_port;
4829                } else {
4830                        kfree(sata_port);
4831                        return -ENOMEM;
4832                }
4833        }
4834        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4835
4836        return 0;
4837}
4838
4839/**
4840 * ipr_target_destroy - Destroy a SCSI target
4841 * @starget:    scsi target struct
4842 *
4843 * If the device was a SATA device, this function frees the libata
4844 * ATA port, else it does nothing.
4845 *
4846 **/
4847static void ipr_target_destroy(struct scsi_target *starget)
4848{
4849        struct ipr_sata_port *sata_port = starget->hostdata;
4850        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4851        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4852
4853        if (ioa_cfg->sis64) {
4854                if (!ipr_find_starget(starget)) {
4855                        if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4856                                clear_bit(starget->id, ioa_cfg->array_ids);
4857                        else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4858                                clear_bit(starget->id, ioa_cfg->vset_ids);
4859                        else if (starget->channel == 0)
4860                                clear_bit(starget->id, ioa_cfg->target_ids);
4861                }
4862        }
4863
4864        if (sata_port) {
4865                starget->hostdata = NULL;
4866                ata_sas_port_destroy(sata_port->ap);
4867                kfree(sata_port);
4868        }
4869}
4870
4871/**
4872 * ipr_find_sdev - Find device based on bus/target/lun.
4873 * @sdev:       scsi device struct
4874 *
4875 * Return value:
4876 *      resource entry pointer if found / NULL if not found
4877 **/
4878static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4879{
4880        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4881        struct ipr_resource_entry *res;
4882
4883        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4884                if ((res->bus == sdev->channel) &&
4885                    (res->target == sdev->id) &&
4886                    (res->lun == sdev->lun))
4887                        return res;
4888        }
4889
4890        return NULL;
4891}
4892
4893/**
4894 * ipr_slave_destroy - Unconfigure a SCSI device
4895 * @sdev:       scsi device struct
4896 *
4897 * Return value:
4898 *      nothing
4899 **/
4900static void ipr_slave_destroy(struct scsi_device *sdev)
4901{
4902        struct ipr_resource_entry *res;
4903        struct ipr_ioa_cfg *ioa_cfg;
4904        unsigned long lock_flags = 0;
4905
4906        ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4907
4908        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4909        res = (struct ipr_resource_entry *) sdev->hostdata;
4910        if (res) {
4911                if (res->sata_port)
4912                        res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4913                sdev->hostdata = NULL;
4914                res->sdev = NULL;
4915                res->sata_port = NULL;
4916        }
4917        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918}
4919
4920/**
4921 * ipr_slave_configure - Configure a SCSI device
4922 * @sdev:       scsi device struct
4923 *
4924 * This function configures the specified scsi device.
4925 *
4926 * Return value:
4927 *      0 on success
4928 **/
4929static int ipr_slave_configure(struct scsi_device *sdev)
4930{
4931        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4932        struct ipr_resource_entry *res;
4933        struct ata_port *ap = NULL;
4934        unsigned long lock_flags = 0;
4935        char buffer[IPR_MAX_RES_PATH_LENGTH];
4936
4937        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4938        res = sdev->hostdata;
4939        if (res) {
4940                if (ipr_is_af_dasd_device(res))
4941                        sdev->type = TYPE_RAID;
4942                if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4943                        sdev->scsi_level = 4;
4944                        sdev->no_uld_attach = 1;
4945                }
4946                if (ipr_is_vset_device(res)) {
4947                        sdev->scsi_level = SCSI_SPC_3;
4948                        sdev->no_report_opcodes = 1;
4949                        blk_queue_rq_timeout(sdev->request_queue,
4950                                             IPR_VSET_RW_TIMEOUT);
4951                        blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4952                }
4953                if (ipr_is_gata(res) && res->sata_port)
4954                        ap = res->sata_port->ap;
4955                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4956
4957                if (ap) {
4958                        scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4959                        ata_sas_slave_configure(sdev, ap);
4960                }
4961
4962                if (ioa_cfg->sis64)
4963                        sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4964                                    ipr_format_res_path(ioa_cfg,
4965                                res->res_path, buffer, sizeof(buffer)));
4966                return 0;
4967        }
4968        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4969        return 0;
4970}
4971
4972/**
4973 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4974 * @sdev:       scsi device struct
4975 *
4976 * This function initializes an ATA port so that future commands
4977 * sent through queuecommand will work.
4978 *
4979 * Return value:
4980 *      0 on success
4981 **/
4982static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4983{
4984        struct ipr_sata_port *sata_port = NULL;
4985        int rc = -ENXIO;
4986
4987        ENTER;
4988        if (sdev->sdev_target)
4989                sata_port = sdev->sdev_target->hostdata;
4990        if (sata_port) {
4991                rc = ata_sas_port_init(sata_port->ap);
4992                if (rc == 0)
4993                        rc = ata_sas_sync_probe(sata_port->ap);
4994        }
4995
4996        if (rc)
4997                ipr_slave_destroy(sdev);
4998
4999        LEAVE;
5000        return rc;
5001}
5002
5003/**
5004 * ipr_slave_alloc - Prepare for commands to a device.
5005 * @sdev:       scsi device struct
5006 *
5007 * This function saves a pointer to the resource entry
5008 * in the scsi device struct if the device exists. We
5009 * can then use this pointer in ipr_queuecommand when
5010 * handling new commands.
5011 *
5012 * Return value:
5013 *      0 on success / -ENXIO if device does not exist
5014 **/
5015static int ipr_slave_alloc(struct scsi_device *sdev)
5016{
5017        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5018        struct ipr_resource_entry *res;
5019        unsigned long lock_flags;
5020        int rc = -ENXIO;
5021
5022        sdev->hostdata = NULL;
5023
5024        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5025
5026        res = ipr_find_sdev(sdev);
5027        if (res) {
5028                res->sdev = sdev;
5029                res->add_to_ml = 0;
5030                res->in_erp = 0;
5031                sdev->hostdata = res;
5032                if (!ipr_is_naca_model(res))
5033                        res->needs_sync_complete = 1;
5034                rc = 0;
5035                if (ipr_is_gata(res)) {
5036                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5037                        return ipr_ata_slave_alloc(sdev);
5038                }
5039        }
5040
5041        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5042
5043        return rc;
5044}
5045
5046/**
5047 * ipr_match_lun - Match function for specified LUN
5048 * @ipr_cmd:    ipr command struct
5049 * @device:             device to match (sdev)
5050 *
5051 * Returns:
5052 *      1 if command matches sdev / 0 if command does not match sdev
5053 **/
5054static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5055{
5056        if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5057                return 1;
5058        return 0;
5059}
5060
5061/**
5062 * ipr_cmnd_is_free - Check if a command is free or not
5063 * @ipr_cmd:    ipr command struct
5064 *
5065 * Returns:
5066 *      true / false
5067 **/
5068static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5069{
5070        struct ipr_cmnd *loop_cmd;
5071
5072        list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5073                if (loop_cmd == ipr_cmd)
5074                        return true;
5075        }
5076
5077        return false;
5078}
5079
5080/**
5081 * ipr_match_res - Match function for specified resource entry
5082 * @ipr_cmd:    ipr command struct
5083 * @resource:   resource entry to match
5084 *
5085 * Returns:
5086 *      1 if command matches sdev / 0 if command does not match sdev
5087 **/
5088static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5089{
5090        struct ipr_resource_entry *res = resource;
5091
5092        if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5093                return 1;
5094        return 0;
5095}
5096
5097/**
5098 * ipr_wait_for_ops - Wait for matching commands to complete
5099 * @ioa_cfg:    ioa config struct
5100 * @device:             device to match (sdev)
5101 * @match:              match function to use
5102 *
5103 * Returns:
5104 *      SUCCESS / FAILED
5105 **/
5106static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5107                            int (*match)(struct ipr_cmnd *, void *))
5108{
5109        struct ipr_cmnd *ipr_cmd;
5110        int wait, i;
5111        unsigned long flags;
5112        struct ipr_hrr_queue *hrrq;
5113        signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5114        DECLARE_COMPLETION_ONSTACK(comp);
5115
5116        ENTER;
5117        do {
5118                wait = 0;
5119
5120                for_each_hrrq(hrrq, ioa_cfg) {
5121                        spin_lock_irqsave(hrrq->lock, flags);
5122                        for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5123                                ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5124                                if (!ipr_cmnd_is_free(ipr_cmd)) {
5125                                        if (match(ipr_cmd, device)) {
5126                                                ipr_cmd->eh_comp = &comp;
5127                                                wait++;
5128                                        }
5129                                }
5130                        }
5131                        spin_unlock_irqrestore(hrrq->lock, flags);
5132                }
5133
5134                if (wait) {
5135                        timeout = wait_for_completion_timeout(&comp, timeout);
5136
5137                        if (!timeout) {
5138                                wait = 0;
5139
5140                                for_each_hrrq(hrrq, ioa_cfg) {
5141                                        spin_lock_irqsave(hrrq->lock, flags);
5142                                        for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5143                                                ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5144                                                if (!ipr_cmnd_is_free(ipr_cmd)) {
5145                                                        if (match(ipr_cmd, device)) {
5146                                                                ipr_cmd->eh_comp = NULL;
5147                                                                wait++;
5148                                                        }
5149                                                }
5150                                        }
5151                                        spin_unlock_irqrestore(hrrq->lock, flags);
5152                                }
5153
5154                                if (wait)
5155                                        dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5156                                LEAVE;
5157                                return wait ? FAILED : SUCCESS;
5158                        }
5159                }
5160        } while (wait);
5161
5162        LEAVE;
5163        return SUCCESS;
5164}
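    /*
     * Note on the handshake in ipr_wait_for_ops() above: each outstanding
     * command that matches gets its eh_comp pointer armed with our
     * on-stack completion, and the normal completion path (elsewhere in
     * this driver) completes eh_comp when the command retires. If the
     * wait times out, the pointers are disarmed again before reporting
     * FAILED so a late completion cannot touch the dead stack frame.
     */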
5165
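    /**
     * ipr_eh_host_reset - Reset the host adapter
     * @cmd:        scsi command struct
     *
     * Return value:
     *      SUCCESS / FAILED
     **/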
5166static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5167{
5168        struct ipr_ioa_cfg *ioa_cfg;
5169        unsigned long lock_flags = 0;
5170        int rc = SUCCESS;
5171
5172        ENTER;
5173        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5174        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5175
5176        if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5177                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5178                dev_err(&ioa_cfg->pdev->dev,
5179                        "Adapter being reset as a result of error recovery.\n");
5180
5181                if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5182                        ioa_cfg->sdt_state = GET_DUMP;
5183        }
5184
5185        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5186        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5187        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5188
5189        /* If we got hit with a host reset while we were already resetting
5190         * the adapter for some reason, and that reset failed, the adapter
             * is now dead, so fail the host reset as well.
             */
5191        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5192                ipr_trace;
5193                rc = FAILED;
5194        }
5195
5196        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5197        LEAVE;
5198        return rc;
5199}
5200
5201/**
5202 * ipr_device_reset - Reset the device
5203 * @ioa_cfg:    ioa config struct
5204 * @res:                resource entry struct
5205 *
5206 * This function issues a device reset to the affected device.
5207 * If the device is a SCSI device, a LUN reset will be sent
5208 * to the device first. If that does not work, a target reset
5209 * will be sent. If the device is a SATA device, a PHY reset will
5210 * be sent.
5211 *
5212 * Return value:
5213 *      0 on success / non-zero on failure
5214 **/
5215static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5216                            struct ipr_resource_entry *res)
5217{
5218        struct ipr_cmnd *ipr_cmd;
5219        struct ipr_ioarcb *ioarcb;
5220        struct ipr_cmd_pkt *cmd_pkt;
5221        struct ipr_ioarcb_ata_regs *regs;
5222        u32 ioasc;
5223
5224        ENTER;
5225        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5226        ioarcb = &ipr_cmd->ioarcb;
5227        cmd_pkt = &ioarcb->cmd_pkt;
5228
5229        if (ipr_cmd->ioa_cfg->sis64) {
5230                regs = &ipr_cmd->i.ata_ioadl.regs;
5231                ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5232        } else
5233                regs = &ioarcb->u.add_data.u.regs;
5234
5235        ioarcb->res_handle = res->res_handle;
5236        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5237        cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5238        if (ipr_is_gata(res)) {
5239                cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5240                ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5241                regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5242        }
5243
5244        ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5245        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5246        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5247        if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5248                if (ipr_cmd->ioa_cfg->sis64)
5249                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5250                               sizeof(struct ipr_ioasa_gata));
5251                else
5252                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5253                               sizeof(struct ipr_ioasa_gata));
5254        }
5255
5256        LEAVE;
5257        return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5258}
5259
5260/**
5261 * ipr_sata_reset - Reset the SATA port
5262 * @link:       SATA link to reset
5263 * @classes:    class of the attached device
     * @deadline:   unused
5264 *
5265 * This function issues a SATA phy reset to the affected ATA link.
5266 *
5267 * Return value:
5268 *      0 on success / non-zero on failure
5269 **/
5270static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5271                                unsigned long deadline)
5272{
5273        struct ipr_sata_port *sata_port = link->ap->private_data;
5274        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5275        struct ipr_resource_entry *res;
5276        unsigned long lock_flags = 0;
5277        int rc = -ENXIO, ret;
5278
5279        ENTER;
5280        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5281        while (ioa_cfg->in_reset_reload) {
5282                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5283                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5284                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5285        }
5286
5287        res = sata_port->res;
5288        if (res) {
5289                rc = ipr_device_reset(ioa_cfg, res);
5290                *classes = res->ata_class;
5291                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5292
5293                ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5294                if (ret != SUCCESS) {
5295                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5296                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5297                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5298
5299                        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5300                }
5301        } else
5302                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5303
5304        LEAVE;
5305        return rc;
5306}
5307
5308/**
5309 * __ipr_eh_dev_reset - Reset the device
5310 * @scsi_cmd:   scsi command struct
5311 *
5312 * This function issues a device reset to the affected device.
5313 * A LUN reset will be sent to the device first. If that does
5314 * not work, a target reset will be sent.
5315 *
5316 * Return value:
5317 *      SUCCESS / FAILED
5318 **/
5319static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5320{
5321        struct ipr_cmnd *ipr_cmd;
5322        struct ipr_ioa_cfg *ioa_cfg;
5323        struct ipr_resource_entry *res;
5324        struct ata_port *ap;
5325        int rc = 0, i;
5326        struct ipr_hrr_queue *hrrq;
5327
5328        ENTER;
5329        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5330        res = scsi_cmd->device->hostdata;
5331
5332        /*
5333         * If we are currently going through reset/reload, return failed. This will force the
5334         * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5335         * reset to complete
5336         */
5337        if (ioa_cfg->in_reset_reload)
5338                return FAILED;
5339        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5340                return FAILED;
5341
5342        for_each_hrrq(hrrq, ioa_cfg) {
5343                spin_lock(&hrrq->_lock);
5344                for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5345                        ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5346
5347                        if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5348                                if (!ipr_cmd->qc)
5349                                        continue;
5350                                if (ipr_cmnd_is_free(ipr_cmd))
5351                                        continue;
5352
5353                                ipr_cmd->done = ipr_sata_eh_done;
5354                                if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5355                                        ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5356                                        ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5357                                }
5358                        }
5359                }
5360                spin_unlock(&hrrq->_lock);
5361        }
5362        res->resetting_device = 1;
5363        scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5364
5365        if (ipr_is_gata(res) && res->sata_port) {
5366                ap = res->sata_port->ap;
5367                spin_unlock_irq(scsi_cmd->device->host->host_lock);
5368                ata_std_error_handler(ap);
5369                spin_lock_irq(scsi_cmd->device->host->host_lock);
5370        } else
5371                rc = ipr_device_reset(ioa_cfg, res);
5372        res->resetting_device = 0;
5373        res->reset_occurred = 1;
5374
5375        LEAVE;
5376        return rc ? FAILED : SUCCESS;
5377}
5378
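    /**
     * ipr_eh_dev_reset - Reset the device and wait for its outstanding ops
     * @cmd:        scsi command struct
     *
     * Return value:
     *      SUCCESS / FAILED
     **/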
5379static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5380{
5381        int rc;
5382        struct ipr_ioa_cfg *ioa_cfg;
5383        struct ipr_resource_entry *res;
5384
5385        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5386        res = cmd->device->hostdata;
5387
5388        if (!res)
5389                return FAILED;
5390
5391        spin_lock_irq(cmd->device->host->host_lock);
5392        rc = __ipr_eh_dev_reset(cmd);
5393        spin_unlock_irq(cmd->device->host->host_lock);
5394
5395        if (rc == SUCCESS) {
5396                if (ipr_is_gata(res) && res->sata_port)
5397                        rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5398                else
5399                        rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5400        }
5401
5402        return rc;
5403}
5404
5405/**
5406 * ipr_bus_reset_done - Op done function for bus reset.
5407 * @ipr_cmd:    ipr command struct
5408 *
5409 * This function is the op done function for a bus reset
5410 *
5411 * Return value:
5412 *      none
5413 **/
5414static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5415{
5416        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5417        struct ipr_resource_entry *res;
5418
5419        ENTER;
5420        if (!ioa_cfg->sis64)
5421                list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5422                        if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5423                                scsi_report_bus_reset(ioa_cfg->host, res->bus);
5424                                break;
5425                        }
5426                }
5427
5428        /*
5429         * If abort has not completed, indicate the reset has, else call the
5430         * abort's done function to wake the sleeping eh thread
5431         */
5432        if (ipr_cmd->sibling->sibling)
5433                ipr_cmd->sibling->sibling = NULL;
5434        else
5435                ipr_cmd->sibling->done(ipr_cmd->sibling);
5436
5437        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5438        LEAVE;
5439}
5440
5441/**
5442 * ipr_abort_timeout - An abort task has timed out
5443 * @t:          Timer context used to fetch the ipr command struct
5444 *
5445 * This function handles when an abort task times out. If this
5446 * happens we issue a bus reset since we have resources tied
5447 * up that must be freed before returning to the midlayer.
5448 *
5449 * Return value:
5450 *      none
5451 **/
5452static void ipr_abort_timeout(struct timer_list *t)
5453{
5454        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5455        struct ipr_cmnd *reset_cmd;
5456        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5457        struct ipr_cmd_pkt *cmd_pkt;
5458        unsigned long lock_flags = 0;
5459
5460        ENTER;
5461        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5462        if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5463                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5464                return;
5465        }
5466
5467        sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5468        reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5469        ipr_cmd->sibling = reset_cmd;
5470        reset_cmd->sibling = ipr_cmd;
5471        reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5472        cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5473        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5474        cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5475        cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5476
5477        ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5478        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5479        LEAVE;
5480}
5481
5482/**
5483 * ipr_cancel_op - Cancel specified op
5484 * @scsi_cmd:   scsi command struct
5485 *
5486 * This function cancels the specified op.
5487 *
5488 * Return value:
5489 *      SUCCESS / FAILED
5490 **/
5491static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5492{
5493        struct ipr_cmnd *ipr_cmd;
5494        struct ipr_ioa_cfg *ioa_cfg;
5495        struct ipr_resource_entry *res;
5496        struct ipr_cmd_pkt *cmd_pkt;
5497        u32 ioasc, int_reg;
5498        int i, op_found = 0;
5499        struct ipr_hrr_queue *hrrq;
5500
5501        ENTER;
5502        ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5503        res = scsi_cmd->device->hostdata;
5504
5505        /* If we are currently going through reset/reload, return failed.
5506         * This will force the mid-layer to call ipr_eh_host_reset,
5507         * which will then go to sleep and wait for the reset to complete
5508         */
5509        if (ioa_cfg->in_reset_reload ||
5510            ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5511                return FAILED;
5512        if (!res)
5513                return FAILED;
5514
5515        /*
5516         * If we are aborting a timed out op, chances are that the timeout was caused
5517         * by an EEH error that has not yet been detected. In such cases, reading a
5518         * register will trigger the EEH recovery infrastructure.
5519         */
5520        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5521
5522        if (!ipr_is_gscsi(res))
5523                return FAILED;
5524
5525        for_each_hrrq(hrrq, ioa_cfg) {
5526                spin_lock(&hrrq->_lock);
5527                for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5528                        if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5529                                if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5530                                        op_found = 1;
5531                                        break;
5532                                }
5533                        }
5534                }
5535                spin_unlock(&hrrq->_lock);
5536        }
5537
5538        if (!op_found)
5539                return SUCCESS;
5540
5541        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5542        ipr_cmd->ioarcb.res_handle = res->res_handle;
5543        cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5544        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5545        cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5546        ipr_cmd->u.sdev = scsi_cmd->device;
5547
5548        scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5549                    scsi_cmd->cmnd[0]);
5550        ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5551        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5552
5553        /*
5554         * If the abort task timed out and we sent a bus reset, we will get
5555         * one of the following responses to the abort
5556         */
5557        if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5558                ioasc = 0;
5559                ipr_trace;
5560        }
5561
5562        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5563        if (!ipr_is_naca_model(res))
5564                res->needs_sync_complete = 1;
5565
5566        LEAVE;
5567        return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5568}
5569
5570/**
5571 * ipr_scan_finished - Report whether the device scan is complete
5572 * @shost:          scsi host struct
     * @elapsed_time:   elapsed time of the scan in jiffies
5573 *
5574 * Return value:
5575 *      0 if scan in progress / 1 if scan is complete
5576 **/
5577static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5578{
5579        unsigned long lock_flags;
5580        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5581        int rc = 0;
5582
5583        spin_lock_irqsave(shost->host_lock, lock_flags);
5584        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5585                rc = 1;
5586        if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5587                rc = 1;
5588        spin_unlock_irqrestore(shost->host_lock, lock_flags);
5589        return rc;
5590}
5591
5592/**
5593 * ipr_eh_abort - Abort a single op
5594 * @scsi_cmd:   scsi command struct
5595 *
5596 * Return value:
5597 *      SUCCESS / FAILED
5598 **/
5599static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5600{
5601        unsigned long flags;
5602        int rc;
5603        struct ipr_ioa_cfg *ioa_cfg;
5604
5605        ENTER;
5606
5607        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5608
5609        spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5610        rc = ipr_cancel_op(scsi_cmd);
5611        spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5612
5613        if (rc == SUCCESS)
5614                rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5615        LEAVE;
5616        return rc;
5617}
5618
5619/**
5620 * ipr_handle_other_interrupt - Handle "other" interrupts
5621 * @ioa_cfg:    ioa config struct
5622 * @int_reg:    interrupt register
5623 *
5624 * Return value:
5625 *      IRQ_NONE / IRQ_HANDLED
5626 **/
5627static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5628                                              u32 int_reg)
5629{
5630        irqreturn_t rc = IRQ_HANDLED;
5631        u32 int_mask_reg;
5632
5633        int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5634        int_reg &= ~int_mask_reg;
5635
5636        /* If no operational interrupt is pending, ignore it. On SIS-64
5637         * we must also check for a stage change interrupt before giving up.
5638         */
5639        if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5640                if (ioa_cfg->sis64) {
5641                        int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5642                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5643                        if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5644
5645                                /* clear stage change */
5646                                writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5647                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5648                                list_del(&ioa_cfg->reset_cmd->queue);
5649                                del_timer(&ioa_cfg->reset_cmd->timer);
5650                                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5651                                return IRQ_HANDLED;
5652                        }
5653                }
5654
5655                return IRQ_NONE;
5656        }
5657
5658        if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5659                /* Mask the interrupt */
5660                writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5661                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5662
5663                list_del(&ioa_cfg->reset_cmd->queue);
5664                del_timer(&ioa_cfg->reset_cmd->timer);
5665                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5666        } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5667                if (ioa_cfg->clear_isr) {
5668                        if (ipr_debug && printk_ratelimit())
5669                                dev_err(&ioa_cfg->pdev->dev,
5670                                        "Spurious interrupt detected. 0x%08X\n", int_reg);
5671                        writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5672                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5673                        return IRQ_NONE;
5674                }
5675        } else {
5676                if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5677                        ioa_cfg->ioa_unit_checked = 1;
5678                else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5679                        dev_err(&ioa_cfg->pdev->dev,
5680                                "No Host RRQ. 0x%08X\n", int_reg);
5681                else
5682                        dev_err(&ioa_cfg->pdev->dev,
5683                                "Permanent IOA failure. 0x%08X\n", int_reg);
5684
5685                if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5686                        ioa_cfg->sdt_state = GET_DUMP;
5687
5688                ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5689                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5690        }
5691
5692        return rc;
5693}
5694
5695/**
5696 * ipr_isr_eh - Interrupt service routine error handler
5697 * @ioa_cfg:    ioa config struct
5698 * @msg:        message to log
5699 * @number:     number to log with the message
5700 * Return value:
5701 *      none
5702 **/
5703static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5704{
5705        ioa_cfg->errors_logged++;
5706        dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5707
5708        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5709                ioa_cfg->sdt_state = GET_DUMP;
5710
5711        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5712}
5713
5714static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5715                                                struct list_head *doneq)
5716{
5717        u32 ioasc;
5718        u16 cmd_index;
5719        struct ipr_cmnd *ipr_cmd;
5720        struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5721        int num_hrrq = 0;
5722
5723        /* If interrupts are disabled, ignore the interrupt */
5724        if (!hrr_queue->allow_interrupts)
5725                return 0;
5726
5727        while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5728               hrr_queue->toggle_bit) {
5729
5730                cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5731                             IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5732                             IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5733
5734                if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5735                             cmd_index < hrr_queue->min_cmd_id)) {
5736                        ipr_isr_eh(ioa_cfg,
5737                                "Invalid response handle from IOA: ",
5738                                cmd_index);
5739                        break;
5740                }
5741
5742                ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5743                ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5744
5745                ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5746
5747                list_move_tail(&ipr_cmd->queue, doneq);
5748
5749                if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5750                        hrr_queue->hrrq_curr++;
5751                } else {
5752                        hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5753                        hrr_queue->toggle_bit ^= 1u;
5754                }
5755                num_hrrq++;
5756                if (budget > 0 && num_hrrq >= budget)
5757                        break;
5758        }
5759
5760        return num_hrrq;
5761}
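/*
 * Editorial sketch (not part of the driver): ipr_process_hrrq() above is a
 * classic toggle-bit ring consumer. The producer writes each entry with a
 * phase bit and flips that phase every time it wraps, so the consumer can
 * distinguish new entries from stale ones without any shared index. A
 * minimal stand-alone model of the protocol; all demo_* names and mask
 * values are hypothetical:
 */
#include <stdint.h>

#define DEMO_TOGGLE_BIT         0x00000001u
#define DEMO_HANDLE_MASK        0xfffffffcu
#define DEMO_HANDLE_SHIFT       2

struct demo_hrrq {
        uint32_t *curr, *start, *end;   /* end points at the last slot */
        uint32_t toggle_bit;            /* phase the consumer expects next */
};

/* Drain completed handles until the phase bit stops matching. */
static int demo_drain(struct demo_hrrq *q, void (*done)(uint32_t handle))
{
        int n = 0;

        while ((*q->curr & DEMO_TOGGLE_BIT) == q->toggle_bit) {
                done((*q->curr & DEMO_HANDLE_MASK) >> DEMO_HANDLE_SHIFT);
                if (q->curr < q->end) {
                        q->curr++;
                } else {
                        q->curr = q->start;     /* wrap ... */
                        q->toggle_bit ^= 1u;    /* ... and flip the phase */
                }
                n++;
        }
        return n;
}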
5762
5763static int ipr_iopoll(struct irq_poll *iop, int budget)
5764{
5765        struct ipr_ioa_cfg *ioa_cfg;
5766        struct ipr_hrr_queue *hrrq;
5767        struct ipr_cmnd *ipr_cmd, *temp;
5768        unsigned long hrrq_flags;
5769        int completed_ops;
5770        LIST_HEAD(doneq);
5771
5772        hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5773        ioa_cfg = hrrq->ioa_cfg;
5774
5775        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5776        completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5777
5778        if (completed_ops < budget)
5779                irq_poll_complete(iop);
5780        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5781
5782        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5783                list_del(&ipr_cmd->queue);
5784                del_timer(&ipr_cmd->timer);
5785                ipr_cmd->fast_done(ipr_cmd);
5786        }
5787
5788        return completed_ops;
5789}
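/*
 * Editorial note: ipr_iopoll() above follows the standard irq_poll
 * contract: consume at most @budget completions per invocation and call
 * irq_poll_complete() once fewer than @budget remain. The registration
 * side lives outside this excerpt; a hedged sketch of what it presumably
 * looks like, one poller per HRR queue weighted by iopoll_weight:
 */
static void demo_setup_iopoll(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_hrr_queue *hrrq;

        for_each_hrrq(hrrq, ioa_cfg)
                irq_poll_init(&hrrq->iopoll, ioa_cfg->iopoll_weight,
                              ipr_iopoll);
}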
5790
5791/**
5792 * ipr_isr - Interrupt service routine
5793 * @irq:        irq number
5794 * @devp:       pointer to ioa config struct
5795 *
5796 * Return value:
5797 *      IRQ_NONE / IRQ_HANDLED
5798 **/
5799static irqreturn_t ipr_isr(int irq, void *devp)
5800{
5801        struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5802        struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5803        unsigned long hrrq_flags = 0;
5804        u32 int_reg = 0;
5805        int num_hrrq = 0;
5806        int irq_none = 0;
5807        struct ipr_cmnd *ipr_cmd, *temp;
5808        irqreturn_t rc = IRQ_NONE;
5809        LIST_HEAD(doneq);
5810
5811        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5812        /* If interrupts are disabled, ignore the interrupt */
5813        if (!hrrq->allow_interrupts) {
5814                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5815                return IRQ_NONE;
5816        }
5817
5818        while (1) {
5819                if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5820                        rc = IRQ_HANDLED;
5821
5822                        if (!ioa_cfg->clear_isr)
5823                                break;
5824
5825                        /* Clear the PCI interrupt */
5826                        num_hrrq = 0;
5827                        do {
5828                                writel(IPR_PCII_HRRQ_UPDATED,
5829                                     ioa_cfg->regs.clr_interrupt_reg32);
5830                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5831                        } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5832                                num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5833
5834                } else if (rc == IRQ_NONE && irq_none == 0) {
5835                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5836                        irq_none++;
5837                } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5838                           int_reg & IPR_PCII_HRRQ_UPDATED) {
5839                        ipr_isr_eh(ioa_cfg,
5840                                "Error clearing HRRQ: ", num_hrrq);
5841                        rc = IRQ_HANDLED;
5842                        break;
5843                } else
5844                        break;
5845        }
5846
5847        if (unlikely(rc == IRQ_NONE))
5848                rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5849
5850        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5851        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5852                list_del(&ipr_cmd->queue);
5853                del_timer(&ipr_cmd->timer);
5854                ipr_cmd->fast_done(ipr_cmd);
5855        }
5856        return rc;
5857}
5858
5859/**
5860 * ipr_isr_mhrrq - Interrupt service routine
5861 * @irq:        irq number
5862 * @devp:       pointer to ioa config struct
5863 *
5864 * Return value:
5865 *      IRQ_NONE / IRQ_HANDLED
5866 **/
5867static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5868{
5869        struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5870        struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5871        unsigned long hrrq_flags = 0;
5872        struct ipr_cmnd *ipr_cmd, *temp;
5873        irqreturn_t rc = IRQ_NONE;
5874        LIST_HEAD(doneq);
5875
5876        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5877
5878        /* If interrupts are disabled, ignore the interrupt */
5879        if (!hrrq->allow_interrupts) {
5880                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5881                return IRQ_NONE;
5882        }
5883
5884        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5885                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5886                       hrrq->toggle_bit) {
5887                        irq_poll_sched(&hrrq->iopoll);
5888                        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5889                        return IRQ_HANDLED;
5890                }
5891        } else {
5892                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5893                        hrrq->toggle_bit) {
5894                        if (ipr_process_hrrq(hrrq, -1, &doneq))
5895                                rc = IRQ_HANDLED;
5896                }
5897        }
5898
5899        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5900
5901        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5902                list_del(&ipr_cmd->queue);
5903                del_timer(&ipr_cmd->timer);
5904                ipr_cmd->fast_done(ipr_cmd);
5905        }
5906        return rc;
5907}
5908
5909/**
5910 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5911 * @ioa_cfg:    ioa config struct
5912 * @ipr_cmd:    ipr command struct
5913 *
5914 * Return value:
5915 *      0 on success / -1 on failure
5916 **/
5917static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5918                             struct ipr_cmnd *ipr_cmd)
5919{
5920        int i, nseg;
5921        struct scatterlist *sg;
5922        u32 length;
5923        u32 ioadl_flags = 0;
5924        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5925        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5926        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5927
5928        length = scsi_bufflen(scsi_cmd);
5929        if (!length)
5930                return 0;
5931
5932        nseg = scsi_dma_map(scsi_cmd);
5933        if (nseg < 0) {
5934                if (printk_ratelimit())
5935                        dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5936                return -1;
5937        }
5938
5939        ipr_cmd->dma_use_sg = nseg;
5940
5941        ioarcb->data_transfer_length = cpu_to_be32(length);
5942        ioarcb->ioadl_len =
5943                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5944
5945        if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5946                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5947                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5948        } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5949                ioadl_flags = IPR_IOADL_FLAGS_READ;
5950
5951        scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5952                ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5953                ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5954                ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5955        }
5956
5957        ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5958        return 0;
5959}
5960
5961/**
5962 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5963 * @ioa_cfg:    ioa config struct
5964 * @ipr_cmd:    ipr command struct
5965 *
5966 * Return value:
5967 *      0 on success / -1 on failure
5968 **/
5969static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5970                           struct ipr_cmnd *ipr_cmd)
5971{
5972        int i, nseg;
5973        struct scatterlist *sg;
5974        u32 length;
5975        u32 ioadl_flags = 0;
5976        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5977        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5978        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5979
5980        length = scsi_bufflen(scsi_cmd);
5981        if (!length)
5982                return 0;
5983
5984        nseg = scsi_dma_map(scsi_cmd);
5985        if (nseg < 0) {
5986                dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5987                return -1;
5988        }
5989
5990        ipr_cmd->dma_use_sg = nseg;
5991
5992        if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5993                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5994                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5995                ioarcb->data_transfer_length = cpu_to_be32(length);
5996                ioarcb->ioadl_len =
5997                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5998        } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5999                ioadl_flags = IPR_IOADL_FLAGS_READ;
6000                ioarcb->read_data_transfer_length = cpu_to_be32(length);
6001                ioarcb->read_ioadl_len =
6002                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6003        }
6004
6005        if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6006                ioadl = ioarcb->u.add_data.u.ioadl;
6007                ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6008                                    offsetof(struct ipr_ioarcb, u.add_data));
6009                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6010        }
6011
6012        scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6013                ioadl[i].flags_and_data_len =
6014                        cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6015                ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6016        }
6017
6018        ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6019        return 0;
6020}
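/*
 * Editorial sketch: the two builders above differ mainly in descriptor
 * layout. The 64-bit form keeps flags, length, and a 64-bit address in
 * separate fields, while the 32-bit form packs the flags into the top
 * byte of one big-endian word with the segment length below it (hence
 * the "ioadl_flags | sg_dma_len(sg)" OR). A hypothetical helper making
 * that packing explicit; the 24-bit length mask is an editorial
 * assumption:
 */
static inline __be32 demo_pack_ioadl(u32 flags, u32 len)
{
        return cpu_to_be32(flags | (len & 0x00ffffff));
}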
6021
6022/**
6023 * __ipr_erp_done - Process completion of ERP for a device
6024 * @ipr_cmd:            ipr command struct
6025 *
6026 * This function copies the sense buffer into the scsi_cmd
6027 * struct and calls the scsi_done completion function.
6028 *
6029 * Return value:
6030 *      nothing
6031 **/
6032static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6033{
6034        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6035        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6036        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6037
6038        if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6039                scsi_cmd->result |= (DID_ERROR << 16);
6040                scmd_printk(KERN_ERR, scsi_cmd,
6041                            "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6042        } else {
6043                memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6044                       SCSI_SENSE_BUFFERSIZE);
6045        }
6046
6047        if (res) {
6048                if (!ipr_is_naca_model(res))
6049                        res->needs_sync_complete = 1;
6050                res->in_erp = 0;
6051        }
6052        scsi_dma_unmap(ipr_cmd->scsi_cmd);
6053        scsi_cmd->scsi_done(scsi_cmd);
6054        if (ipr_cmd->eh_comp)
6055                complete(ipr_cmd->eh_comp);
6056        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6057}
6058
6059/**
6060 * ipr_erp_done - Process completion of ERP for a device
6061 * @ipr_cmd:            ipr command struct
6062 *
6063 * This function copies the sense buffer into the scsi_cmd
6064 * struct and calls the scsi_done completion function.
6065 *
6066 * Return value:
6067 *      nothing
6068 **/
6069static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6070{
6071        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6072        unsigned long hrrq_flags;
6073
6074        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6075        __ipr_erp_done(ipr_cmd);
6076        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6077}
6078
6079/**
6080 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6081 * @ipr_cmd:    ipr command struct
6082 *
6083 * Return value:
6084 *      none
6085 **/
6086static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6087{
6088        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6089        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6090        dma_addr_t dma_addr = ipr_cmd->dma_addr;
6091
6092        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6093        ioarcb->data_transfer_length = 0;
6094        ioarcb->read_data_transfer_length = 0;
6095        ioarcb->ioadl_len = 0;
6096        ioarcb->read_ioadl_len = 0;
6097        ioasa->hdr.ioasc = 0;
6098        ioasa->hdr.residual_data_len = 0;
6099
6100        if (ipr_cmd->ioa_cfg->sis64)
6101                ioarcb->u.sis64_addr_data.data_ioadl_addr =
6102                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6103        else {
6104                ioarcb->write_ioadl_addr =
6105                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6106                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6107        }
6108}
6109
6110/**
6111 * __ipr_erp_request_sense - Send request sense to a device
6112 * @ipr_cmd:    ipr command struct
6113 *
6114 * This function sends a request sense to a device as a result
6115 * of a check condition.
6116 *
6117 * Return value:
6118 *      nothing
6119 **/
6120static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6121{
6122        struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6123        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6124
6125        if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6126                __ipr_erp_done(ipr_cmd);
6127                return;
6128        }
6129
6130        ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6131
6132        cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6133        cmd_pkt->cdb[0] = REQUEST_SENSE;
6134        cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6135        cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6136        cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6137        cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6138
6139        ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6140                       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6141
6142        ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6143                   IPR_REQUEST_SENSE_TIMEOUT * 2);
6144}
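/*
 * Editorial note: the CDB built above is the 6-byte REQUEST SENSE
 * command; only the opcode and the allocation length (byte 4) are
 * non-zero. The same layout as a small sketch:
 */
static void demo_build_request_sense_cdb(u8 cdb[6], u8 alloc_len)
{
        memset(cdb, 0, 6);
        cdb[0] = 0x03;          /* REQUEST SENSE opcode */
        cdb[4] = alloc_len;     /* e.g. SCSI_SENSE_BUFFERSIZE */
}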
6145
6146/**
6147 * ipr_erp_request_sense - Send request sense to a device
6148 * @ipr_cmd:    ipr command struct
6149 *
6150 * This function sends a request sense to a device as a result
6151 * of a check condition.
6152 *
6153 * Return value:
6154 *      nothing
6155 **/
6156static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6157{
6158        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6159        unsigned long hrrq_flags;
6160
6161        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6162        __ipr_erp_request_sense(ipr_cmd);
6163        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6164}
6165
6166/**
6167 * ipr_erp_cancel_all - Send cancel all to a device
6168 * @ipr_cmd:    ipr command struct
6169 *
6170 * This function sends a cancel all to a device to clear the
6171 * queue. If we are running TCQ on the device, QERR is set to 1,
6172 * which means all outstanding ops have been dropped on the floor.
6173 * Cancel all will return them to us.
6174 *
6175 * Return value:
6176 *      nothing
6177 **/
6178static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6179{
6180        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6181        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6182        struct ipr_cmd_pkt *cmd_pkt;
6183
6184        res->in_erp = 1;
6185
6186        ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6187
6188        if (!scsi_cmd->device->simple_tags) {
6189                __ipr_erp_request_sense(ipr_cmd);
6190                return;
6191        }
6192
6193        cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6194        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6195        cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6196
6197        ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6198                   IPR_CANCEL_ALL_TIMEOUT);
6199}
6200
6201/**
6202 * ipr_dump_ioasa - Dump contents of IOASA
6203 * @ioa_cfg:    ioa config struct
6204 * @ipr_cmd:    ipr command struct
6205 * @res:                resource entry struct
6206 *
6207 * This function is invoked by the interrupt handler when ops
6208 * fail. It will log the IOASA if appropriate. Only called
6209 * for GPDD ops.
6210 *
6211 * Return value:
6212 *      none
6213 **/
6214static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6215                           struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6216{
6217        int i;
6218        u16 data_len;
6219        u32 ioasc, fd_ioasc;
6220        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6221        __be32 *ioasa_data = (__be32 *)ioasa;
6222        int error_index;
6223
6224        ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6225        fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6226
6227        if (0 == ioasc)
6228                return;
6229
6230        if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6231                return;
6232
6233        if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6234                error_index = ipr_get_error(fd_ioasc);
6235        else
6236                error_index = ipr_get_error(ioasc);
6237
6238        if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6239                /* Don't log an error if the IOA already logged one */
6240                if (ioasa->hdr.ilid != 0)
6241                        return;
6242
6243                if (!ipr_is_gscsi(res))
6244                        return;
6245
6246                if (ipr_error_table[error_index].log_ioasa == 0)
6247                        return;
6248        }
6249
6250        ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6251
6252        data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6253        if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6254                data_len = sizeof(struct ipr_ioasa64);
6255        else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6256                data_len = sizeof(struct ipr_ioasa);
6257
6258        ipr_err("IOASA Dump:\n");
6259
6260        for (i = 0; i < data_len / 4; i += 4) {
6261                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6262                        be32_to_cpu(ioasa_data[i]),
6263                        be32_to_cpu(ioasa_data[i+1]),
6264                        be32_to_cpu(ioasa_data[i+2]),
6265                        be32_to_cpu(ioasa_data[i+3]));
6266        }
6267}
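/*
 * Editorial note: the open-coded word dump above could also be expressed
 * with the kernel's generic helper. A sketch printing the same region,
 * four 32-bit words per row with offsets (each word appears in host byte
 * order rather than the be32_to_cpu() form used above):
 */
static void demo_dump_ioasa_words(const void *ioasa_data, u16 data_len)
{
        print_hex_dump(KERN_ERR, "IOASA: ", DUMP_PREFIX_OFFSET,
                       16, 4, ioasa_data, data_len, false);
}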
6268
6269/**
6270 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6271 * @ipr_cmd:    ipr command struct (source of the IOASA and of the
6272 *                      scsi_cmd sense buffer that gets filled in)
6273 *
6274 * Return value:
6275 *      none
6276 **/
6277static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6278{
6279        u32 failing_lba;
6280        u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6281        struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6282        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6283        u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6284
6285        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6286
6287        if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6288                return;
6289
6290        ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6291
6292        if (ipr_is_vset_device(res) &&
6293            ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6294            ioasa->u.vset.failing_lba_hi != 0) {
6295                sense_buf[0] = 0x72;
6296                sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6297                sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6298                sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6299
6300                sense_buf[7] = 12;
6301                sense_buf[8] = 0;
6302                sense_buf[9] = 0x0A;
6303                sense_buf[10] = 0x80;
6304
6305                failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6306
6307                sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6308                sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6309                sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6310                sense_buf[15] = failing_lba & 0x000000ff;
6311
6312                failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6313
6314                sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6315                sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6316                sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6317                sense_buf[19] = failing_lba & 0x000000ff;
6318        } else {
6319                sense_buf[0] = 0x70;
6320                sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6321                sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6322                sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6323
6324                /* Illegal request */
6325                if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6326                    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6327                        sense_buf[7] = 10;      /* additional length */
6328
6329                        /* IOARCB was in error */
6330                        if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6331                                sense_buf[15] = 0xC0;
6332                        else    /* Parameter data was invalid */
6333                                sense_buf[15] = 0x80;
6334
6335                        sense_buf[16] =
6336                            ((IPR_FIELD_POINTER_MASK &
6337                              be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6338                        sense_buf[17] =
6339                            (IPR_FIELD_POINTER_MASK &
6340                             be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6341                } else {
6342                        if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6343                                if (ipr_is_vset_device(res))
6344                                        failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6345                                else
6346                                        failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6347
6348                                sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6349                                sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6350                                sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6351                                sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6352                                sense_buf[6] = failing_lba & 0x000000ff;
6353                        }
6354
6355                        sense_buf[7] = 6;       /* additional length */
6356                }
6357        }
6358}
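/*
 * Editorial sketch: the first branch above emits descriptor-format sense
 * (response code 0x72) because a failing LBA wider than 32 bits cannot
 * fit the 4-byte information field of fixed-format (0x70) sense. The
 * hand-packed bytes 8..19 form an SPC "information" descriptor; the same
 * packing as a hypothetical helper taking the full 64-bit LBA:
 */
static void demo_put_info_desc(u8 *desc, u64 failing_lba)
{
        int i;

        desc[0] = 0x00;                 /* descriptor type: information */
        desc[1] = 0x0A;                 /* additional length */
        desc[2] = 0x80;                 /* VALID */
        desc[3] = 0x00;                 /* reserved */
        for (i = 0; i < 8; i++)         /* big-endian 64-bit LBA */
                desc[4 + i] = failing_lba >> (56 - 8 * i);
}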
6359
6360/**
6361 * ipr_get_autosense - Copy autosense data to sense buffer
6362 * @ipr_cmd:    ipr command struct
6363 *
6364 * This function copies the autosense buffer to the buffer
6365 * in the scsi_cmd, if there is autosense available.
6366 *
6367 * Return value:
6368 *      1 if autosense was available / 0 if not
6369 **/
6370static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6371{
6372        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6373        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6374
6375        if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6376                return 0;
6377
6378        if (ipr_cmd->ioa_cfg->sis64)
6379                memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6380                       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6381                           SCSI_SENSE_BUFFERSIZE));
6382        else
6383                memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6384                       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6385                           SCSI_SENSE_BUFFERSIZE));
6386        return 1;
6387}
6388
6389/**
6390 * ipr_erp_start - Process an error response for a SCSI op
6391 * @ioa_cfg:    ioa config struct
6392 * @ipr_cmd:    ipr command struct
6393 *
6394 * This function determines whether or not to initiate ERP
6395 * on the affected device.
6396 *
6397 * Return value:
6398 *      nothing
6399 **/
6400static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6401                              struct ipr_cmnd *ipr_cmd)
6402{
6403        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6404        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6405        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6406        u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6407
6408        if (!res) {
6409                __ipr_scsi_eh_done(ipr_cmd);
6410                return;
6411        }
6412
6413        if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6414                ipr_gen_sense(ipr_cmd);
6415
6416        ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6417
6418        switch (masked_ioasc) {
6419        case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6420                if (ipr_is_naca_model(res))
6421                        scsi_cmd->result |= (DID_ABORT << 16);
6422                else
6423                        scsi_cmd->result |= (DID_IMM_RETRY << 16);
6424                break;
6425        case IPR_IOASC_IR_RESOURCE_HANDLE:
6426        case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6427                scsi_cmd->result |= (DID_NO_CONNECT << 16);
6428                break;
6429        case IPR_IOASC_HW_SEL_TIMEOUT:
6430                scsi_cmd->result |= (DID_NO_CONNECT << 16);
6431                if (!ipr_is_naca_model(res))
6432                        res->needs_sync_complete = 1;
6433                break;
6434        case IPR_IOASC_SYNC_REQUIRED:
6435                if (!res->in_erp)
6436                        res->needs_sync_complete = 1;
6437                scsi_cmd->result |= (DID_IMM_RETRY << 16);
6438                break;
6439        case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6440        case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6441                /*
6442                 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6443                 * so SCSI mid-layer and upper layers handle it accordingly.
6444                 */
6445                if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6446                        scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6447                break;
6448        case IPR_IOASC_BUS_WAS_RESET:
6449        case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6450                /*
6451                 * Report the bus reset and ask for a retry. The device
6452                 * will give CC/UA the next command.
6453                 */
6454                if (!res->resetting_device)
6455                        scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6456                scsi_cmd->result |= (DID_ERROR << 16);
6457                if (!ipr_is_naca_model(res))
6458                        res->needs_sync_complete = 1;
6459                break;
6460        case IPR_IOASC_HW_DEV_BUS_STATUS:
6461                scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6462                if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6463                        if (!ipr_get_autosense(ipr_cmd)) {
6464                                if (!ipr_is_naca_model(res)) {
6465                                        ipr_erp_cancel_all(ipr_cmd);
6466                                        return;
6467                                }
6468                        }
6469                }
6470                if (!ipr_is_naca_model(res))
6471                        res->needs_sync_complete = 1;
6472                break;
6473        case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6474                break;
6475        case IPR_IOASC_IR_NON_OPTIMIZED:
6476                if (res->raw_mode) {
6477                        res->raw_mode = 0;
6478                        scsi_cmd->result |= (DID_IMM_RETRY << 16);
6479                } else
6480                        scsi_cmd->result |= (DID_ERROR << 16);
6481                break;
6482        default:
6483                if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6484                        scsi_cmd->result |= (DID_ERROR << 16);
6485                if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6486                        res->needs_sync_complete = 1;
6487                break;
6488        }
6489
6490        scsi_dma_unmap(ipr_cmd->scsi_cmd);
6491        scsi_cmd->scsi_done(scsi_cmd);
6492        if (ipr_cmd->eh_comp)
6493                complete(ipr_cmd->eh_comp);
6494        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6495}
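/*
 * Editorial note: every (DID_xxx << 16) above sets the host byte of the
 * midlayer's packed result word, next to the SCSI status in the low
 * byte. The long-standing convention, sketched:
 */
static inline u32 demo_pack_scsi_result(u8 sam_status, u8 host_byte)
{
        return sam_status | (host_byte << 16);
}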
6496
6497/**
6498 * ipr_scsi_done - mid-layer done function
6499 * @ipr_cmd:    ipr command struct
6500 *
6501 * This function is invoked by the interrupt handler for
6502 * ops generated by the SCSI mid-layer
6503 *
6504 * Return value:
6505 *      none
6506 **/
6507static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6508{
6509        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6510        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6511        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6512        unsigned long lock_flags;
6513
6514        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6515
6516        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6517                scsi_dma_unmap(scsi_cmd);
6518
6519                spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6520                scsi_cmd->scsi_done(scsi_cmd);
6521                if (ipr_cmd->eh_comp)
6522                        complete(ipr_cmd->eh_comp);
6523                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6524                spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6525        } else {
6526                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6527                spin_lock(&ipr_cmd->hrrq->_lock);
6528                ipr_erp_start(ioa_cfg, ipr_cmd);
6529                spin_unlock(&ipr_cmd->hrrq->_lock);
6530                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6531        }
6532}
6533
6534/**
6535 * ipr_queuecommand - Queue a mid-layer request
6536 * @shost:              scsi host struct
6537 * @scsi_cmd:   scsi command struct
6538 *
6539 * This function queues a request generated by the mid-layer.
6540 *
6541 * Return value:
6542 *      0 on success
6543 *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6544 *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6545 **/
6546static int ipr_queuecommand(struct Scsi_Host *shost,
6547                            struct scsi_cmnd *scsi_cmd)
6548{
6549        struct ipr_ioa_cfg *ioa_cfg;
6550        struct ipr_resource_entry *res;
6551        struct ipr_ioarcb *ioarcb;
6552        struct ipr_cmnd *ipr_cmd;
6553        unsigned long hrrq_flags, lock_flags;
6554        int rc;
6555        struct ipr_hrr_queue *hrrq;
6556        int hrrq_id;
6557
6558        ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6559
6560        scsi_cmd->result = (DID_OK << 16);
6561        res = scsi_cmd->device->hostdata;
6562
6563        if (ipr_is_gata(res) && res->sata_port) {
6564                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6565                rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6566                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6567                return rc;
6568        }
6569
6570        hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6571        hrrq = &ioa_cfg->hrrq[hrrq_id];
6572
6573        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6574        /*
6575         * We are currently blocking all devices due to a host reset.
6576         * We have told the host to stop giving us new requests, but
6577         * ERP ops don't count. FIXME
6578         */
6579        if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6580                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6581                return SCSI_MLQUEUE_HOST_BUSY;
6582        }
6583
6584        /*
6585         * FIXME - Create scsi_set_host_offline interface
6586         *  and the ioa_is_dead check can be removed
6587         */
6588        if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6589                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6590                goto err_nodev;
6591        }
6592
6593        ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6594        if (ipr_cmd == NULL) {
6595                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6596                return SCSI_MLQUEUE_HOST_BUSY;
6597        }
6598        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6599
6600        ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6601        ioarcb = &ipr_cmd->ioarcb;
6602
6603        memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6604        ipr_cmd->scsi_cmd = scsi_cmd;
6605        ipr_cmd->done = ipr_scsi_eh_done;
6606
6607        if (ipr_is_gscsi(res)) {
6608                if (scsi_cmd->underflow == 0)
6609                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6610
6611                if (res->reset_occurred) {
6612                        res->reset_occurred = 0;
6613                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6614                }
6615        }
6616
6617        if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6618                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6619
6620                ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6621                if (scsi_cmd->flags & SCMD_TAGGED)
6622                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6623                else
6624                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6625        }
6626
6627        if (scsi_cmd->cmnd[0] >= 0xC0 &&
6628            (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6629                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6630        }
6631        if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6632                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6633
6634                if (scsi_cmd->underflow == 0)
6635                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6636        }
6637
6638        if (ioa_cfg->sis64)
6639                rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6640        else
6641                rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6642
6643        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6644        if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6645                list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6646                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6647                if (!rc)
6648                        scsi_dma_unmap(scsi_cmd);
6649                return SCSI_MLQUEUE_HOST_BUSY;
6650        }
6651
6652        if (unlikely(hrrq->ioa_is_dead)) {
6653                list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6654                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6655                scsi_dma_unmap(scsi_cmd);
6656                goto err_nodev;
6657        }
6658
6659        ioarcb->res_handle = res->res_handle;
6660        if (res->needs_sync_complete) {
6661                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6662                res->needs_sync_complete = 0;
6663        }
6664        list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6665        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6666        ipr_send_command(ipr_cmd);
6667        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6668        return 0;
6669
6670err_nodev:
6671        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6672        memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6673        scsi_cmd->result = (DID_NO_CONNECT << 16);
6674        scsi_cmd->scsi_done(scsi_cmd);
6675        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6676        return 0;
6677}
6678
6679/**
6680 * ipr_ioctl - IOCTL handler
6681 * @sdev:       scsi device struct
6682 * @cmd:        IOCTL cmd
6683 * @arg:        IOCTL arg
6684 *
6685 * Return value:
6686 *      0 on success / other on failure
6687 **/
6688static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6689                     void __user *arg)
6690{
6691        struct ipr_resource_entry *res;
6692
6693        res = (struct ipr_resource_entry *)sdev->hostdata;
6694        if (res && ipr_is_gata(res)) {
6695                if (cmd == HDIO_GET_IDENTITY)
6696                        return -ENOTTY;
6697                return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6698        }
6699
6700        return -EINVAL;
6701}
6702
6703/**
6704 * ipr_ioa_info - Get information about the card/driver
6705 * @host:       scsi host struct
6706 *
6707 * Return value:
6708 *      pointer to buffer with description string
6709 **/
6710static const char *ipr_ioa_info(struct Scsi_Host *host)
6711{
6712        static char buffer[512];
6713        struct ipr_ioa_cfg *ioa_cfg;
6714        unsigned long lock_flags = 0;
6715
6716        ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6717
6718        spin_lock_irqsave(host->host_lock, lock_flags);
6719        sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6720        spin_unlock_irqrestore(host->host_lock, lock_flags);
6721
6722        return buffer;
6723}
6724
6725static struct scsi_host_template driver_template = {
6726        .module = THIS_MODULE,
6727        .name = "IPR",
6728        .info = ipr_ioa_info,
6729        .ioctl = ipr_ioctl,
6730        .queuecommand = ipr_queuecommand,
6731        .eh_abort_handler = ipr_eh_abort,
6732        .eh_device_reset_handler = ipr_eh_dev_reset,
6733        .eh_host_reset_handler = ipr_eh_host_reset,
6734        .slave_alloc = ipr_slave_alloc,
6735        .slave_configure = ipr_slave_configure,
6736        .slave_destroy = ipr_slave_destroy,
6737        .scan_finished = ipr_scan_finished,
6738        .target_alloc = ipr_target_alloc,
6739        .target_destroy = ipr_target_destroy,
6740        .change_queue_depth = ipr_change_queue_depth,
6741        .bios_param = ipr_biosparam,
6742        .can_queue = IPR_MAX_COMMANDS,
6743        .this_id = -1,
6744        .sg_tablesize = IPR_MAX_SGLIST,
6745        .max_sectors = IPR_IOA_MAX_SECTORS,
6746        .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6747        .shost_attrs = ipr_ioa_attrs,
6748        .sdev_attrs = ipr_dev_attrs,
6749        .proc_name = IPR_NAME,
6750};
6751
6752/**
6753 * ipr_ata_phy_reset - libata phy_reset handler
6754 * @ap:         ata port to reset
6755 *
6756 **/
6757static void ipr_ata_phy_reset(struct ata_port *ap)
6758{
6759        unsigned long flags;
6760        struct ipr_sata_port *sata_port = ap->private_data;
6761        struct ipr_resource_entry *res = sata_port->res;
6762        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6763        int rc;
6764
6765        ENTER;
6766        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6767        while (ioa_cfg->in_reset_reload) {
6768                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6769                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6770                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6771        }
6772
6773        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6774                goto out_unlock;
6775
6776        rc = ipr_device_reset(ioa_cfg, res);
6777
6778        if (rc) {
6779                ap->link.device[0].class = ATA_DEV_NONE;
6780                goto out_unlock;
6781        }
6782
6783        ap->link.device[0].class = res->ata_class;
6784        if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6785                ap->link.device[0].class = ATA_DEV_NONE;
6786
6787out_unlock:
6788        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6789        LEAVE;
6790}
6791
6792/**
6793 * ipr_ata_post_internal - Cleanup after an internal command
6794 * @qc: ATA queued command
6795 *
6796 * Return value:
6797 *      none
6798 **/
6799static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6800{
6801        struct ipr_sata_port *sata_port = qc->ap->private_data;
6802        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6803        struct ipr_cmnd *ipr_cmd;
6804        struct ipr_hrr_queue *hrrq;
6805        unsigned long flags;
6806
6807        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6808        while (ioa_cfg->in_reset_reload) {
6809                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6810                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6811                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6812        }
6813
6814        for_each_hrrq(hrrq, ioa_cfg) {
6815                spin_lock(&hrrq->_lock);
6816                list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6817                        if (ipr_cmd->qc == qc) {
6818                                ipr_device_reset(ioa_cfg, sata_port->res);
6819                                break;
6820                        }
6821                }
6822                spin_unlock(&hrrq->_lock);
6823        }
6824        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6825}
6826
6827/**
6828 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6829 * @regs:       destination
6830 * @tf: source ATA taskfile
6831 *
6832 * Return value:
6833 *      none
6834 **/
6835static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6836                             struct ata_taskfile *tf)
6837{
6838        regs->feature = tf->feature;
6839        regs->nsect = tf->nsect;
6840        regs->lbal = tf->lbal;
6841        regs->lbam = tf->lbam;
6842        regs->lbah = tf->lbah;
6843        regs->device = tf->device;
6844        regs->command = tf->command;
6845        regs->hob_feature = tf->hob_feature;
6846        regs->hob_nsect = tf->hob_nsect;
6847        regs->hob_lbal = tf->hob_lbal;
6848        regs->hob_lbam = tf->hob_lbam;
6849        regs->hob_lbah = tf->hob_lbah;
6850        regs->ctl = tf->ctl;
6851}
6852
6853/**
6854 * ipr_sata_done - done function for SATA commands
6855 * @ipr_cmd:    ipr command struct
6856 *
6857 * This function is invoked by the interrupt handler for
6858 * ops generated by the SCSI mid-layer to SATA devices
6859 *
6860 * Return value:
6861 *      none
6862 **/
6863static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6864{
6865        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6866        struct ata_queued_cmd *qc = ipr_cmd->qc;
6867        struct ipr_sata_port *sata_port = qc->ap->private_data;
6868        struct ipr_resource_entry *res = sata_port->res;
6869        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6870
6871        spin_lock(&ipr_cmd->hrrq->_lock);
6872        if (ipr_cmd->ioa_cfg->sis64)
6873                memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6874                       sizeof(struct ipr_ioasa_gata));
6875        else
6876                memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6877                       sizeof(struct ipr_ioasa_gata));
6878        ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6879
6880        if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6881                scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6882
6883        if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6884                qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6885        else
6886                qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6887        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6888        spin_unlock(&ipr_cmd->hrrq->_lock);
6889        ata_qc_complete(qc);
6890}
6891
6892/**
6893 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6894 * @ipr_cmd:    ipr command struct
6895 * @qc:         ATA queued command
6896 *
6897 **/
6898static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6899                                  struct ata_queued_cmd *qc)
6900{
6901        u32 ioadl_flags = 0;
6902        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6903        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6904        struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6905        int len = qc->nbytes;
6906        struct scatterlist *sg;
6907        unsigned int si;
6908        dma_addr_t dma_addr = ipr_cmd->dma_addr;
6909
6910        if (len == 0)
6911                return;
6912
6913        if (qc->dma_dir == DMA_TO_DEVICE) {
6914                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6915                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6916        } else if (qc->dma_dir == DMA_FROM_DEVICE)
6917                ioadl_flags = IPR_IOADL_FLAGS_READ;
6918
6919        ioarcb->data_transfer_length = cpu_to_be32(len);
6920        ioarcb->ioadl_len =
6921                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6922        ioarcb->u.sis64_addr_data.data_ioadl_addr =
6923                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6924
6925        for_each_sg(qc->sg, sg, qc->n_elem, si) {
6926                ioadl64->flags = cpu_to_be32(ioadl_flags);
6927                ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6928                ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6929
6930                last_ioadl64 = ioadl64;
6931                ioadl64++;
6932        }
6933
6934        if (likely(last_ioadl64))
6935                last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6936}
6937
6938/**
6939 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6940 * @ipr_cmd:    ipr command struct
6941 * @qc:         ATA queued command
6942 *
6943 **/
6944static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6945                                struct ata_queued_cmd *qc)
6946{
6947        u32 ioadl_flags = 0;
6948        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6949        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6950        struct ipr_ioadl_desc *last_ioadl = NULL;
6951        int len = qc->nbytes;
6952        struct scatterlist *sg;
6953        unsigned int si;
6954
6955        if (len == 0)
6956                return;
6957
6958        if (qc->dma_dir == DMA_TO_DEVICE) {
6959                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6960                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6961                ioarcb->data_transfer_length = cpu_to_be32(len);
6962                ioarcb->ioadl_len =
6963                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6964        } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6965                ioadl_flags = IPR_IOADL_FLAGS_READ;
6966                ioarcb->read_data_transfer_length = cpu_to_be32(len);
6967                ioarcb->read_ioadl_len =
6968                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6969        }
6970
6971        for_each_sg(qc->sg, sg, qc->n_elem, si) {
6972                ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6973                ioadl->address = cpu_to_be32(sg_dma_address(sg));
6974
6975                last_ioadl = ioadl;
6976                ioadl++;
6977        }
6978
6979        if (likely(last_ioadl))
6980                last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6981}
6982
6983/**
6984 * ipr_qc_defer - Get a free ipr_cmd
6985 * @qc: queued command
6986 *
6987 * Return value:
6988 *      0 if the qc can be issued / ATA_DEFER_LINK if it must wait
6989 **/
6990static int ipr_qc_defer(struct ata_queued_cmd *qc)
6991{
6992        struct ata_port *ap = qc->ap;
6993        struct ipr_sata_port *sata_port = ap->private_data;
6994        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6995        struct ipr_cmnd *ipr_cmd;
6996        struct ipr_hrr_queue *hrrq;
6997        int hrrq_id;
6998
6999        hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7000        hrrq = &ioa_cfg->hrrq[hrrq_id];
7001
7002        qc->lldd_task = NULL;
7003        spin_lock(&hrrq->_lock);
7004        if (unlikely(hrrq->ioa_is_dead)) {
7005                spin_unlock(&hrrq->_lock);
7006                return 0;
7007        }
7008
7009        if (unlikely(!hrrq->allow_cmds)) {
7010                spin_unlock(&hrrq->_lock);
7011                return ATA_DEFER_LINK;
7012        }
7013
7014        ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7015        if (ipr_cmd == NULL) {
7016                spin_unlock(&hrrq->_lock);
7017                return ATA_DEFER_LINK;
7018        }
7019
7020        qc->lldd_task = ipr_cmd;
7021        spin_unlock(&hrrq->_lock);
7022        return 0;
7023}
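/*
 * Editorial sketch: ->qc_defer and ->qc_issue form a two-phase
 * reservation. Defer parks a free command block in qc->lldd_task under
 * the hrrq lock; issue consumes it, retrying the reservation once for
 * commands that arrive without a prior successful defer (see
 * ipr_qc_issue() below). The pattern in isolation, with hypothetical
 * demo_* names:
 */
struct demo_block;
struct demo_block *demo_pool_get(void);         /* may return NULL */

static int demo_defer(struct demo_block **slot)         /* ->qc_defer */
{
        *slot = demo_pool_get();
        return *slot ? 0 : -EAGAIN;             /* ATA_DEFER_LINK analogue */
}

static int demo_issue(struct demo_block **slot)         /* ->qc_issue */
{
        if (!*slot)
                demo_defer(slot);               /* late reservation */
        return *slot ? 0 : -EBUSY;              /* AC_ERR_SYSTEM analogue */
}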
7024
7025/**
7026 * ipr_qc_issue - Issue a SATA qc to a device
7027 * @qc: queued command
7028 *
7029 * Return value:
7030 *      0 on success / AC_ERR_* value on failure
7031 **/
7032static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7033{
7034        struct ata_port *ap = qc->ap;
7035        struct ipr_sata_port *sata_port = ap->private_data;
7036        struct ipr_resource_entry *res = sata_port->res;
7037        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7038        struct ipr_cmnd *ipr_cmd;
7039        struct ipr_ioarcb *ioarcb;
7040        struct ipr_ioarcb_ata_regs *regs;
7041
7042        if (qc->lldd_task == NULL)
7043                ipr_qc_defer(qc);
7044
7045        ipr_cmd = qc->lldd_task;
7046        if (ipr_cmd == NULL)
7047                return AC_ERR_SYSTEM;
7048
7049        qc->lldd_task = NULL;
7050        spin_lock(&ipr_cmd->hrrq->_lock);
7051        if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7052                        ipr_cmd->hrrq->ioa_is_dead)) {
7053                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7054                spin_unlock(&ipr_cmd->hrrq->_lock);
7055                return AC_ERR_SYSTEM;
7056        }
7057
7058        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7059        ioarcb = &ipr_cmd->ioarcb;
7060
7061        if (ioa_cfg->sis64) {
7062                regs = &ipr_cmd->i.ata_ioadl.regs;
7063                ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7064        } else
7065                regs = &ioarcb->u.add_data.u.regs;
7066
7067        memset(regs, 0, sizeof(*regs));
7068        ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7069
7070        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7071        ipr_cmd->qc = qc;
7072        ipr_cmd->done = ipr_sata_done;
7073        ipr_cmd->ioarcb.res_handle = res->res_handle;
7074        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7075        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7076        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7077        ipr_cmd->dma_use_sg = qc->n_elem;
7078
7079        if (ioa_cfg->sis64)
7080                ipr_build_ata_ioadl64(ipr_cmd, qc);
7081        else
7082                ipr_build_ata_ioadl(ipr_cmd, qc);
7083
7084        regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7085        ipr_copy_sata_tf(regs, &qc->tf);
7086        memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7087        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7088
7089        switch (qc->tf.protocol) {
7090        case ATA_PROT_NODATA:
7091        case ATA_PROT_PIO:
7092                break;
7093
7094        case ATA_PROT_DMA:
7095                regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7096                break;
7097
7098        case ATAPI_PROT_PIO:
7099        case ATAPI_PROT_NODATA:
7100                regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7101                break;
7102
7103        case ATAPI_PROT_DMA:
7104                regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7105                regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7106                break;
7107
7108        default:
7109                WARN_ON(1);
7110                spin_unlock(&ipr_cmd->hrrq->_lock);
7111                return AC_ERR_INVALID;
7112        }
7113
7114        ipr_send_command(ipr_cmd);
7115        spin_unlock(&ipr_cmd->hrrq->_lock);
7116
7117        return 0;
7118}
7119
7120/**
7121 * ipr_qc_fill_rtf - Read result TF
7122 * @qc: ATA queued command
7123 *
7124 * Return value:
7125 *      true
7126 **/
7127static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7128{
7129        struct ipr_sata_port *sata_port = qc->ap->private_data;
7130        struct ipr_ioasa_gata *g = &sata_port->ioasa;
7131        struct ata_taskfile *tf = &qc->result_tf;
7132
7133        tf->feature = g->error;
7134        tf->nsect = g->nsect;
7135        tf->lbal = g->lbal;
7136        tf->lbam = g->lbam;
7137        tf->lbah = g->lbah;
7138        tf->device = g->device;
7139        tf->command = g->status;
7140        tf->hob_nsect = g->hob_nsect;
7141        tf->hob_lbal = g->hob_lbal;
7142        tf->hob_lbam = g->hob_lbam;
7143        tf->hob_lbah = g->hob_lbah;
7144
7145        return true;
7146}
7147
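/*
 * The result taskfile above is rebuilt from the shadow ATA register
 * values (struct ipr_ioasa_gata) that the SATA completion path is
 * presumed to have cached in the sata_port. The device's Status
 * register lands in tf->command and its Error register in
 * tf->feature, matching the read-side meaning of those taskfile
 * slots.
 */
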
7148static struct ata_port_operations ipr_sata_ops = {
7149        .phy_reset = ipr_ata_phy_reset,
7150        .hardreset = ipr_sata_reset,
7151        .post_internal_cmd = ipr_ata_post_internal,
7152        .qc_prep = ata_noop_qc_prep,
7153        .qc_defer = ipr_qc_defer,
7154        .qc_issue = ipr_qc_issue,
7155        .qc_fill_rtf = ipr_qc_fill_rtf,
7156        .port_start = ata_sas_port_start,
7157        .port_stop = ata_sas_port_stop
7158};
7159
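/*
 * Transfer-mode masks advertised to libata: PIO mode 4 only, MWDMA
 * up to mode 2 and UDMA up to mode 6. libata negotiates the actual
 * mode per attached device within these limits.
 */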
7160static struct ata_port_info sata_port_info = {
7161        .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7162                          ATA_FLAG_SAS_HOST,
7163        .pio_mask       = ATA_PIO4_ONLY,
7164        .mwdma_mask     = ATA_MWDMA2,
7165        .udma_mask      = ATA_UDMA6,
7166        .port_ops       = &ipr_sata_ops
7167};
7168
7169#ifdef CONFIG_PPC_PSERIES
7170static const u16 ipr_blocked_processors[] = {
7171        PVR_NORTHSTAR,
7172        PVR_PULSAR,
7173        PVR_POWER4,
7174        PVR_ICESTAR,
7175        PVR_SSTAR,
7176        PVR_POWER4p,
7177        PVR_630,
7178        PVR_630p
7179};
7180
7181/**
7182 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7183 * @ioa_cfg:    ioa cfg struct
7184 *
7185 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7186 * certain pSeries hardware. This function determines if the given
7187 * adapter is in one of these configurations or not.
7188 *
7189 * Return value:
7190 *      1 if adapter is not supported / 0 if adapter is supported
7191 **/
7192static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7193{
7194        int i;
7195
7196        if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7197                for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7198                        if (pvr_version_is(ipr_blocked_processors[i]))
7199                                return 1;
7200                }
7201        }
7202        return 0;
7203}
7204#else
7205#define ipr_invalid_adapter(ioa_cfg) 0
7206#endif
7207
7208/**
7209 * ipr_ioa_bringdown_done - IOA bring down completion.
7210 * @ipr_cmd:    ipr command struct
7211 *
7212 * This function processes the completion of an adapter bring down.
7213 * It wakes any reset sleepers.
7214 *
7215 * Return value:
7216 *      IPR_RC_JOB_RETURN
7217 **/
7218static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7219{
7220        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7221        int i;
7222
7223        ENTER;
7224        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7225                ipr_trace;
7226                ioa_cfg->scsi_unblock = 1;
7227                schedule_work(&ioa_cfg->work_q);
7228        }
7229
7230        ioa_cfg->in_reset_reload = 0;
7231        ioa_cfg->reset_retries = 0;
7232        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7233                spin_lock(&ioa_cfg->hrrq[i]._lock);
7234                ioa_cfg->hrrq[i].ioa_is_dead = 1;
7235                spin_unlock(&ioa_cfg->hrrq[i]._lock);
7236        }
7237        wmb();
7238
7239        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7240        wake_up_all(&ioa_cfg->reset_wait_q);
7241        LEAVE;
7242
7243        return IPR_RC_JOB_RETURN;
7244}
7245
7246/**
7247 * ipr_ioa_reset_done - IOA reset completion.
7248 * @ipr_cmd:    ipr command struct
7249 *
7250 * This function processes the completion of an adapter reset.
7251 * It schedules any necessary mid-layer add/removes and
7252 * wakes any reset sleepers.
7253 *
7254 * Return value:
7255 *      IPR_RC_JOB_RETURN
7256 **/
7257static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7258{
7259        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7260        struct ipr_resource_entry *res;
7261        int j;
7262
7263        ENTER;
7264        ioa_cfg->in_reset_reload = 0;
7265        for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7266                spin_lock(&ioa_cfg->hrrq[j]._lock);
7267                ioa_cfg->hrrq[j].allow_cmds = 1;
7268                spin_unlock(&ioa_cfg->hrrq[j]._lock);
7269        }
7270        wmb();
7271        ioa_cfg->reset_cmd = NULL;
7272        ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7273
7274        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7275                if (res->add_to_ml || res->del_from_ml) {
7276                        ipr_trace;
7277                        break;
7278                }
7279        }
7280        schedule_work(&ioa_cfg->work_q);
7281
7282        for (j = 0; j < IPR_NUM_HCAMS; j++) {
7283                list_del_init(&ioa_cfg->hostrcb[j]->queue);
7284                if (j < IPR_NUM_LOG_HCAMS)
7285                        ipr_send_hcam(ioa_cfg,
7286                                IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7287                                ioa_cfg->hostrcb[j]);
7288                else
7289                        ipr_send_hcam(ioa_cfg,
7290                                IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7291                                ioa_cfg->hostrcb[j]);
7292        }
7293
7294        scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7295        dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7296
7297        ioa_cfg->reset_retries = 0;
7298        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7299        wake_up_all(&ioa_cfg->reset_wait_q);
7300
7301        ioa_cfg->scsi_unblock = 1;
7302        schedule_work(&ioa_cfg->work_q);
7303        LEAVE;
7304        return IPR_RC_JOB_RETURN;
7305}
7306
7307/**
7308 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7309 * @supported_dev:      supported device struct
7310 * @vpids:                      vendor product id struct
7311 *
7312 * Return value:
7313 *      none
7314 **/
7315static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7316                                 struct ipr_std_inq_vpids *vpids)
7317{
7318        memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7319        memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7320        supported_dev->num_records = 1;
7321        supported_dev->data_length =
7322                cpu_to_be16(sizeof(struct ipr_supported_device));
7323        supported_dev->reserved = 0;
7324}
7325
7326/**
7327 * ipr_set_supported_devs - Send Set Supported Devices for a device
7328 * @ipr_cmd:    ipr command struct
7329 *
7330 * This function sends a Set Supported Devices to the adapter
7331 *
7332 * Return value:
7333 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7334 **/
7335static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7336{
7337        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7338        struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7339        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7340        struct ipr_resource_entry *res = ipr_cmd->u.res;
7341
7342        ipr_cmd->job_step = ipr_ioa_reset_done;
7343
7344        list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7345                if (!ipr_is_scsi_disk(res))
7346                        continue;
7347
7348                ipr_cmd->u.res = res;
7349                ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7350
7351                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7352                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7353                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7354
7355                ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7356                ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7357                ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7358                ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7359
7360                ipr_init_ioadl(ipr_cmd,
7361                               ioa_cfg->vpd_cbs_dma +
7362                                 offsetof(struct ipr_misc_cbs, supp_dev),
7363                               sizeof(struct ipr_supported_device),
7364                               IPR_IOADL_FLAGS_WRITE_LAST);
7365
7366                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7367                           IPR_SET_SUP_DEVICE_TIMEOUT);
7368
7369                if (!ioa_cfg->sis64)
7370                        ipr_cmd->job_step = ipr_set_supported_devs;
7371                LEAVE;
7372                return IPR_RC_JOB_RETURN;
7373        }
7374
7375        LEAVE;
7376        return IPR_RC_JOB_CONTINUE;
7377}
7378
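/*
 * ipr_set_supported_devs() is effectively a loop driven by the reset
 * job state machine: ipr_cmd->u.res marks the last resource handled,
 * list_for_each_entry_continue() resumes after it, and (on non-SIS64
 * adapters) job_step points back at this function until every SCSI
 * disk has been covered, at which point the job falls through to
 * ipr_ioa_reset_done().
 */
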
7379/**
7380 * ipr_get_mode_page - Locate specified mode page
7381 * @mode_pages: mode page buffer
7382 * @page_code:  page code to find
7383 * @len:                minimum required length for mode page
7384 *
7385 * Return value:
7386 *      pointer to mode page / NULL on failure
7387 **/
7388static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7389                               u32 page_code, u32 len)
7390{
7391        struct ipr_mode_page_hdr *mode_hdr;
7392        u32 page_length;
7393        u32 length;
7394
7395        if (!mode_pages || (mode_pages->hdr.length == 0))
7396                return NULL;
7397
7398        length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7399        mode_hdr = (struct ipr_mode_page_hdr *)
7400                (mode_pages->data + mode_pages->hdr.block_desc_len);
7401
7402        while (length) {
7403                if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7404                        if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7405                                return mode_hdr;
7406                        break;
7407                } else {
7408                        page_length = (sizeof(struct ipr_mode_page_hdr) +
7409                                       mode_hdr->page_length);
7410                        length -= page_length;
7411                        mode_hdr = (struct ipr_mode_page_hdr *)
7412                                ((unsigned long)mode_hdr + page_length);
7413                }
7414        }
7415        return NULL;
7416}
7417
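/*
 * Layout assumed by ipr_get_mode_page() (MODE SENSE(6) data):
 *
 *   +0  4-byte mode parameter header; hdr.length excludes the length
 *       byte itself, hence the "(hdr.length + 1) - 4" above
 *   +4  hdr.block_desc_len bytes of block descriptors
 *   ... mode pages, each a 2-byte header (page code, page_length)
 *       followed by page_length bytes of data
 */
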
7418/**
7419 * ipr_check_term_power - Check for term power errors
7420 * @ioa_cfg:    ioa config struct
7421 * @mode_pages: IOAFP mode pages buffer
7422 *
7423 * Check the IOAFP's mode page 28 for term power errors
7424 *
7425 * Return value:
7426 *      nothing
7427 **/
7428static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7429                                 struct ipr_mode_pages *mode_pages)
7430{
7431        int i;
7432        int entry_length;
7433        struct ipr_dev_bus_entry *bus;
7434        struct ipr_mode_page28 *mode_page;
7435
7436        mode_page = ipr_get_mode_page(mode_pages, 0x28,
7437                                      sizeof(struct ipr_mode_page28));
7438
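        /*
         * Note: no NULL check -- mode page 28 is assumed to always be
         * present in the IOAFP's mode sense data, so the lookup is
         * presumed to succeed; a defensive version would verify
         * mode_page before dereferencing it here.
         */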
7439        entry_length = mode_page->entry_length;
7440
7441        bus = mode_page->bus;
7442
7443        for (i = 0; i < mode_page->num_entries; i++) {
7444                if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7445                        dev_err(&ioa_cfg->pdev->dev,
7446                                "Term power is absent on scsi bus %d\n",
7447                                bus->res_addr.bus);
7448                }
7449
7450                bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7451        }
7452}
7453
7454/**
7455 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7456 * @ioa_cfg:    ioa config struct
7457 *
7458 * Looks through the config table checking for SES devices. If
7459 * the SES device is in the SES table indicating a maximum SCSI
7460 * bus speed, the speed is limited for the bus.
7461 *
7462 * Return value:
7463 *      none
7464 **/
7465static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7466{
7467        u32 max_xfer_rate;
7468        int i;
7469
7470        for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7471                max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7472                                                       ioa_cfg->bus_attr[i].bus_width);
7473
7474                if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7475                        ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7476        }
7477}
7478
7479/**
7480 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7481 * @ioa_cfg:    ioa config struct
7482 * @mode_pages: mode page 28 buffer
7483 *
7484 * Updates mode page 28 based on driver configuration
7485 *
7486 * Return value:
7487 *      none
7488 **/
7489static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7490                                          struct ipr_mode_pages *mode_pages)
7491{
7492        int i, entry_length;
7493        struct ipr_dev_bus_entry *bus;
7494        struct ipr_bus_attributes *bus_attr;
7495        struct ipr_mode_page28 *mode_page;
7496
7497        mode_page = ipr_get_mode_page(mode_pages, 0x28,
7498                                      sizeof(struct ipr_mode_page28));
7499
7500        entry_length = mode_page->entry_length;
7501
7502        /* Loop for each device bus entry */
7503        for (i = 0, bus = mode_page->bus;
7504             i < mode_page->num_entries;
7505             i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7506                if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7507                        dev_err(&ioa_cfg->pdev->dev,
7508                                "Invalid resource address reported: 0x%08X\n",
7509                                IPR_GET_PHYS_LOC(bus->res_addr));
7510                        continue;
7511                }
7512
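                /*
                 * bus_attr is indexed by the loop counter rather than
                 * by bus->res_addr.bus, so the adapter is evidently
                 * expected to report the device bus entries in bus
                 * order.
                 */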
7513                bus_attr = &ioa_cfg->bus_attr[i];
7514                bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7515                bus->bus_width = bus_attr->bus_width;
7516                bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7517                bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7518                if (bus_attr->qas_enabled)
7519                        bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7520                else
7521                        bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7522        }
7523}
7524
7525/**
7526 * ipr_build_mode_select - Build a mode select command
7527 * @ipr_cmd:    ipr command struct
7528 * @res_handle: resource handle to send command to
7529 * @parm:               Byte 1 (flags) of the Mode Select CDB
7530 * @dma_addr:   DMA buffer address
7531 * @xfer_len:   data transfer length
7532 *
7533 * Return value:
7534 *      none
7535 **/
7536static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7537                                  __be32 res_handle, u8 parm,
7538                                  dma_addr_t dma_addr, u8 xfer_len)
7539{
7540        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7541
7542        ioarcb->res_handle = res_handle;
7543        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7544        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7545        ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7546        ioarcb->cmd_pkt.cdb[1] = parm;
7547        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7548
7549        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7550}
7551
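/*
 * The resulting MODE SELECT(6) CDB, as built above:
 *   byte 0: MODE_SELECT (0x15)
 *   byte 1: parm (callers pass 0x11, i.e. the PF and SP bits)
 *   byte 4: xfer_len (parameter list length)
 * The buffer itself travels write-direction through a single IOADL
 * descriptor.
 */
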
7552/**
7553 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7554 * @ipr_cmd:    ipr command struct
7555 *
7556 * This function sets up the SCSI bus attributes and sends
7557 * a Mode Select for Page 28 to activate them.
7558 *
7559 * Return value:
7560 *      IPR_RC_JOB_RETURN
7561 **/
7562static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7563{
7564        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7565        struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7566        int length;
7567
7568        ENTER;
7569        ipr_scsi_bus_speed_limit(ioa_cfg);
7570        ipr_check_term_power(ioa_cfg, mode_pages);
7571        ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7572        length = mode_pages->hdr.length + 1;
7573        mode_pages->hdr.length = 0;
7574
7575        ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7576                              ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7577                              length);
7578
7579        ipr_cmd->job_step = ipr_set_supported_devs;
7580        ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7581                                    struct ipr_resource_entry, queue);
7582        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7583
7584        LEAVE;
7585        return IPR_RC_JOB_RETURN;
7586}
7587
7588/**
7589 * ipr_build_mode_sense - Builds a mode sense command
7590 * @ipr_cmd:    ipr command struct
7591 * @res_handle: resource handle to send command to
7592 * @parm:               Byte 2 (page code) of the Mode Sense CDB
7593 * @dma_addr:   DMA address of mode sense buffer
7594 * @xfer_len:   Size of DMA buffer
7595 *
7596 * Return value:
7597 *      none
7598 **/
7599static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7600                                 __be32 res_handle,
7601                                 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7602{
7603        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7604
7605        ioarcb->res_handle = res_handle;
7606        ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7607        ioarcb->cmd_pkt.cdb[2] = parm;
7608        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7609        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7610
7611        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7612}
7613
7614/**
7615 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7616 * @ipr_cmd:    ipr command struct
7617 *
7618 * This function handles the failure of an IOA bringup command.
7619 *
7620 * Return value:
7621 *      IPR_RC_JOB_RETURN
7622 **/
7623static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7624{
7625        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7626        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7627
7628        dev_err(&ioa_cfg->pdev->dev,
7629                "0x%02X failed with IOASC: 0x%08X\n",
7630                ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7631
7632        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7633        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7634        return IPR_RC_JOB_RETURN;
7635}
7636
7637/**
7638 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7639 * @ipr_cmd:    ipr command struct
7640 *
7641 * This function handles the failure of a Mode Sense to the IOAFP.
7642 * Some adapters do not handle all mode pages.
7643 *
7644 * Return value:
7645 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7646 **/
7647static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7648{
7649        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7650        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7651
7652        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7653                ipr_cmd->job_step = ipr_set_supported_devs;
7654                ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7655                                            struct ipr_resource_entry, queue);
7656                return IPR_RC_JOB_CONTINUE;
7657        }
7658
7659        return ipr_reset_cmd_failed(ipr_cmd);
7660}
7661
7662/**
7663 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7664 * @ipr_cmd:    ipr command struct
7665 *
7666 * This function sends a Page 28 mode sense to the IOA to
7667 * retrieve SCSI bus attributes.
7668 *
7669 * Return value:
7670 *      IPR_RC_JOB_RETURN
7671 **/
7672static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7673{
7674        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7675
7676        ENTER;
7677        ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7678                             0x28, ioa_cfg->vpd_cbs_dma +
7679                             offsetof(struct ipr_misc_cbs, mode_pages),
7680                             sizeof(struct ipr_mode_pages));
7681
7682        ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7683        ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7684
7685        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7686
7687        LEAVE;
7688        return IPR_RC_JOB_RETURN;
7689}
7690
7691/**
7692 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7693 * @ipr_cmd:    ipr command struct
7694 *
7695 * This function enables dual IOA RAID support if possible.
7696 *
7697 * Return value:
7698 *      IPR_RC_JOB_RETURN
7699 **/
7700static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7701{
7702        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7703        struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7704        struct ipr_mode_page24 *mode_page;
7705        int length;
7706
7707        ENTER;
7708        mode_page = ipr_get_mode_page(mode_pages, 0x24,
7709                                      sizeof(struct ipr_mode_page24));
7710
7711        if (mode_page)
7712                mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7713
7714        length = mode_pages->hdr.length + 1;
7715        mode_pages->hdr.length = 0;
7716
7717        ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7718                              ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7719                              length);
7720
7721        ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7722        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7723
7724        LEAVE;
7725        return IPR_RC_JOB_RETURN;
7726}
7727
7728/**
7729 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7730 * @ipr_cmd:    ipr command struct
7731 *
7732 * This function handles the failure of a Mode Sense to the IOAFP.
7733 * Some adapters do not handle all mode pages.
7734 *
7735 * Return value:
7736 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7737 **/
7738static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7739{
7740        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7741
7742        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7743                ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7744                return IPR_RC_JOB_CONTINUE;
7745        }
7746
7747        return ipr_reset_cmd_failed(ipr_cmd);
7748}
7749
7750/**
7751 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7752 * @ipr_cmd:    ipr command struct
7753 *
7754 * This function sends a mode sense to the IOA to retrieve
7755 * the IOA Advanced Function Control mode page.
7756 *
7757 * Return value:
7758 *      IPR_RC_JOB_RETURN
7759 **/
7760static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7761{
7762        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7763
7764        ENTER;
7765        ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7766                             0x24, ioa_cfg->vpd_cbs_dma +
7767                             offsetof(struct ipr_misc_cbs, mode_pages),
7768                             sizeof(struct ipr_mode_pages));
7769
7770        ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7771        ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7772
7773        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7774
7775        LEAVE;
7776        return IPR_RC_JOB_RETURN;
7777}
7778
7779/**
7780 * ipr_init_res_table - Initialize the resource table
7781 * @ipr_cmd:    ipr command struct
7782 *
7783 * This function looks through the existing resource table, comparing
7784 * it with the config table. This function will take care of old/new
7785 * devices and schedule adding/removing them from the mid-layer
7786 * as appropriate.
7787 *
7788 * Return value:
7789 *      IPR_RC_JOB_CONTINUE
7790 **/
7791static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7792{
7793        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7794        struct ipr_resource_entry *res, *temp;
7795        struct ipr_config_table_entry_wrapper cfgtew;
7796        int entries, found, flag, i;
7797        LIST_HEAD(old_res);
7798
7799        ENTER;
7800        if (ioa_cfg->sis64)
7801                flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7802        else
7803                flag = ioa_cfg->u.cfg_table->hdr.flags;
7804
7805        if (flag & IPR_UCODE_DOWNLOAD_REQ)
7806                dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7807
7808        list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7809                list_move_tail(&res->queue, &old_res);
7810
7811        if (ioa_cfg->sis64)
7812                entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7813        else
7814                entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7815
7816        for (i = 0; i < entries; i++) {
7817                if (ioa_cfg->sis64)
7818                        cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7819                else
7820                        cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7821                found = 0;
7822
7823                list_for_each_entry_safe(res, temp, &old_res, queue) {
7824                        if (ipr_is_same_device(res, &cfgtew)) {
7825                                list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7826                                found = 1;
7827                                break;
7828                        }
7829                }
7830
7831                if (!found) {
7832                        if (list_empty(&ioa_cfg->free_res_q)) {
7833                                dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7834                                break;
7835                        }
7836
7837                        found = 1;
7838                        res = list_entry(ioa_cfg->free_res_q.next,
7839                                         struct ipr_resource_entry, queue);
7840                        list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7841                        ipr_init_res_entry(res, &cfgtew);
7842                        res->add_to_ml = 1;
7843                } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7844                        res->sdev->allow_restart = 1;
7845
7846                if (found)
7847                        ipr_update_res_entry(res, &cfgtew);
7848        }
7849
7850        list_for_each_entry_safe(res, temp, &old_res, queue) {
7851                if (res->sdev) {
7852                        res->del_from_ml = 1;
7853                        res->res_handle = IPR_INVALID_RES_HANDLE;
7854                        list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7855                }
7856        }
7857
7858        list_for_each_entry_safe(res, temp, &old_res, queue) {
7859                ipr_clear_res_target(res);
7860                list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7861        }
7862
7863        if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7864                ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7865        else
7866                ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7867
7868        LEAVE;
7869        return IPR_RC_JOB_CONTINUE;
7870}
7871
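/*
 * Reconciliation scheme used above: every known resource is first
 * moved onto old_res; entries that reappear in the freshly fetched
 * config table move back to used_res_q, brand-new entries are drawn
 * from free_res_q and flagged add_to_ml, and whatever is left on
 * old_res is either flagged del_from_ml (if the mid-layer still
 * holds an sdev) or returned to free_res_q.
 */
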
7872/**
7873 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7874 * @ipr_cmd:    ipr command struct
7875 *
7876 * This function sends a Query IOA Configuration command
7877 * to the adapter to retrieve the IOA configuration table.
7878 *
7879 * Return value:
7880 *      IPR_RC_JOB_RETURN
7881 **/
7882static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7883{
7884        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7885        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7886        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7887        struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7888
7889        ENTER;
7890        if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7891                ioa_cfg->dual_raid = 1;
7892        dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7893                 ucode_vpd->major_release, ucode_vpd->card_type,
7894                 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7895        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7896        ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7897
7898        ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7899        ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7900        ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7901        ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7902
7903        ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7904                       IPR_IOADL_FLAGS_READ_LAST);
7905
7906        ipr_cmd->job_step = ipr_init_res_table;
7907
7908        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7909
7910        LEAVE;
7911        return IPR_RC_JOB_RETURN;
7912}
7913
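/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd:    ipr command struct
 *
 * An invalid request type response simply means this adapter does not
 * implement the requested service action, so the reset job continues.
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/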
7914static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7915{
7916        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7917
7918        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7919                return IPR_RC_JOB_CONTINUE;
7920
7921        return ipr_reset_cmd_failed(ipr_cmd);
7922}
7923
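/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/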
7924static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7925                                         __be32 res_handle, u8 sa_code)
7926{
7927        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7928
7929        ioarcb->res_handle = res_handle;
7930        ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7931        ioarcb->cmd_pkt.cdb[1] = sa_code;
7932        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7933}
7934
7935/**
7936 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7937 * @ipr_cmd:    ipr command struct
7938 *
7939 * Return value:
7940 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7941 **/
7942static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7943{
7944        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7945        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7946        struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7947
7948        ENTER;
7949
7950        ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7951
7952        if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7953                ipr_build_ioa_service_action(ipr_cmd,
7954                                             cpu_to_be32(IPR_IOA_RES_HANDLE),
7955                                             IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7956
7957                ioarcb->cmd_pkt.cdb[2] = 0x40;
7958
7959                ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7960                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7961                           IPR_SET_SUP_DEVICE_TIMEOUT);
7962
7963                LEAVE;
7964                return IPR_RC_JOB_RETURN;
7965        }
7966
7967        LEAVE;
7968        return IPR_RC_JOB_CONTINUE;
7969}
7970
7971/**
7972 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7973 * @ipr_cmd:    ipr command struct
 * @flags:      inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page:       page code
 * @dma_addr:   DMA address of the inquiry buffer
 * @xfer_len:   size of the inquiry buffer
7974 *
7975 * This utility function sends an inquiry to the adapter.
7976 *
7977 * Return value:
7978 *      none
7979 **/
7980static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7981                              dma_addr_t dma_addr, u8 xfer_len)
7982{
7983        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7984
7985        ENTER;
7986        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7987        ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7988
7989        ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7990        ioarcb->cmd_pkt.cdb[1] = flags;
7991        ioarcb->cmd_pkt.cdb[2] = page;
7992        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7993
7994        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7995
7996        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7997        LEAVE;
7998}
7999
8000/**
8001 * ipr_inquiry_page_supported - Is the given inquiry page supported
8002 * @page0:              inquiry page 0 buffer
8003 * @page:               page code.
8004 *
8005 * This function determines if the specified inquiry page is supported.
8006 *
8007 * Return value:
8008 *      1 if page is supported / 0 if not
8009 **/
8010static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8011{
8012        int i;
8013
8014        for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8015                if (page0->page[i] == page)
8016                        return 1;
8017
8018        return 0;
8019}
8020
8021/**
8022 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8023 * @ipr_cmd:    ipr command struct
8024 *
8025 * This function sends a Page 0xC4 inquiry to the adapter
8026 * to retrieve software VPD information.
8027 *
8028 * Return value:
8029 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8030 **/
8031static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8032{
8033        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8034        struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8035        struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8036
8037        ENTER;
8038        ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8039        memset(pageC4, 0, sizeof(*pageC4));
8040
8041        if (ipr_inquiry_page_supported(page0, 0xC4)) {
8042                ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8043                                  (ioa_cfg->vpd_cbs_dma
8044                                   + offsetof(struct ipr_misc_cbs,
8045                                              pageC4_data)),
8046                                  sizeof(struct ipr_inquiry_pageC4));
8047                return IPR_RC_JOB_RETURN;
8048        }
8049
8050        LEAVE;
8051        return IPR_RC_JOB_CONTINUE;
8052}
8053
8054/**
8055 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8056 * @ipr_cmd:    ipr command struct
8057 *
8058 * This function sends a Page 0xD0 inquiry to the adapter
8059 * to retrieve adapter capabilities.
8060 *
8061 * Return value:
8062 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8063 **/
8064static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8065{
8066        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8067        struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8068        struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8069
8070        ENTER;
8071        ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8072        memset(cap, 0, sizeof(*cap));
8073
8074        if (ipr_inquiry_page_supported(page0, 0xD0)) {
8075                ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8076                                  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8077                                  sizeof(struct ipr_inquiry_cap));
8078                return IPR_RC_JOB_RETURN;
8079        }
8080
8081        LEAVE;
8082        return IPR_RC_JOB_CONTINUE;
8083}
8084
8085/**
8086 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8087 * @ipr_cmd:    ipr command struct
8088 *
8089 * This function sends a Page 3 inquiry to the adapter
8090 * to retrieve software VPD information.
8091 *
8092 * Return value:
8093 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8094 **/
8095static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8096{
8097        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8098
8099        ENTER;
8100
8101        ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8102
8103        ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8104                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8105                          sizeof(struct ipr_inquiry_page3));
8106
8107        LEAVE;
8108        return IPR_RC_JOB_RETURN;
8109}
8110
8111/**
8112 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8113 * @ipr_cmd:    ipr command struct
8114 *
8115 * This function sends a Page 0 inquiry to the adapter
8116 * to retrieve supported inquiry pages.
8117 *
8118 * Return value:
8119 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8120 **/
8121static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8122{
8123        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8124        char type[5];
8125
8126        ENTER;
8127
8128        /* Grab the type out of the VPD and store it away */
8129        memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8130        type[4] = '\0';
8131        ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8132
8133        if (ipr_invalid_adapter(ioa_cfg)) {
8134                dev_err(&ioa_cfg->pdev->dev,
8135                        "Adapter not supported in this hardware configuration.\n");
8136
8137                if (!ipr_testmode) {
8138                        ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8139                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8140                        list_add_tail(&ipr_cmd->queue,
8141                                        &ioa_cfg->hrrq->hrrq_free_q);
8142                        return IPR_RC_JOB_RETURN;
8143                }
8144        }
8145
8146        ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8147
8148        ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8149                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8150                          sizeof(struct ipr_inquiry_page0));
8151
8152        LEAVE;
8153        return IPR_RC_JOB_RETURN;
8154}
8155
8156/**
8157 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8158 * @ipr_cmd:    ipr command struct
8159 *
8160 * This function sends a standard inquiry to the adapter.
8161 *
8162 * Return value:
8163 *      IPR_RC_JOB_RETURN
8164 **/
8165static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8166{
8167        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8168
8169        ENTER;
8170        ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8171
8172        ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8173                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8174                          sizeof(struct ipr_ioa_vpd));
8175
8176        LEAVE;
8177        return IPR_RC_JOB_RETURN;
8178}
8179
8180/**
8181 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8182 * @ipr_cmd:    ipr command struct
8183 *
8184 * This function sends an Identify Host Request Response Queue
8185 * command to establish the HRRQ with the adapter.
8186 *
8187 * Return value:
8188 *      IPR_RC_JOB_RETURN
8189 **/
8190static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8191{
8192        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8193        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8194        struct ipr_hrr_queue *hrrq;
8195
8196        ENTER;
8197        ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8198        if (ioa_cfg->identify_hrrq_index == 0)
8199                dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8200
8201        if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8202                hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8203
8204                ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8205                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8206
8207                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8208                if (ioa_cfg->sis64)
8209                        ioarcb->cmd_pkt.cdb[1] = 0x1;
8210
8211                if (ioa_cfg->nvectors == 1)
8212                        ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8213                else
8214                        ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8215
8216                ioarcb->cmd_pkt.cdb[2] =
8217                        ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8218                ioarcb->cmd_pkt.cdb[3] =
8219                        ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8220                ioarcb->cmd_pkt.cdb[4] =
8221                        ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8222                ioarcb->cmd_pkt.cdb[5] =
8223                        ((u64) hrrq->host_rrq_dma) & 0xff;
8224                ioarcb->cmd_pkt.cdb[7] =
8225                        ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8226                ioarcb->cmd_pkt.cdb[8] =
8227                        (sizeof(u32) * hrrq->size) & 0xff;
8228
8229                if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8230                        ioarcb->cmd_pkt.cdb[9] =
8231                                        ioa_cfg->identify_hrrq_index;
8232
8233                if (ioa_cfg->sis64) {
8234                        ioarcb->cmd_pkt.cdb[10] =
8235                                ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8236                        ioarcb->cmd_pkt.cdb[11] =
8237                                ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8238                        ioarcb->cmd_pkt.cdb[12] =
8239                                ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8240                        ioarcb->cmd_pkt.cdb[13] =
8241                                ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8242                }
8243
8244                if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8245                        ioarcb->cmd_pkt.cdb[14] =
8246                                        ioa_cfg->identify_hrrq_index;
8247
8248                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8249                           IPR_INTERNAL_TIMEOUT);
8250
8251                if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8252                        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8253
8254                LEAVE;
8255                return IPR_RC_JOB_RETURN;
8256        }
8257
8258        LEAVE;
8259        return IPR_RC_JOB_CONTINUE;
8260}
8261
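/*
 * Identify Host RRQ CDB layout, as assembled above (a sketch; byte
 * indices are zero-based):
 *   byte 0     : IPR_ID_HOST_RR_Q
 *   byte 1     : 0x1 on SIS64; IPR_ID_HRRQ_SELE_ENABLE set when more
 *                than one vector is in use
 *   bytes 2-5  : host RRQ DMA address bits 31..0, MSB first
 *   bytes 7-8  : queue size in bytes (sizeof(u32) * hrrq->size)
 *   byte 9/14  : HRRQ index, when selection is enabled
 *   bytes 10-13: DMA address bits 63..32 (SIS64 only)
 */
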
8262/**
8263 * ipr_reset_timer_done - Adapter reset timer function
8264 * @t:          Timer context used to fetch ipr command struct
8265 *
8266 * Description: This function is used in adapter reset processing
8267 * for timing events. If the reset_cmd pointer in the IOA
8268 * config struct is not this adapter's we are doing nested
8269 * resets and fail_all_ops will take care of freeing the
8270 * command block.
8271 *
8272 * Return value:
8273 *      none
8274 **/
8275static void ipr_reset_timer_done(struct timer_list *t)
8276{
8277        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8278        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8279        unsigned long lock_flags = 0;
8280
8281        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8282
8283        if (ioa_cfg->reset_cmd == ipr_cmd) {
8284                list_del(&ipr_cmd->queue);
8285                ipr_cmd->done(ipr_cmd);
8286        }
8287
8288        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8289}
8290
8291/**
8292 * ipr_reset_start_timer - Start a timer for adapter reset job
8293 * @ipr_cmd:    ipr command struct
8294 * @timeout:    timeout value
8295 *
8296 * Description: This function is used in adapter reset processing
8297 * for timing events. If the reset_cmd pointer in the IOA
8298 * config struct is not this adapter's we are doing nested
8299 * resets and fail_all_ops will take care of freeing the
8300 * command block.
8301 *
8302 * Return value:
8303 *      none
8304 **/
8305static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8306                                  unsigned long timeout)
8307{
8309        ENTER;
8310        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8311        ipr_cmd->done = ipr_reset_ioa_job;
8312
8313        ipr_cmd->timer.expires = jiffies + timeout;
8314        ipr_cmd->timer.function = ipr_reset_timer_done;
8315        add_timer(&ipr_cmd->timer);
8316}
8317
8318/**
8319 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8320 * @ioa_cfg:    ioa cfg struct
8321 *
8322 * Return value:
8323 *      nothing
8324 **/
8325static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8326{
8327        struct ipr_hrr_queue *hrrq;
8328
8329        for_each_hrrq(hrrq, ioa_cfg) {
8330                spin_lock(&hrrq->_lock);
8331                memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8332
8333                /* Initialize Host RRQ pointers */
8334                hrrq->hrrq_start = hrrq->host_rrq;
8335                hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8336                hrrq->hrrq_curr = hrrq->hrrq_start;
8337                hrrq->toggle_bit = 1;
8338                spin_unlock(&hrrq->_lock);
8339        }
8340        wmb();
8341
8342        ioa_cfg->identify_hrrq_index = 0;
8343        if (ioa_cfg->hrrq_num == 1)
8344                atomic_set(&ioa_cfg->hrrq_index, 0);
8345        else
8346                atomic_set(&ioa_cfg->hrrq_index, 1);
8347
8348        /* Zero out config table */
8349        memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8350}
8351
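/*
 * Each host RRQ is a circular buffer of 32-bit response handles. The
 * toggle bit flips on every wrap, which is apparently how the host
 * distinguishes freshly written entries from stale ones; after a
 * (re)initialization the host therefore starts out expecting
 * toggle_bit == 1.
 */
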
8352/**
8353 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8354 * @ipr_cmd:    ipr command struct
8355 *
8356 * Return value:
8357 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8358 **/
8359static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8360{
8361        unsigned long stage, stage_time;
8362        u32 feedback;
8363        volatile u32 int_reg;
8364        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8365        u64 maskval = 0;
8366
8367        feedback = readl(ioa_cfg->regs.init_feedback_reg);
8368        stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8369        stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8370
8371        ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8372
8373        /* sanity check the stage_time value */
8374        if (stage_time == 0)
8375                stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8376        else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8377                stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8378        else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8379                stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8380
8381        if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8382                writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8383                int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8384                stage_time = ioa_cfg->transop_timeout;
8385                ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8386        } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8387                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8388                if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8389                        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8390                        maskval = IPR_PCII_IPL_STAGE_CHANGE;
8391                        maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8392                        writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8393                        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8394                        return IPR_RC_JOB_CONTINUE;
8395                }
8396        }
8397
8398        ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8399        ipr_cmd->timer.function = ipr_oper_timeout;
8400        ipr_cmd->done = ipr_reset_ioa_job;
8401        add_timer(&ipr_cmd->timer);
8402
8403        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8404
8405        return IPR_RC_JOB_RETURN;
8406}
8407
8408/**
8409 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8410 * @ipr_cmd:    ipr command struct
8411 *
8412 * This function reinitializes some control blocks and
8413 * enables destructive diagnostics on the adapter.
8414 *
8415 * Return value:
8416 *      IPR_RC_JOB_RETURN
8417 **/
8418static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8419{
8420        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8421        volatile u32 int_reg;
8422        volatile u64 maskval;
8423        int i;
8424
8425        ENTER;
8426        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8427        ipr_init_ioa_mem(ioa_cfg);
8428
8429        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8430                spin_lock(&ioa_cfg->hrrq[i]._lock);
8431                ioa_cfg->hrrq[i].allow_interrupts = 1;
8432                spin_unlock(&ioa_cfg->hrrq[i]._lock);
8433        }
8434        if (ioa_cfg->sis64) {
8435                /* Set the adapter to the correct endian mode. */
8436                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8437                int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8438        }
8439
8440        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8441
8442        if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8443                writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8444                       ioa_cfg->regs.clr_interrupt_mask_reg32);
8445                int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8446                return IPR_RC_JOB_CONTINUE;
8447        }
8448
8449        /* Enable destructive diagnostics on IOA */
8450        writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8451
8452        if (ioa_cfg->sis64) {
8453                maskval = IPR_PCII_IPL_STAGE_CHANGE;
8454                maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8455                writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8456        } else
8457                writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8458
8459        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8460
8461        dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8462
8463        if (ioa_cfg->sis64) {
8464                ipr_cmd->job_step = ipr_reset_next_stage;
8465                return IPR_RC_JOB_CONTINUE;
8466        }
8467
8468        ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8469        ipr_cmd->timer.function = ipr_oper_timeout;
8470        ipr_cmd->done = ipr_reset_ioa_job;
8471        add_timer(&ipr_cmd->timer);
8472        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8473
8474        LEAVE;
8475        return IPR_RC_JOB_RETURN;
8476}
8477
8478/**
8479 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8480 * @ipr_cmd:    ipr command struct
8481 *
8482 * This function is invoked when an adapter dump has run out
8483 * of processing time.
8484 *
8485 * Return value:
8486 *      IPR_RC_JOB_CONTINUE
8487 **/
8488static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8489{
8490        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8491
8492        if (ioa_cfg->sdt_state == GET_DUMP)
8493                ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8494        else if (ioa_cfg->sdt_state == READ_DUMP)
8495                ioa_cfg->sdt_state = ABORT_DUMP;
8496
8497        ioa_cfg->dump_timeout = 1;
8498        ipr_cmd->job_step = ipr_reset_alert;
8499
8500        return IPR_RC_JOB_CONTINUE;
8501}
8502
8503/**
8504 * ipr_unit_check_no_data - Log a unit check/no data error log
8505 * @ioa_cfg:            ioa config struct
8506 *
8507 * Logs an error indicating the adapter unit checked, but for some
8508 * reason, we were unable to fetch the unit check buffer.
8509 *
8510 * Return value:
8511 *      nothing
8512 **/
8513static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8514{
8515        ioa_cfg->errors_logged++;
8516        dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8517}
8518
8519/**
8520 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8521 * @ioa_cfg:            ioa config struct
8522 *
8523 * Fetches the unit check buffer from the adapter by clocking the data
8524 * through the mailbox register.
8525 *
8526 * Return value:
8527 *      nothing
8528 **/
8529static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8530{
8531        unsigned long mailbox;
8532        struct ipr_hostrcb *hostrcb;
8533        struct ipr_uc_sdt sdt;
8534        int rc, length;
8535        u32 ioasc;
8536
8537        mailbox = readl(ioa_cfg->ioa_mailbox);
8538
8539        if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8540                ipr_unit_check_no_data(ioa_cfg);
8541                return;
8542        }
8543
8544        memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8545        rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8546                                        (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8547
8548        if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8549            ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8550            (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8551                ipr_unit_check_no_data(ioa_cfg);
8552                return;
8553        }
8554
8555        /* Find length of the first sdt entry (UC buffer) */
8556        if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8557                length = be32_to_cpu(sdt.entry[0].end_token);
8558        else
8559                length = (be32_to_cpu(sdt.entry[0].end_token) -
8560                          be32_to_cpu(sdt.entry[0].start_token)) &
8561                          IPR_FMT2_MBX_ADDR_MASK;
8562
8563        hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8564                             struct ipr_hostrcb, queue);
8565        list_del_init(&hostrcb->queue);
8566        memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8567
8568        rc = ipr_get_ldump_data_section(ioa_cfg,
8569                                        be32_to_cpu(sdt.entry[0].start_token),
8570                                        (__be32 *)&hostrcb->hcam,
8571                                        min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8572
8573        if (!rc) {
8574                ipr_handle_log_data(ioa_cfg, hostrcb);
8575                ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8576                if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8577                    ioa_cfg->sdt_state == GET_DUMP)
8578                        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8579        } else
8580                ipr_unit_check_no_data(ioa_cfg);
8581
8582        list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8583}
8584
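/*
 * The length computation above reflects the two SDT flavors: FMT3
 * entries carry the unit check buffer length directly in end_token,
 * while FMT2 entries encode start and end mailbox addresses, so the
 * length is their difference masked to the mailbox address width.
 */
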
8585/**
8586 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8587 * @ipr_cmd:    ipr command struct
8588 *
8589 * Description: This function retrieves the unit check buffer from the adapter.
8590 *
8591 * Return value:
8592 *      IPR_RC_JOB_RETURN
8593 **/
8594static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8595{
8596        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8597
8598        ENTER;
8599        ioa_cfg->ioa_unit_checked = 0;
8600        ipr_get_unit_check_buffer(ioa_cfg);
8601        ipr_cmd->job_step = ipr_reset_alert;
8602        ipr_reset_start_timer(ipr_cmd, 0);
8603
8604        LEAVE;
8605        return IPR_RC_JOB_RETURN;
8606}
8607
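/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:    ipr command struct
 *
 * Description: While a dump is pending, this function polls in
 * IPR_CHECK_FOR_RESET_TIMEOUT steps until the SIS64 mailbox register
 * reads stable or the wait budget in u.time_left runs out (non-SIS64
 * adapters proceed immediately), then moves the dump state machine to
 * READ_DUMP and schedules the dump worker.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/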
8608static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8609{
8610        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8611
8612        ENTER;
8613
8614        if (ioa_cfg->sdt_state != GET_DUMP)
8615                return IPR_RC_JOB_RETURN;
8616
8617        if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8618            (readl(ioa_cfg->regs.sense_interrupt_reg) &
8619             IPR_PCII_MAILBOX_STABLE)) {
8620
8621                if (!ipr_cmd->u.time_left)
8622                        dev_err(&ioa_cfg->pdev->dev,
8623                                "Timed out waiting for Mailbox register.\n");
8624
8625                ioa_cfg->sdt_state = READ_DUMP;
8626                ioa_cfg->dump_timeout = 0;
8627                if (ioa_cfg->sis64)
8628                        ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8629                else
8630                        ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8631                ipr_cmd->job_step = ipr_reset_wait_for_dump;
8632                schedule_work(&ioa_cfg->work_q);
8633
8634        } else {
8635                ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8636                ipr_reset_start_timer(ipr_cmd,
8637                                      IPR_CHECK_FOR_RESET_TIMEOUT);
8638        }
8639
8640        LEAVE;
8641        return IPR_RC_JOB_RETURN;
8642}
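/*
 * Illustrative sketch (not part of the driver): ipr_dump_mailbox_wait()
 * uses the polling idiom shared by several job steps, i.e. re-arm a short
 * timer and burn down the time_left budget until a register condition
 * holds. Reduced to a stand-alone form with a hypothetical name:
 */
#if 0
static int example_poll_step(struct ipr_cmnd *ipr_cmd, void __iomem *reg,
			     u32 ready_mask)
{
	/* Condition met (or budget exhausted): run the next step now */
	if (!ipr_cmd->u.time_left || (readl(reg) & ready_mask))
		return IPR_RC_JOB_CONTINUE;

	/* Not ready: charge the budget and retry when the timer fires */
	ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	return IPR_RC_JOB_RETURN;
}
#endif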
8643
8644/**
8645 * ipr_reset_restore_cfg_space - Restore PCI config space.
8646 * @ipr_cmd:    ipr command struct
8647 *
8648 * Description: This function restores the saved PCI config space of
8649 * the adapter, fails all outstanding ops back to the callers, and
8650 * fetches the dump/unit check if applicable to this reset.
8651 *
8652 * Return value:
8653 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8654 **/
8655static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8656{
8657        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8658        u32 int_reg;
8659
8660        ENTER;
8661        ioa_cfg->pdev->state_saved = true;
8662        pci_restore_state(ioa_cfg->pdev);
8663
8664        if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8665                ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8666                return IPR_RC_JOB_CONTINUE;
8667        }
8668
8669        ipr_fail_all_ops(ioa_cfg);
8670
8671        if (ioa_cfg->sis64) {
8672                /* Set the adapter to the correct endian mode. */
8673                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8674                int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8675        }
8676
8677        if (ioa_cfg->ioa_unit_checked) {
8678                if (ioa_cfg->sis64) {
8679                        ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8680                        ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8681                        return IPR_RC_JOB_RETURN;
8682                } else {
8683                        ioa_cfg->ioa_unit_checked = 0;
8684                        ipr_get_unit_check_buffer(ioa_cfg);
8685                        ipr_cmd->job_step = ipr_reset_alert;
8686                        ipr_reset_start_timer(ipr_cmd, 0);
8687                        return IPR_RC_JOB_RETURN;
8688                }
8689        }
8690
8691        if (ioa_cfg->in_ioa_bringdown) {
8692                ipr_cmd->job_step = ipr_ioa_bringdown_done;
8693        } else if (ioa_cfg->sdt_state == GET_DUMP) {
8694                ipr_cmd->job_step = ipr_dump_mailbox_wait;
8695                ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8696        } else {
8697                ipr_cmd->job_step = ipr_reset_enable_ioa;
8698        }
8699
8700        LEAVE;
8701        return IPR_RC_JOB_CONTINUE;
8702}
8703
8704/**
8705 * ipr_reset_bist_done - BIST has completed on the adapter.
8706 * @ipr_cmd:    ipr command struct
8707 *
8708 * Description: Unblock config space and resume the reset process.
8709 *
8710 * Return value:
8711 *      IPR_RC_JOB_CONTINUE
8712 **/
8713static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8714{
8715        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8716
8717        ENTER;
8718        if (ioa_cfg->cfg_locked)
8719                pci_cfg_access_unlock(ioa_cfg->pdev);
8720        ioa_cfg->cfg_locked = 0;
8721        ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8722        LEAVE;
8723        return IPR_RC_JOB_CONTINUE;
8724}
8725
8726/**
8727 * ipr_reset_start_bist - Run BIST on the adapter.
8728 * @ipr_cmd:    ipr command struct
8729 *
8730 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8731 *
8732 * Return value:
8733 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8734 **/
8735static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8736{
8737        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8738        int rc = PCIBIOS_SUCCESSFUL;
8739
8740        ENTER;
8741        if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8742                writel(IPR_UPROCI_SIS64_START_BIST,
8743                       ioa_cfg->regs.set_uproc_interrupt_reg32);
8744        else
8745                rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8746
8747        if (rc == PCIBIOS_SUCCESSFUL) {
8748                ipr_cmd->job_step = ipr_reset_bist_done;
8749                ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8750                rc = IPR_RC_JOB_RETURN;
8751        } else {
8752                if (ioa_cfg->cfg_locked)
8753                        pci_cfg_access_unlock(ioa_cfg->pdev);
8754                ioa_cfg->cfg_locked = 0;
8755                ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8756                rc = IPR_RC_JOB_CONTINUE;
8757        }
8758
8759        LEAVE;
8760        return rc;
8761}
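/*
 * Illustrative sketch (not part of the driver): without the MMIO method,
 * BIST is kicked off by writing PCI_BIST_START to the standard PCI_BIST
 * config register, as above. A minimal stand-alone form, with a
 * hypothetical name:
 */
#if 0
static int example_start_pci_bist(struct pci_dev *pdev)
{
	/* pci_write_config_byte() returns PCIBIOS_SUCCESSFUL (0) on success */
	return pci_write_config_byte(pdev, PCI_BIST, PCI_BIST_START);
}
#endif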
8762
8763/**
8764 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8765 * @ipr_cmd:    ipr command struct
8766 *
8767 * Description: This clears PCI reset to the adapter and delays two seconds.
8768 *
8769 * Return value:
8770 *      IPR_RC_JOB_RETURN
8771 **/
8772static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8773{
8774        ENTER;
8775        ipr_cmd->job_step = ipr_reset_bist_done;
8776        ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8777        LEAVE;
8778        return IPR_RC_JOB_RETURN;
8779}
8780
8781/**
8782 * ipr_reset_reset_work - Pulse a PCIe warm reset
8783 * @work:       work struct
8784 *
8785 * Description: This pulses a PCIe warm reset to the slot, then deasserts it.
8786 *
8787 **/
8788static void ipr_reset_reset_work(struct work_struct *work)
8789{
8790        struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8791        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8792        struct pci_dev *pdev = ioa_cfg->pdev;
8793        unsigned long lock_flags = 0;
8794
8795        ENTER;
8796        pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8797        msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8798        pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8799
8800        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8801        if (ioa_cfg->reset_cmd == ipr_cmd)
8802                ipr_reset_ioa_job(ipr_cmd);
8803        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8804        LEAVE;
8805}
8806
8807/**
8808 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8809 * @ipr_cmd:    ipr command struct
8810 *
8811 * Description: This asserts PCI reset to the adapter.
8812 *
8813 * Return value:
8814 *      IPR_RC_JOB_RETURN
8815 **/
8816static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8817{
8818        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8819
8820        ENTER;
8821        INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8822        queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8823        ipr_cmd->job_step = ipr_reset_slot_reset_done;
8824        LEAVE;
8825        return IPR_RC_JOB_RETURN;
8826}
8827
8828/**
8829 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8830 * @ipr_cmd:    ipr command struct
8831 *
8832 * Description: This attempts to block config access to the IOA.
8833 *
8834 * Return value:
8835 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8836 **/
8837static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8838{
8839        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8840        int rc = IPR_RC_JOB_CONTINUE;
8841
8842        if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8843                ioa_cfg->cfg_locked = 1;
8844                ipr_cmd->job_step = ioa_cfg->reset;
8845        } else {
8846                if (ipr_cmd->u.time_left) {
8847                        rc = IPR_RC_JOB_RETURN;
8848                        ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8849                        ipr_reset_start_timer(ipr_cmd,
8850                                              IPR_CHECK_FOR_RESET_TIMEOUT);
8851                } else {
8852                        ipr_cmd->job_step = ioa_cfg->reset;
8853                        dev_err(&ioa_cfg->pdev->dev,
8854                                "Timed out waiting to lock config access. Resetting anyway.\n");
8855                }
8856        }
8857
8858        return rc;
8859}
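/*
 * Note: job steps execute in non-sleeping context, which is why the code
 * above polls with the non-blocking pci_cfg_access_trylock() and a timer
 * rather than calling the sleeping pci_cfg_access_lock(). Holding the
 * lock keeps concurrent PCI config space accesses away from the adapter
 * while it is being reset.
 */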
8860
8861/**
8862 * ipr_reset_block_config_access - Block config access to the IOA
8863 * @ipr_cmd:    ipr command struct
8864 *
8865 * Description: This attempts to block config access to the IOA
8866 *
8867 * Return value:
8868 *      IPR_RC_JOB_CONTINUE
8869 **/
8870static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8871{
8872        ipr_cmd->ioa_cfg->cfg_locked = 0;
8873        ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8874        ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8875        return IPR_RC_JOB_CONTINUE;
8876}
8877
8878/**
8879 * ipr_reset_allowed - Query whether or not IOA can be reset
8880 * @ioa_cfg:    ioa config struct
8881 *
8882 * Return value:
8883 *      0 if reset not allowed / non-zero if reset is allowed
8884 **/
8885static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8886{
8887        volatile u32 temp_reg;
8888
8889        temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8890        return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8891}
8892
8893/**
8894 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8895 * @ipr_cmd:    ipr command struct
8896 *
8897 * Description: This function waits for adapter permission to run BIST,
8898 * then runs BIST. If the adapter does not grant permission within a
8899 * reasonable time, we reset the adapter anyway. The risk of resetting
8900 * the adapter without warning it first is losing the persistent error
8901 * log on the adapter: if the adapter is reset while it is writing to
8902 * its flash, that flash segment will have bad ECC and be zeroed.
8904 *
8905 * Return value:
8906 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8907 **/
8908static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8909{
8910        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8911        int rc = IPR_RC_JOB_RETURN;
8912
8913        if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8914                ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8915                ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8916        } else {
8917                ipr_cmd->job_step = ipr_reset_block_config_access;
8918                rc = IPR_RC_JOB_CONTINUE;
8919        }
8920
8921        return rc;
8922}
8923
8924/**
8925 * ipr_reset_alert - Alert the adapter of a pending reset
8926 * @ipr_cmd:    ipr command struct
8927 *
8928 * Description: This function alerts the adapter that it will be reset.
8929 * If memory space is not currently enabled, proceed directly
8930 * to running BIST on the adapter. The timer must always be started
8931 * so we guarantee we do not run BIST from ipr_isr.
8932 *
8933 * Return value:
8934 *      IPR_RC_JOB_RETURN
8935 **/
8936static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8937{
8938        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8939        u16 cmd_reg;
8940        int rc;
8941
8942        ENTER;
8943        rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8944
8945        if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8946                ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8947                writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8948                ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8949        } else {
8950                ipr_cmd->job_step = ipr_reset_block_config_access;
8951        }
8952
8953        ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8954        ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8955
8956        LEAVE;
8957        return IPR_RC_JOB_RETURN;
8958}
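/*
 * Illustrative sketch (not part of the driver): the branch above hinges on
 * whether the adapter currently decodes memory space, i.e. whether the
 * MMIO reset-alert doorbell can reach it at all. A stand-alone check,
 * with a hypothetical name:
 */
#if 0
static bool example_mem_space_enabled(struct pci_dev *pdev)
{
	u16 cmd_reg;

	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd_reg) != PCIBIOS_SUCCESSFUL)
		return false;

	return (cmd_reg & PCI_COMMAND_MEMORY) != 0;
}
#endif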
8959
8960/**
8961 * ipr_reset_quiesce_done - Complete IOA disconnect
8962 * @ipr_cmd:    ipr command struct
8963 *
8964 * Description: Freeze the adapter to complete quiesce processing
8965 *
8966 * Return value:
8967 *      IPR_RC_JOB_CONTINUE
8968 **/
8969static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8970{
8971        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8972
8973        ENTER;
8974        ipr_cmd->job_step = ipr_ioa_bringdown_done;
8975        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8976        LEAVE;
8977        return IPR_RC_JOB_CONTINUE;
8978}
8979
8980/**
8981 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8982 * @ipr_cmd:    ipr command struct
8983 *
8984 * Description: Ensure nothing is outstanding to the IOA and proceed with
8985 * IOA disconnect. Otherwise reset the IOA.
8986 *
8987 * Return value:
8988 *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8989 **/
8990static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8991{
8992        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8993        struct ipr_cmnd *loop_cmd;
8994        struct ipr_hrr_queue *hrrq;
8995        int rc = IPR_RC_JOB_CONTINUE;
8996        int count = 0;
8997
8998        ENTER;
8999        ipr_cmd->job_step = ipr_reset_quiesce_done;
9000
9001        for_each_hrrq(hrrq, ioa_cfg) {
9002                spin_lock(&hrrq->_lock);
9003                list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9004                        count++;
9005                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9006                        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9007                        rc = IPR_RC_JOB_RETURN;
9008                        break;
9009                }
9010                spin_unlock(&hrrq->_lock);
9011
9012                if (count)
9013                        break;
9014        }
9015
9016        LEAVE;
9017        return rc;
9018}
9019
9020/**
9021 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9022 * @ipr_cmd:    ipr command struct
9023 *
9024 * Description: Cancel any outstanding HCAMs to the IOA.
9025 *
9026 * Return value:
9027 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9028 **/
9029static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9030{
9031        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9032        int rc = IPR_RC_JOB_CONTINUE;
9033        struct ipr_cmd_pkt *cmd_pkt;
9034        struct ipr_cmnd *hcam_cmd;
9035        struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9036
9037        ENTER;
9038        ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9039
9040        if (!hrrq->ioa_is_dead) {
9041                if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9042                        list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9043                                if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9044                                        continue;
9045
9046                                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9048                                cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9049                                cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9050                                cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9051                                cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9052                                cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9053                                cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9054                                cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9055                                cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9056                                cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9057                                cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9058                                cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9059                                cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9060
9061                                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9062                                           IPR_CANCEL_TIMEOUT);
9063
9064                                rc = IPR_RC_JOB_RETURN;
9065                                ipr_cmd->job_step = ipr_reset_cancel_hcam;
9066                                break;
9067                        }
9068                }
9069        } else
9070                ipr_cmd->job_step = ipr_reset_alert;
9071
9072        LEAVE;
9073        return rc;
9074}
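/*
 * Illustrative sketch (not part of the driver): the Cancel Request CDB
 * built above carries the 64-bit IOARCB address big-endian, but split
 * across two byte ranges: bits 63:32 land in cdb[10..13] and bits 31:0 in
 * cdb[2..5]. An equivalent stand-alone encoder, with a hypothetical name:
 */
#if 0
static void example_encode_cancel_addr(u8 *cdb, u64 addr)
{
	int i;

	for (i = 0; i < 4; i++) {
		cdb[10 + i] = (addr >> (56 - 8 * i)) & 0xff;	/* bits 63:32 */
		cdb[2 + i] = (addr >> (24 - 8 * i)) & 0xff;	/* bits 31:0 */
	}
}
#endif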
9075
9076/**
9077 * ipr_reset_ucode_download_done - Microcode download completion
9078 * @ipr_cmd:    ipr command struct
9079 *
9080 * Description: This function unmaps the microcode download buffer.
9081 *
9082 * Return value:
9083 *      IPR_RC_JOB_CONTINUE
9084 **/
9085static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9086{
9087        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9088        struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9089
9090        dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9091                     sglist->num_sg, DMA_TO_DEVICE);
9092
9093        ipr_cmd->job_step = ipr_reset_alert;
9094        return IPR_RC_JOB_CONTINUE;
9095}
9096
9097/**
9098 * ipr_reset_ucode_download - Download microcode to the adapter
9099 * @ipr_cmd:    ipr command struct
9100 *
9101 * Description: This function checks to see if there is microcode
9102 * to download to the adapter. If there is, a download is performed.
9103 *
9104 * Return value:
9105 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9106 **/
9107static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9108{
9109        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9110        struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9111
9112        ENTER;
9113        ipr_cmd->job_step = ipr_reset_alert;
9114
9115        if (!sglist)
9116                return IPR_RC_JOB_CONTINUE;
9117
9118        ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9119        ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9120        ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9121        ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9122        ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9123        ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9124        ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9125
9126        if (ioa_cfg->sis64)
9127                ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9128        else
9129                ipr_build_ucode_ioadl(ipr_cmd, sglist);
9130        ipr_cmd->job_step = ipr_reset_ucode_download_done;
9131
9132        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9133                   IPR_WRITE_BUFFER_TIMEOUT);
9134
9135        LEAVE;
9136        return IPR_RC_JOB_RETURN;
9137}
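/*
 * Illustrative sketch (not part of the driver): per SCSI WRITE BUFFER,
 * CDB bytes 6..8 carry a 24-bit, big-endian parameter list length, which
 * is what the three shifts above construct. Stand-alone form, with a
 * hypothetical name:
 */
#if 0
static void example_encode_write_buffer_len(u8 *cdb, u32 len)
{
	cdb[6] = (len >> 16) & 0xff;	/* MSB of the 24-bit length */
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;		/* LSB */
}
#endif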
9138
9139/**
9140 * ipr_reset_shutdown_ioa - Shutdown the adapter
9141 * @ipr_cmd:    ipr command struct
9142 *
9143 * Description: This function issues an adapter shutdown of the
9144 * specified type to the specified adapter as part of the
9145 * adapter reset job.
9146 *
9147 * Return value:
9148 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9149 **/
9150static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9151{
9152        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9153        enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9154        unsigned long timeout;
9155        int rc = IPR_RC_JOB_CONTINUE;
9156
9157        ENTER;
9158        if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9159                ipr_cmd->job_step = ipr_reset_cancel_hcam;
9160        else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9161                        !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9162                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9163                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9164                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9165                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9166
9167                if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9168                        timeout = IPR_SHUTDOWN_TIMEOUT;
9169                else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9170                        timeout = IPR_INTERNAL_TIMEOUT;
9171                else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9172                        timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9173                else
9174                        timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9175
9176                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9177
9178                rc = IPR_RC_JOB_RETURN;
9179                ipr_cmd->job_step = ipr_reset_ucode_download;
9180        } else
9181                ipr_cmd->job_step = ipr_reset_alert;
9182
9183        LEAVE;
9184        return rc;
9185}
9186
9187/**
9188 * ipr_reset_ioa_job - Adapter reset job
9189 * @ipr_cmd:    ipr command struct
9190 *
9191 * Description: This function is the job router for the adapter reset job.
9192 *
9193 * Return value:
9194 *      none
9195 **/
9196static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9197{
9198        u32 rc, ioasc;
9199        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9200
9201        do {
9202                ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9203
9204                if (ioa_cfg->reset_cmd != ipr_cmd) {
9205                        /*
9206                         * We are doing nested adapter resets and this is
9207                         * not the current reset job.
9208                         */
9209                        list_add_tail(&ipr_cmd->queue,
9210                                        &ipr_cmd->hrrq->hrrq_free_q);
9211                        return;
9212                }
9213
9214                if (IPR_IOASC_SENSE_KEY(ioasc)) {
9215                        rc = ipr_cmd->job_step_failed(ipr_cmd);
9216                        if (rc == IPR_RC_JOB_RETURN)
9217                                return;
9218                }
9219
9220                ipr_reinit_ipr_cmnd(ipr_cmd);
9221                ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9222                rc = ipr_cmd->job_step(ipr_cmd);
9223        } while (rc == IPR_RC_JOB_CONTINUE);
9224}
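/*
 * Illustrative sketch (not part of the driver): the reset engine is a
 * continuation-style state machine. A step either finishes synchronously
 * (IPR_RC_JOB_CONTINUE, so the router immediately runs the next job_step)
 * or parks the command on a timer or response (IPR_RC_JOB_RETURN, and the
 * completion path re-enters the router). The skeleton, with hypothetical
 * names:
 */
#if 0
struct example_job {
	/* Each step updates ->step to the next state before returning */
	int (*step)(struct example_job *job);
};

static void example_run_job(struct example_job *job)
{
	while (job->step(job) == IPR_RC_JOB_CONTINUE)
		;	/* loop until a step defers to an async event */
}
#endif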
9225
9226/**
9227 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9228 * @ioa_cfg:            ioa config struct
9229 * @job_step:           first job step of reset job
9230 * @shutdown_type:      shutdown type
9231 *
9232 * Description: This function will initiate the reset of the given adapter
9233 * starting at the selected job step.
9234 * If the caller needs to wait on the completion of the reset,
9235 * the caller must sleep on the reset_wait_q.
9236 *
9237 * Return value:
9238 *      none
9239 **/
9240static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9241                                    int (*job_step) (struct ipr_cmnd *),
9242                                    enum ipr_shutdown_type shutdown_type)
9243{
9244        struct ipr_cmnd *ipr_cmd;
9245        int i;
9246
9247        ioa_cfg->in_reset_reload = 1;
9248        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9249                spin_lock(&ioa_cfg->hrrq[i]._lock);
9250                ioa_cfg->hrrq[i].allow_cmds = 0;
9251                spin_unlock(&ioa_cfg->hrrq[i]._lock);
9252        }
9253        wmb();
9254        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9255                ioa_cfg->scsi_unblock = 0;
9256                ioa_cfg->scsi_blocked = 1;
9257                scsi_block_requests(ioa_cfg->host);
9258        }
9259
9260        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9261        ioa_cfg->reset_cmd = ipr_cmd;
9262        ipr_cmd->job_step = job_step;
9263        ipr_cmd->u.shutdown_type = shutdown_type;
9264
9265        ipr_reset_ioa_job(ipr_cmd);
9266}
9267
9268/**
9269 * ipr_initiate_ioa_reset - Initiate an adapter reset
9270 * @ioa_cfg:            ioa config struct
9271 * @shutdown_type:      shutdown type
9272 *
9273 * Description: This function will initiate the reset of the given adapter.
9274 * If the caller needs to wait on the completion of the reset,
9275 * the caller must sleep on the reset_wait_q.
9276 *
9277 * Return value:
9278 *      none
9279 **/
9280static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9281                                   enum ipr_shutdown_type shutdown_type)
9282{
9283        int i;
9284
9285        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9286                return;
9287
9288        if (ioa_cfg->in_reset_reload) {
9289                if (ioa_cfg->sdt_state == GET_DUMP)
9290                        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9291                else if (ioa_cfg->sdt_state == READ_DUMP)
9292                        ioa_cfg->sdt_state = ABORT_DUMP;
9293        }
9294
9295        if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9296                dev_err(&ioa_cfg->pdev->dev,
9297                        "IOA taken offline - error recovery failed\n");
9298
9299                ioa_cfg->reset_retries = 0;
9300                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9301                        spin_lock(&ioa_cfg->hrrq[i]._lock);
9302                        ioa_cfg->hrrq[i].ioa_is_dead = 1;
9303                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
9304                }
9305                wmb();
9306
9307                if (ioa_cfg->in_ioa_bringdown) {
9308                        ioa_cfg->reset_cmd = NULL;
9309                        ioa_cfg->in_reset_reload = 0;
9310                        ipr_fail_all_ops(ioa_cfg);
9311                        wake_up_all(&ioa_cfg->reset_wait_q);
9312
9313                        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9314                                ioa_cfg->scsi_unblock = 1;
9315                                schedule_work(&ioa_cfg->work_q);
9316                        }
9317                        return;
9318                } else {
9319                        ioa_cfg->in_ioa_bringdown = 1;
9320                        shutdown_type = IPR_SHUTDOWN_NONE;
9321                }
9322        }
9323
9324        _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9325                                shutdown_type);
9326}
9327
9328/**
9329 * ipr_reset_freeze - Hold off all I/O activity
9330 * @ipr_cmd:    ipr command struct
9331 *
9332 * Description: If the PCI slot is frozen, hold off all I/O
9333 * activity; then, as soon as the slot is available again,
9334 * initiate an adapter reset.
9335 */
9336static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9337{
9338        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9339        int i;
9340
9341        /* Disallow new interrupts, avoid loop */
9342        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9343                spin_lock(&ioa_cfg->hrrq[i]._lock);
9344                ioa_cfg->hrrq[i].allow_interrupts = 0;
9345                spin_unlock(&ioa_cfg->hrrq[i]._lock);
9346        }
9347        wmb();
9348        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9349        ipr_cmd->done = ipr_reset_ioa_job;
9350        return IPR_RC_JOB_RETURN;
9351}
9352
9353/**
9354 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9355 * @pdev:       PCI device struct
9356 *
9357 * Description: This routine is called to tell us that the MMIO
9358 * access to the IOA has been restored.
9359 */
9360static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9361{
9362        unsigned long flags = 0;
9363        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9364
9365        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9366        if (!ioa_cfg->probe_done)
9367                pci_save_state(pdev);
9368        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9369        return PCI_ERS_RESULT_NEED_RESET;
9370}
9371
9372/**
9373 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9374 * @pdev:       PCI device struct
9375 *
9376 * Description: This routine is called to tell us that the PCI bus
9377 * is down. Can't do anything here, except put the device driver
9378 * into a holding pattern, waiting for the PCI bus to come back.
9379 */
9380static void ipr_pci_frozen(struct pci_dev *pdev)
9381{
9382        unsigned long flags = 0;
9383        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9384
9385        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9386        if (ioa_cfg->probe_done)
9387                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9388        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9389}
9390
9391/**
9392 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9393 * @pdev:       PCI device struct
9394 *
9395 * Description: This routine is called by the pci error recovery
9396 * code after the PCI slot has been reset, just before we
9397 * should resume normal operations.
9398 */
9399static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9400{
9401        unsigned long flags = 0;
9402        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9403
9404        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9405        if (ioa_cfg->probe_done) {
9406                if (ioa_cfg->needs_warm_reset)
9407                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9408                else
9409                        _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9410                                                IPR_SHUTDOWN_NONE);
9411        } else
9412                wake_up_all(&ioa_cfg->eeh_wait_q);
9413        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9414        return PCI_ERS_RESULT_RECOVERED;
9415}
9416
9417/**
9418 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9419 * @pdev:       PCI device struct
9420 *
9421 * Description: This routine is called when the PCI bus has
9422 * permanently failed.
9423 */
9424static void ipr_pci_perm_failure(struct pci_dev *pdev)
9425{
9426        unsigned long flags = 0;
9427        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9428        int i;
9429
9430        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9431        if (ioa_cfg->probe_done) {
9432                if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9433                        ioa_cfg->sdt_state = ABORT_DUMP;
9434                ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9435                ioa_cfg->in_ioa_bringdown = 1;
9436                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9437                        spin_lock(&ioa_cfg->hrrq[i]._lock);
9438                        ioa_cfg->hrrq[i].allow_cmds = 0;
9439                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
9440                }
9441                wmb();
9442                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9443        } else
9444                wake_up_all(&ioa_cfg->eeh_wait_q);
9445        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9446}
9447
9448/**
9449 * ipr_pci_error_detected - Called when a PCI error is detected.
9450 * @pdev:       PCI device struct
9451 * @state:      PCI channel state
9452 *
9453 * Description: Called when a PCI error is detected.
9454 *
9455 * Return value:
9456 *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9457 */
9458static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9459                                               pci_channel_state_t state)
9460{
9461        switch (state) {
9462        case pci_channel_io_frozen:
9463                ipr_pci_frozen(pdev);
9464                return PCI_ERS_RESULT_CAN_RECOVER;
9465        case pci_channel_io_perm_failure:
9466                ipr_pci_perm_failure(pdev);
9467                return PCI_ERS_RESULT_DISCONNECT;
9469        default:
9470                break;
9471        }
9472        return PCI_ERS_RESULT_NEED_RESET;
9473}
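/*
 * Illustrative sketch (not part of the driver): EEH callbacks like the
 * four above are handed to the PCI core through a struct
 * pci_error_handlers hung off the driver's struct pci_driver. A minimal
 * hookup, with a hypothetical table name:
 */
#if 0
static const struct pci_error_handlers example_err_handler = {
	.error_detected	= ipr_pci_error_detected,
	.mmio_enabled	= ipr_pci_mmio_enabled,
	.slot_reset	= ipr_pci_slot_reset,
};
#endif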
9474
9475/**
9476 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9477 * @ioa_cfg:    ioa cfg struct
9478 *
9479 * Description: This is the second phase of adapter initialization
9480 * This function takes care of initializing the adapter to the point
9481 * where it can accept new commands.
9482 *
9483 * Return value:
9484 *      0 on success / -EIO on failure
9485 **/
9486static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9487{
9488        int rc = 0;
9489        unsigned long host_lock_flags = 0;
9490
9491        ENTER;
9492        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9493        dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9494        ioa_cfg->probe_done = 1;
9495        if (ioa_cfg->needs_hard_reset) {
9496                ioa_cfg->needs_hard_reset = 0;
9497                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9498        } else
9499                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9500                                        IPR_SHUTDOWN_NONE);
9501        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9502
9503        LEAVE;
9504        return rc;
9505}
9506
9507/**
9508 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9509 * @ioa_cfg:    ioa config struct
9510 *
9511 * Return value:
9512 *      none
9513 **/
9514static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9515{
9516        int i;
9517
9518        if (ioa_cfg->ipr_cmnd_list) {
9519                for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9520                        if (ioa_cfg->ipr_cmnd_list[i])
9521                                dma_pool_free(ioa_cfg->ipr_cmd_pool,
9522                                              ioa_cfg->ipr_cmnd_list[i],
9523                                              ioa_cfg->ipr_cmnd_list_dma[i]);
9524
9525                        ioa_cfg->ipr_cmnd_list[i] = NULL;
9526                }
9527        }
9528
9529        /* dma_pool_destroy() is NULL-safe, so no guard is needed */
9530        dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9531
9532        kfree(ioa_cfg->ipr_cmnd_list);
9533        kfree(ioa_cfg->ipr_cmnd_list_dma);
9534        ioa_cfg->ipr_cmnd_list = NULL;
9535        ioa_cfg->ipr_cmnd_list_dma = NULL;
9536        ioa_cfg->ipr_cmd_pool = NULL;
9537}
9538
9539/**
9540 * ipr_free_mem - Frees memory allocated for an adapter
9541 * @ioa_cfg:    ioa cfg struct
9542 *
9543 * Return value:
9544 *      nothing
9545 **/
9546static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9547{
9548        int i;
9549
9550        kfree(ioa_cfg->res_entries);
9551        dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9552                          ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9553        ipr_free_cmd_blks(ioa_cfg);
9554
9555        for (i = 0; i < ioa_cfg->hrrq_num; i++)
9556                dma_free_coherent(&ioa_cfg->pdev->dev,
9557                                  sizeof(u32) * ioa_cfg->hrrq[i].size,
9558                                  ioa_cfg->hrrq[i].host_rrq,
9559                                  ioa_cfg->hrrq[i].host_rrq_dma);
9560
9561        dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9562                          ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9563
9564        for (i = 0; i < IPR_MAX_HCAMS; i++) {
9565                dma_free_coherent(&ioa_cfg->pdev->dev,
9566                                  sizeof(struct ipr_hostrcb),
9567                                  ioa_cfg->hostrcb[i],
9568                                  ioa_cfg->hostrcb_dma[i]);
9569        }
9570
9571        ipr_free_dump(ioa_cfg);
9572        kfree(ioa_cfg->trace);
9573}
9574
9575/**
9576 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9577 * @ioa_cfg:    ipr cfg struct
9578 *
9579 * This function frees all allocated IRQs for the
9580 * specified adapter.
9581 *
9582 * Return value:
9583 *      none
9584 **/
9585static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9586{
9587        struct pci_dev *pdev = ioa_cfg->pdev;
9588        int i;
9589
9590        for (i = 0; i < ioa_cfg->nvectors; i++)
9591                free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9592        pci_free_irq_vectors(pdev);
9593}
9594
9595/**
9596 * ipr_free_all_resources - Free all allocated resources for an adapter.
9597 * @ipr_cmd:    ipr command struct
9598 *
9599 * This function frees all allocated resources for the
9600 * specified adapter.
9601 *
9602 * Return value:
9603 *      none
9604 **/
9605static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9606{
9607        struct pci_dev *pdev = ioa_cfg->pdev;
9608
9609        ENTER;
9610        ipr_free_irqs(ioa_cfg);
9611        if (ioa_cfg->reset_work_q)
9612                destroy_workqueue(ioa_cfg->reset_work_q);
9613        iounmap(ioa_cfg->hdw_dma_regs);
9614        pci_release_regions(pdev);
9615        ipr_free_mem(ioa_cfg);
9616        scsi_host_put(ioa_cfg->host);
9617        pci_disable_device(pdev);
9618        LEAVE;
9619}
9620
9621/**
9622 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9623 * @ioa_cfg:    ioa config struct
9624 *
9625 * Return value:
9626 *      0 on success / -ENOMEM on allocation failure
9627 **/
9628static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9629{
9630        struct ipr_cmnd *ipr_cmd;
9631        struct ipr_ioarcb *ioarcb;
9632        dma_addr_t dma_addr;
9633        int i, entries_each_hrrq, hrrq_id = 0;
9634
9635        ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9636                                                sizeof(struct ipr_cmnd), 512, 0);
9637
9638        if (!ioa_cfg->ipr_cmd_pool)
9639                return -ENOMEM;
9640
9641        ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9642        ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9643
9644        if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9645                ipr_free_cmd_blks(ioa_cfg);
9646                return -ENOMEM;
9647        }
9648
9649        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9650                if (ioa_cfg->hrrq_num > 1) {
9651                        if (i == 0) {
9652                                entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9653                                ioa_cfg->hrrq[i].min_cmd_id = 0;
9654                                ioa_cfg->hrrq[i].max_cmd_id =
9655                                        (entries_each_hrrq - 1);
9656                        } else {
9657                                entries_each_hrrq =
9658                                        IPR_NUM_BASE_CMD_BLKS/
9659                                        (ioa_cfg->hrrq_num - 1);
9660                                ioa_cfg->hrrq[i].min_cmd_id =
9661                                        IPR_NUM_INTERNAL_CMD_BLKS +
9662                                        (i - 1) * entries_each_hrrq;
9663                                ioa_cfg->hrrq[i].max_cmd_id =
9664                                        (IPR_NUM_INTERNAL_CMD_BLKS +
9665                                        i * entries_each_hrrq - 1);
9666                        }
9667                } else {
9668                        entries_each_hrrq = IPR_NUM_CMD_BLKS;
9669                        ioa_cfg->hrrq[i].min_cmd_id = 0;
9670                        ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9671                }
9672                ioa_cfg->hrrq[i].size = entries_each_hrrq;
9673        }
9674
9675        BUG_ON(ioa_cfg->hrrq_num == 0);
9676
9677        i = IPR_NUM_CMD_BLKS -
9678                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9679        if (i > 0) {
9680                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9681                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9682        }
9683
9684        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9685                ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9686                                GFP_KERNEL, &dma_addr);
9687
9688                if (!ipr_cmd) {
9689                        ipr_free_cmd_blks(ioa_cfg);
9690                        return -ENOMEM;
9691                }
9692
9693                ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9694                ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9695
9696                ioarcb = &ipr_cmd->ioarcb;
9697                ipr_cmd->dma_addr = dma_addr;
9698                if (ioa_cfg->sis64)
9699                        ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9700                else
9701                        ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9702
9703                ioarcb->host_response_handle = cpu_to_be32(i << 2);
9704                if (ioa_cfg->sis64) {
9705                        ioarcb->u.sis64_addr_data.data_ioadl_addr =
9706                                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9707                        ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9708                                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9709                } else {
9710                        ioarcb->write_ioadl_addr =
9711                                cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9712                        ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9713                        ioarcb->ioasa_host_pci_addr =
9714                                cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9715                }
9716                ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9717                ipr_cmd->cmd_index = i;
9718                ipr_cmd->ioa_cfg = ioa_cfg;
9719                ipr_cmd->sense_buffer_dma = dma_addr +
9720                        offsetof(struct ipr_cmnd, sense_buffer);
9721
9722                ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9723                ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9724                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9725                if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9726                        hrrq_id++;
9727        }
9728
9729        return 0;
9730}
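/*
 * Illustrative sketch (not part of the driver): the command blocks above
 * come from a dma_pool, which hands out fixed-size, alignment-guaranteed
 * DMA-coherent objects. The bare lifecycle, with a hypothetical name:
 */
#if 0
static int example_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* name, device, object size, alignment (512, as above), boundary */
	pool = dma_pool_create("example", dev, sizeof(struct ipr_cmnd), 512, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma);  /* zeroed object */
	if (vaddr)
		dma_pool_free(pool, vaddr, dma);	  /* return it */
	dma_pool_destroy(pool);				  /* then the pool */
	return vaddr ? 0 : -ENOMEM;
}
#endif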
9731
9732/**
9733 * ipr_alloc_mem - Allocate memory for an adapter
9734 * @ioa_cfg:    ioa config struct
9735 *
9736 * Return value:
9737 *      0 on success / non-zero for error
9738 **/
9739static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9740{
9741        struct pci_dev *pdev = ioa_cfg->pdev;
9742        int i, rc = -ENOMEM;
9743
9744        ENTER;
9745        ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9746                                       sizeof(struct ipr_resource_entry),
9747                                       GFP_KERNEL);
9748
9749        if (!ioa_cfg->res_entries)
9750                goto out;
9751
9752        for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9753                list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9754                ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9755        }
9756
9757        ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9758                                              sizeof(struct ipr_misc_cbs),
9759                                              &ioa_cfg->vpd_cbs_dma,
9760                                              GFP_KERNEL);
9761
9762        if (!ioa_cfg->vpd_cbs)
9763                goto out_free_res_entries;
9764
9765        if (ipr_alloc_cmd_blks(ioa_cfg))
9766                goto out_free_vpd_cbs;
9767
9768        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9769                ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9770                                        sizeof(u32) * ioa_cfg->hrrq[i].size,
9771                                        &ioa_cfg->hrrq[i].host_rrq_dma,
9772                                        GFP_KERNEL);
9773
9774                if (!ioa_cfg->hrrq[i].host_rrq)  {
9775                        while (--i >= 0)	/* unwind, including hrrq[0] */
9776                                dma_free_coherent(&pdev->dev,
9777                                        sizeof(u32) * ioa_cfg->hrrq[i].size,
9778                                        ioa_cfg->hrrq[i].host_rrq,
9779                                        ioa_cfg->hrrq[i].host_rrq_dma);
9780                        goto out_ipr_free_cmd_blocks;
9781                }
9782                ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9783        }
9784
9785        ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9786                                                  ioa_cfg->cfg_table_size,
9787                                                  &ioa_cfg->cfg_table_dma,
9788                                                  GFP_KERNEL);
9789
9790        if (!ioa_cfg->u.cfg_table)
9791                goto out_free_host_rrq;
9792
9793        for (i = 0; i < IPR_MAX_HCAMS; i++) {
9794                ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9795                                                         sizeof(struct ipr_hostrcb),
9796                                                         &ioa_cfg->hostrcb_dma[i],
9797                                                         GFP_KERNEL);
9798
9799                if (!ioa_cfg->hostrcb[i])
9800                        goto out_free_hostrcb_dma;
9801
9802                ioa_cfg->hostrcb[i]->hostrcb_dma =
9803                        ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9804                ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9805                list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9806        }
9807
9808        ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9809                                 sizeof(struct ipr_trace_entry),
9810                                 GFP_KERNEL);
9811
9812        if (!ioa_cfg->trace)
9813                goto out_free_hostrcb_dma;
9814
9815        rc = 0;
9816out:
9817        LEAVE;
9818        return rc;
9819
9820out_free_hostrcb_dma:
9821        while (i-- > 0) {
9822                dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9823                                  ioa_cfg->hostrcb[i],
9824                                  ioa_cfg->hostrcb_dma[i]);
9825        }
9826        dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9827                          ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9828out_free_host_rrq:
9829        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9830                dma_free_coherent(&pdev->dev,
9831                                  sizeof(u32) * ioa_cfg->hrrq[i].size,
9832                                  ioa_cfg->hrrq[i].host_rrq,
9833                                  ioa_cfg->hrrq[i].host_rrq_dma);
9834        }
9835out_ipr_free_cmd_blocks:
9836        ipr_free_cmd_blks(ioa_cfg);
9837out_free_vpd_cbs:
9838        dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9839                          ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9840out_free_res_entries:
9841        kfree(ioa_cfg->res_entries);
9842        goto out;
9843}
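/*
 * Illustrative sketch (not part of the driver): ipr_alloc_mem() uses the
 * kernel's idiomatic goto-unwind error handling: allocate in order, and on
 * any failure jump to a label that frees everything acquired so far, in
 * reverse order. Reduced to two allocations, with a hypothetical name:
 */
#if 0
static int example_alloc_two(struct device *dev)
{
	dma_addr_t da, db;
	void *a, *b;

	a = dma_alloc_coherent(dev, PAGE_SIZE, &da, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = dma_alloc_coherent(dev, PAGE_SIZE, &db, GFP_KERNEL);
	if (!b)
		goto out_free_a;

	/* success: use a and b, then free both in reverse order */
	dma_free_coherent(dev, PAGE_SIZE, b, db);
	dma_free_coherent(dev, PAGE_SIZE, a, da);
	return 0;

out_free_a:
	dma_free_coherent(dev, PAGE_SIZE, a, da);
	return -ENOMEM;
}
#endif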
9844
9845/**
9846 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9847 * @ioa_cfg:    ioa config struct
9848 *
9849 * Return value:
9850 *      none
9851 **/
9852static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9853{
9854        int i;
9855
9856        for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9857                ioa_cfg->bus_attr[i].bus = i;
9858                ioa_cfg->bus_attr[i].qas_enabled = 0;
9859                ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9860                if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9861                        ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9862                else
9863                        ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9864        }
9865}
9866
9867/**
9868 * ipr_init_regs - Initialize IOA registers
9869 * @ioa_cfg:    ioa config struct
9870 *
9871 * Return value:
9872 *      none
9873 **/
9874static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9875{
9876        const struct ipr_interrupt_offsets *p;
9877        struct ipr_interrupts *t;
9878        void __iomem *base;
9879
9880        p = &ioa_cfg->chip_cfg->regs;
9881        t = &ioa_cfg->regs;
9882        base = ioa_cfg->hdw_dma_regs;
9883
9884        t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9885        t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9886        t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9887        t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9888        t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9889        t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9890        t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9891        t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9892        t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9893        t->ioarrin_reg = base + p->ioarrin_reg;
9894        t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9895        t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9896        t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9897        t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9898        t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9899        t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9900
9901        if (ioa_cfg->sis64) {
9902                t->init_feedback_reg = base + p->init_feedback_reg;
9903                t->dump_addr_reg = base + p->dump_addr_reg;
9904                t->dump_data_reg = base + p->dump_data_reg;
9905                t->endian_swap_reg = base + p->endian_swap_reg;
9906        }
9907}
9908
9909/**
9910 * ipr_init_ioa_cfg - Initialize IOA config struct
9911 * @ioa_cfg:    ioa config struct
9912 * @host:               scsi host struct
9913 * @pdev:               PCI dev struct
9914 *
9915 * Return value:
9916 *      none
9917 **/
9918static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9919                             struct Scsi_Host *host, struct pci_dev *pdev)
9920{
9921        int i;
9922
9923        ioa_cfg->host = host;
9924        ioa_cfg->pdev = pdev;
9925        ioa_cfg->log_level = ipr_log_level;
9926        ioa_cfg->doorbell = IPR_DOORBELL;
9927        sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9928        sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9929        sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9930        sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9931        sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9932        sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9933
9934        INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9935        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9936        INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9937        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9938        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9939        INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9940        INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9941        init_waitqueue_head(&ioa_cfg->reset_wait_q);
9942        init_waitqueue_head(&ioa_cfg->msi_wait_q);
9943        init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9944        ioa_cfg->sdt_state = INACTIVE;
9945
9946        ipr_initialize_bus_attr(ioa_cfg);
9947        ioa_cfg->max_devs_supported = ipr_max_devs;
9948
9949        if (ioa_cfg->sis64) {
9950                host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9951                host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9952                if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9953                        ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9954                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9955                                           + ((sizeof(struct ipr_config_table_entry64)
9956                                               * ioa_cfg->max_devs_supported)));
9957        } else {
9958                host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9959                host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9960                if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9961                        ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9962                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9963                                           + ((sizeof(struct ipr_config_table_entry)
9964                                               * ioa_cfg->max_devs_supported)));
9965        }
9966
9967        host->max_channel = IPR_VSET_BUS;
9968        host->unique_id = host->host_no;
9969        host->max_cmd_len = IPR_MAX_CDB_LEN;
9970        host->can_queue = ioa_cfg->max_cmds;
9971        pci_set_drvdata(pdev, ioa_cfg);
9972
9973        for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9974                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9975                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9976                spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9977                if (i == 0)
9978                        ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9979                else
9980                        ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9981        }
9982}
9983
9984/**
9985 * ipr_get_chip_info - Find adapter chip information
9986 * @dev_id:             PCI device id struct
9987 *
9988 * Return value:
9989 *      ptr to chip information on success / NULL on failure
9990 **/
9991static const struct ipr_chip_t *
9992ipr_get_chip_info(const struct pci_device_id *dev_id)
9993{
9994        int i;
9995
9996        for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9997                if (ipr_chip[i].vendor == dev_id->vendor &&
9998                    ipr_chip[i].device == dev_id->device)
9999                        return &ipr_chip[i];
10000        return NULL;
10001}
10002
10003/**
10004 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10005 *                                              during probe time
10006 * @ioa_cfg:    ioa config struct
10007 *
10008 * Return value:
10009 *      None
10010 **/
10011static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10012{
10013        struct pci_dev *pdev = ioa_cfg->pdev;
10014
10015        if (pci_channel_offline(pdev)) {
10016                wait_event_timeout(ioa_cfg->eeh_wait_q,
10017                                   !pci_channel_offline(pdev),
10018                                   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10019                pci_restore_state(pdev);
10020        }
10021}
10022
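/**
 * name_msi_vectors - Name the allocated interrupt vectors
 * @ioa_cfg:	ioa config struct
 *
 * Description: Builds a "host<n>-<vector>" description string for each
 * allocated MSI/MSI-X vector, for later use by request_irq().
 *
 * Return value:
 *	none
 **/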
10023static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10024{
10025        int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10026
10027        for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10028                /* snprintf() NUL-terminates within n, no manual terminator needed */
10029                snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10030                         "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10032        }
10033}
10034
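/**
 * ipr_request_other_msi_irqs - Request IRQs for the secondary HRRQ vectors
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: Requests an IRQ for every vector past the first, one per
 * additional HRRQ, unwinding the already-requested vectors on failure.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/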
10035static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10036                struct pci_dev *pdev)
10037{
10038        int i, rc;
10039
10040        for (i = 1; i < ioa_cfg->nvectors; i++) {
10041                rc = request_irq(pci_irq_vector(pdev, i),
10042                        ipr_isr_mhrrq,
10043                        0,
10044                        ioa_cfg->vectors_info[i].desc,
10045                        &ioa_cfg->hrrq[i]);
10046                if (rc) {
10047                        while (--i >= 0)
10048                                free_irq(pci_irq_vector(pdev, i),
10049                                        &ioa_cfg->hrrq[i]);
10050                        return rc;
10051                }
10052        }
10053        return 0;
10054}
10055
10056/**
10057 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10058 * @irq:                interrupt number
 * @devp:               pointer to the ioa config struct
10059 *
10060 * Description: Simply set the msi_received flag to 1 indicating that
10061 * Message Signaled Interrupts are supported.
10062 *
10063 * Return value:
10064 *      IRQ_HANDLED
10065 **/
10066static irqreturn_t ipr_test_intr(int irq, void *devp)
10067{
10068        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10069        unsigned long lock_flags = 0;
10070        irqreturn_t rc = IRQ_HANDLED;
10071
10072        dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10073        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10074
10075        ioa_cfg->msi_received = 1;
10076        wake_up(&ioa_cfg->msi_wait_q);
10077
10078        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10079        return rc;
10080}
10081
10082/**
10083 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:            ioa config struct
10084 * @pdev:               PCI device struct
10085 *
10086 * Description: This routine sets up and initiates a test interrupt to determine
10087 * if the interrupt is received via the ipr_test_intr() service routine.
10088 * If the test fails, the driver will fall back to LSI.
10089 *
10090 * Return value:
10091 *      0 on success / non-zero on failure
10092 **/
10093static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10094{
10095        int rc;
10096        volatile u32 int_reg;
10097        unsigned long lock_flags = 0;
10098        int irq = pci_irq_vector(pdev, 0);
10099
10100        ENTER;
10101
10102        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10103        init_waitqueue_head(&ioa_cfg->msi_wait_q);
10104        ioa_cfg->msi_received = 0;
10105        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10106        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10107        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10108        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10109
10110        rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10111        if (rc) {
10112                dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10113                return rc;
10114        } else if (ipr_debug)
10115                dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10116
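        /*
         * Setting the IO debug acknowledge bit in the interrupt register
         * should cause the adapter to raise the test interrupt that was
         * just unmasked above.
         */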
10117        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10118        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10119        wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10120        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10121        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10122
10123        if (!ioa_cfg->msi_received) {
10124                /* MSI test failed */
10125                dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10126                rc = -EOPNOTSUPP;
10127        } else if (ipr_debug)
10128                dev_info(&pdev->dev, "MSI test succeeded.\n");
10129
10130        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10131
10132        free_irq(irq, ioa_cfg);
10133
10134        LEAVE;
10135
10136        return rc;
10137}
10138
10139/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10140 * @pdev:               PCI device struct
10141 * @dev_id:             PCI device id struct
10142 *
10143 * Return value:
10144 *      0 on success / non-zero on failure
10145 **/
10146static int ipr_probe_ioa(struct pci_dev *pdev,
10147                         const struct pci_device_id *dev_id)
10148{
10149        struct ipr_ioa_cfg *ioa_cfg;
10150        struct Scsi_Host *host;
10151        unsigned long ipr_regs_pci;
10152        void __iomem *ipr_regs;
10153        int rc = PCIBIOS_SUCCESSFUL;
10154        volatile u32 mask, uproc, interrupts;
10155        unsigned long lock_flags, driver_lock_flags;
10156        unsigned int irq_flag;
10157
10158        ENTER;
10159
10160        dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10161        host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10162
10163        if (!host) {
10164                dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10165                rc = -ENOMEM;
10166                goto out;
10167        }
10168
10169        ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10170        memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10171        ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10172
10173        ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10174
10175        if (!ioa_cfg->ipr_chip) {
10176                dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10177                        dev_id->vendor, dev_id->device);
                rc = -ENODEV;
10178                goto out_scsi_host_put;
10179        }
10180
10181        /* set SIS 32 or SIS 64 */
10182        ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10183        ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10184        ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10185        ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10186
10187        if (ipr_transop_timeout)
10188                ioa_cfg->transop_timeout = ipr_transop_timeout;
10189        else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10190                ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10191        else
10192                ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10193
10194        ioa_cfg->revid = pdev->revision;
10195
10196        ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10197
10198        ipr_regs_pci = pci_resource_start(pdev, 0);
10199
10200        rc = pci_request_regions(pdev, IPR_NAME);
10201        if (rc < 0) {
10202                dev_err(&pdev->dev,
10203                        "Couldn't register memory range of registers\n");
10204                goto out_scsi_host_put;
10205        }
10206
10207        rc = pci_enable_device(pdev);
10208
10209        if (rc || pci_channel_offline(pdev)) {
10210                if (pci_channel_offline(pdev)) {
10211                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10212                        rc = pci_enable_device(pdev);
10213                }
10214
10215                if (rc) {
10216                        dev_err(&pdev->dev, "Cannot enable adapter\n");
10217                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10218                        goto out_release_regions;
10219                }
10220        }
10221
10222        ipr_regs = pci_ioremap_bar(pdev, 0);
10223
10224        if (!ipr_regs) {
10225                dev_err(&pdev->dev,
10226                        "Couldn't map memory range of registers\n");
10227                rc = -ENOMEM;
10228                goto out_disable;
10229        }
10230
10231        ioa_cfg->hdw_dma_regs = ipr_regs;
10232        ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10233        ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10234
10235        ipr_init_regs(ioa_cfg);
10236
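        /* SIS-64 adapters prefer a 64-bit DMA mask but can fall back to 32-bit */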
10237        if (ioa_cfg->sis64) {
10238                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10239                if (rc < 0) {
10240                        dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10241                        rc = dma_set_mask_and_coherent(&pdev->dev,
10242                                                       DMA_BIT_MASK(32));
10243                }
10244        } else
10245                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10246
10247        if (rc < 0) {
10248                dev_err(&pdev->dev, "Failed to set DMA mask\n");
10249                goto cleanup_nomem;
10250        }
10251
10252        rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10253                                   ioa_cfg->chip_cfg->cache_line_size);
10254
10255        if (rc != PCIBIOS_SUCCESSFUL) {
10256                dev_err(&pdev->dev, "Write of cache line size failed\n");
10257                ipr_wait_for_pci_err_recovery(ioa_cfg);
10258                rc = -EIO;
10259                goto cleanup_nomem;
10260        }
10261
10262        /* Issue MMIO read to ensure card is not in EEH */
10263        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10264        ipr_wait_for_pci_err_recovery(ioa_cfg);
10265
10266        if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10267                dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10268                        IPR_MAX_MSIX_VECTORS);
10269                ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10270        }
10271
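        /*
         * Prefer MSI-X/MSI on chips that support it; pci_alloc_irq_vectors()
         * falls back to a single legacy INTx vector otherwise and returns
         * the number of vectors actually allocated.
         */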
10272        irq_flag = PCI_IRQ_LEGACY;
10273        if (ioa_cfg->ipr_chip->has_msi)
10274                irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10275        rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10276        if (rc < 0) {
10277                ipr_wait_for_pci_err_recovery(ioa_cfg);
10278                goto cleanup_nomem;
10279        }
10280        ioa_cfg->nvectors = rc;
10281
10282        if (!pdev->msi_enabled && !pdev->msix_enabled)
10283                ioa_cfg->clear_isr = 1;
10284
10285        pci_set_master(pdev);
10286
10287        if (pci_channel_offline(pdev)) {
10288                ipr_wait_for_pci_err_recovery(ioa_cfg);
10289                pci_set_master(pdev);
10290                if (pci_channel_offline(pdev)) {
10291                        rc = -EIO;
10292                        goto out_msi_disable;
10293                }
10294        }
10295
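        /*
         * Some adapters advertise MSI but never actually deliver one;
         * fire a test interrupt and fall back to a single legacy vector
         * if it is not received (see ipr_test_msi() above).
         */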
10296        if (pdev->msi_enabled || pdev->msix_enabled) {
10297                rc = ipr_test_msi(ioa_cfg, pdev);
10298                switch (rc) {
10299                case 0:
10300                        dev_info(&pdev->dev,
10301                                "Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10302                                pdev->msix_enabled ? "-X" : "");
10303                        break;
10304                case -EOPNOTSUPP:
10305                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10306                        pci_free_irq_vectors(pdev);
10307
10308                        ioa_cfg->nvectors = 1;
10309                        ioa_cfg->clear_isr = 1;
10310                        break;
10311                default:
10312                        goto out_msi_disable;
10313                }
10314        }
10315
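        /* Cap the HRRQ count at the vectors granted, online CPUs and HW max */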
10316        ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10317                                (unsigned int)num_online_cpus(),
10318                                (unsigned int)IPR_MAX_HRRQ_NUM);
10319
10320        rc = ipr_save_pcix_cmd_reg(ioa_cfg);
        if (rc)
10321                goto out_msi_disable;
10322
10323        rc = ipr_set_pcix_cmd_reg(ioa_cfg);
        if (rc)
10324                goto out_msi_disable;
10325
10326        rc = ipr_alloc_mem(ioa_cfg);
10327        if (rc < 0) {
10328                dev_err(&pdev->dev,
10329                        "Couldn't allocate enough memory for device driver!\n");
10330                goto out_msi_disable;
10331        }
10332
10333        /* Save away PCI config space for use following IOA reset */
10334        rc = pci_save_state(pdev);
10335
10336        if (rc != PCIBIOS_SUCCESSFUL) {
10337                dev_err(&pdev->dev, "Failed to save PCI config space\n");
10338                rc = -EIO;
10339                goto cleanup_nolog;
10340        }
10341
10342        /*
10343         * If HRRQ updated interrupt is not masked, or reset alert is set,
10344         * the card is in an unknown state and needs a hard reset
10345         */
10346        mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10347        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10348        uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10349        if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10350                ioa_cfg->needs_hard_reset = 1;
10351        if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10352                ioa_cfg->needs_hard_reset = 1;
10353        if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10354                ioa_cfg->ioa_unit_checked = 1;
10355
10356        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10357        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10358        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10359
10360        if (pdev->msi_enabled || pdev->msix_enabled) {
10361                name_msi_vectors(ioa_cfg);
10362                rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10363                        ioa_cfg->vectors_info[0].desc,
10364                        &ioa_cfg->hrrq[0]);
10365                if (!rc)
10366                        rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10367        } else {
10368                rc = request_irq(pdev->irq, ipr_isr,
10369                         IRQF_SHARED,
10370                         IPR_NAME, &ioa_cfg->hrrq[0]);
10371        }
10372        if (rc) {
10373                dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10374                        pdev->irq, rc);
10375                goto cleanup_nolog;
10376        }
10377
10378        if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10379            (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10380                ioa_cfg->needs_warm_reset = 1;
10381                ioa_cfg->reset = ipr_reset_slot_reset;
10382
10383                ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10384                                                                WQ_MEM_RECLAIM, host->host_no);
10385
10386                if (!ioa_cfg->reset_work_q) {
10387                        dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10388                        rc = -ENOMEM;
10389                        goto out_free_irq;
10390                }
10391        } else
10392                ioa_cfg->reset = ipr_reset_start_bist;
10393
10394        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10395        list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10396        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10397
10398        LEAVE;
10399out:
10400        return rc;
10401
10402out_free_irq:
10403        ipr_free_irqs(ioa_cfg);
10404cleanup_nolog:
10405        ipr_free_mem(ioa_cfg);
10406out_msi_disable:
10407        ipr_wait_for_pci_err_recovery(ioa_cfg);
10408        pci_free_irq_vectors(pdev);
10409cleanup_nomem:
10410        iounmap(ipr_regs);
10411out_disable:
10412        pci_disable_device(pdev);
10413out_release_regions:
10414        pci_release_regions(pdev);
10415out_scsi_host_put:
10416        scsi_host_put(host);
10417        goto out;
10418}
10419
10420/**
10421 * ipr_initiate_ioa_bringdown - Bring down an adapter
10422 * @ioa_cfg:            ioa config struct
10423 * @shutdown_type:      shutdown type
10424 *
10425 * Description: This function will initiate bringing down the adapter.
10426 * This consists of issuing an IOA shutdown to the adapter
10427 * to flush the cache, and running BIST.
10428 * If the caller needs to wait on the completion of the reset,
10429 * the caller must sleep on the reset_wait_q.
10430 *
10431 * Return value:
10432 *      none
10433 **/
10434static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10435                                       enum ipr_shutdown_type shutdown_type)
10436{
10437        ENTER;
10438        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10439                ioa_cfg->sdt_state = ABORT_DUMP;
10440        ioa_cfg->reset_retries = 0;
10441        ioa_cfg->in_ioa_bringdown = 1;
10442        ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10443        LEAVE;
10444}
10445
10446/**
10447 * __ipr_remove - Remove a single adapter
10448 * @pdev:       pci device struct
10449 *
10450 * Adapter hot plug remove entry point.
10451 *
10452 * Return value:
10453 *      none
10454 **/
10455static void __ipr_remove(struct pci_dev *pdev)
10456{
10457        unsigned long host_lock_flags = 0;
10458        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10459        int i;
10460        unsigned long driver_lock_flags;
10461        ENTER;
10462
10463        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10464        while (ioa_cfg->in_reset_reload) {
10465                spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10466                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10467                spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10468        }
10469
10470        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10471                spin_lock(&ioa_cfg->hrrq[i]._lock);
10472                ioa_cfg->hrrq[i].removing_ioa = 1;
10473                spin_unlock(&ioa_cfg->hrrq[i]._lock);
10474        }
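        /* ensure removing_ioa is visible before initiating the bringdown */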
10475        wmb();
10476        ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10477
10478        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10479        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10480        flush_work(&ioa_cfg->work_q);
10481        if (ioa_cfg->reset_work_q)
10482                flush_workqueue(ioa_cfg->reset_work_q);
10483        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10484        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10485
10486        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10487        list_del(&ioa_cfg->queue);
10488        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10489
10490        if (ioa_cfg->sdt_state == ABORT_DUMP)
10491                ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10492        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10493
10494        ipr_free_all_resources(ioa_cfg);
10495
10496        LEAVE;
10497}
10498
10499/**
10500 * ipr_remove - IOA hot plug remove entry point
10501 * @pdev:       pci device struct
10502 *
10503 * Adapter hot plug remove entry point.
10504 *
10505 * Return value:
10506 *      none
10507 **/
10508static void ipr_remove(struct pci_dev *pdev)
10509{
10510        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10511
10512        ENTER;
10513
10514        ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10515                              &ipr_trace_attr);
10516        ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10517                             &ipr_dump_attr);
10518        sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10519                        &ipr_ioa_async_err_log);
10520        scsi_remove_host(ioa_cfg->host);
10521
10522        __ipr_remove(pdev);
10523
10524        LEAVE;
10525}
10526
10527/**
10528 * ipr_probe - Adapter hot plug add entry point
 * @pdev:       pci device struct
 * @dev_id:     pci device ID
10529 *
10530 * Return value:
10531 *      0 on success / non-zero on failure
10532 **/
10533static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10534{
10535        struct ipr_ioa_cfg *ioa_cfg;
10536        unsigned long flags;
10537        int rc, i;
10538
10539        rc = ipr_probe_ioa(pdev, dev_id);
10540
10541        if (rc)
10542                return rc;
10543
10544        ioa_cfg = pci_get_drvdata(pdev);
10545        rc = ipr_probe_ioa_part2(ioa_cfg);
10546
10547        if (rc) {
10548                __ipr_remove(pdev);
10549                return rc;
10550        }
10551
10552        rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10553
10554        if (rc) {
10555                __ipr_remove(pdev);
10556                return rc;
10557        }
10558
10559        rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10560                                   &ipr_trace_attr);
10561
10562        if (rc) {
10563                scsi_remove_host(ioa_cfg->host);
10564                __ipr_remove(pdev);
10565                return rc;
10566        }
10567
10568        rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10569                        &ipr_ioa_async_err_log);
10570
10571        if (rc) {
10574                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10575                                &ipr_trace_attr);
10576                scsi_remove_host(ioa_cfg->host);
10577                __ipr_remove(pdev);
10578                return rc;
10579        }
10580
10581        rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10582                                   &ipr_dump_attr);
10583
10584        if (rc) {
10585                sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10586                                      &ipr_ioa_async_err_log);
10587                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10588                                      &ipr_trace_attr);
10589                scsi_remove_host(ioa_cfg->host);
10590                __ipr_remove(pdev);
10591                return rc;
10592        }
10593        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10594        ioa_cfg->scan_enabled = 1;
10595        schedule_work(&ioa_cfg->work_q);
10596        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10597
10598        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10599
10600        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10601                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10602                        irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10603                                        ioa_cfg->iopoll_weight, ipr_iopoll);
10604                }
10605        }
10606
10607        scsi_scan_host(ioa_cfg->host);
10608
10609        return 0;
10610}
10611
10612/**
10613 * ipr_shutdown - Shutdown handler.
10614 * @pdev:       pci device struct
10615 *
10616 * This function is invoked upon system shutdown/reboot. It will issue
10617 * an adapter shutdown to the adapter to flush the write cache.
10618 *
10619 * Return value:
10620 *      none
10621 **/
10622static void ipr_shutdown(struct pci_dev *pdev)
10623{
10624        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10625        unsigned long lock_flags = 0;
10626        enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10627        int i;
10628
10629        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10630        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10631                ioa_cfg->iopoll_weight = 0;
10632                for (i = 1; i < ioa_cfg->hrrq_num; i++)
10633                        irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10634        }
10635
10636        while (ioa_cfg->in_reset_reload) {
10637                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10638                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10639                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10640        }
10641
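        /*
         * With the ipr_fast_reboot module parameter set, SIS-64 adapters
         * are only quiesced on reboot rather than given a full shutdown,
         * and their IRQs and PCI device are torn down below.
         */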
10642        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10643                shutdown_type = IPR_SHUTDOWN_QUIESCE;
10644
10645        ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10646        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10647        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10648        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10649                ipr_free_irqs(ioa_cfg);
10650                pci_disable_device(ioa_cfg->pdev);
10651        }
10652}
10653
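/*
 * The last field of each entry is driver_data; it carries per-device
 * flags such as IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET,
 * which are consumed in ipr_probe_ioa().
 */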
10654static const struct pci_device_id ipr_pci_table[] = {
10655        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10656                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10657        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10658                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10659        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10660                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10661        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10662                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10663        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10664                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10665        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10666                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10667        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10668                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10669        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10670                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10671                IPR_USE_LONG_TRANSOP_TIMEOUT },
10672        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10673              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10674        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10675              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10676              IPR_USE_LONG_TRANSOP_TIMEOUT },
10677        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10678              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10679              IPR_USE_LONG_TRANSOP_TIMEOUT },
10680        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10681              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10682        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10683              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10684              IPR_USE_LONG_TRANSOP_TIMEOUT},
10685        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10686              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10687              IPR_USE_LONG_TRANSOP_TIMEOUT },
10688        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10689              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10690              IPR_USE_LONG_TRANSOP_TIMEOUT },
10691        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10692              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10693        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10694              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10695        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10696              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10697              IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10698        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10699                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10700        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10701                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10702        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10703                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10704                IPR_USE_LONG_TRANSOP_TIMEOUT },
10705        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10706                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10707                IPR_USE_LONG_TRANSOP_TIMEOUT },
10708        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10709                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10710        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10711                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10712        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10713                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10714        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10715                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10716        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10717                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10718        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10719                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10720        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10721                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10722        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10723                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10724        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10725                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10726        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10727                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10728        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10729                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10730        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10731                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10732        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10733                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10734        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10735                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10736        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10737                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10738        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10739                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10740        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10741                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10742        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10743                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10744        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10745                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10746        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10747                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10748        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10749                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10750        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10751                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10752        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10753                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10754        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10755                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10756        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10757                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10758        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10759                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10760        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10761                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10762        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10763                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10764        { }
10765};
10766MODULE_DEVICE_TABLE(pci, ipr_pci_table);
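/*
 * Exporting the ID table lets userspace match the adapter's modalias and
 * autoload this driver when a supported device is discovered.
 */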
10767
10768static const struct pci_error_handlers ipr_err_handler = {
10769        .error_detected = ipr_pci_error_detected,
10770        .mmio_enabled = ipr_pci_mmio_enabled,
10771        .slot_reset = ipr_pci_slot_reset,
10772};
10773
10774static struct pci_driver ipr_driver = {
10775        .name = IPR_NAME,
10776        .id_table = ipr_pci_table,
10777        .probe = ipr_probe,
10778        .remove = ipr_remove,
10779        .shutdown = ipr_shutdown,
10780        .err_handler = &ipr_err_handler,
10781};
10782
10783/**
10784 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
10785 *
10786 * Return value:
10787 *      none
10788 **/
10789static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10790{
10791        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10792}
10793
10794/**
10795 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         Notifier block
 * @event:      Notifier event
 * @buf:        Notifier data (unused)
10796 *
10797 * Return value:
10798 *      NOTIFY_OK on success / NOTIFY_DONE on failure
10799 **/
10800static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10801{
10802        struct ipr_cmnd *ipr_cmd;
10803        struct ipr_ioa_cfg *ioa_cfg;
10804        unsigned long flags = 0, driver_lock_flags;
10805
10806        if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10807                return NOTIFY_DONE;
10808
10809        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10810
10811        list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10812                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10813                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10814                    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10815                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10816                        continue;
10817                }
10818
10819                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10820                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10821                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10822                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10823                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10824
10825                ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10826                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10827        }
10828        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10829
10830        return NOTIFY_OK;
10831}
10832
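/* Registered on the reboot notifier chain in ipr_init() below */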
10833static struct notifier_block ipr_notifier = {
10834        .notifier_call = ipr_halt,
10835};
10836
10837/**
10838 * ipr_init - Module entry point
10839 *
10840 * Return value:
10841 *      0 on success / negative value on failure
10842 **/
10843static int __init ipr_init(void)
10844{
10845        ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10846                 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10847
10848        register_reboot_notifier(&ipr_notifier);
10849        return pci_register_driver(&ipr_driver);
10850}
10851
10852/**
10853 * ipr_exit - Module unload
10854 *
10855 * Module unload entry point.
10856 *
10857 * Return value:
10858 *      none
10859 **/
10860static void __exit ipr_exit(void)
10861{
10862        unregister_reboot_notifier(&ipr_notifier);
10863        pci_unregister_driver(&ipr_driver);
10864}
10865
10866module_init(ipr_init);
10867module_exit(ipr_exit);
10868