linux/drivers/scsi/ipr.c
<<
>>
Prefs
   1/*
   2 * ipr.c -- driver for IBM Power Linux RAID adapters
   3 *
   4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
   5 *
   6 * Copyright (C) 2003, 2004 IBM Corporation
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  21 *
  22 */
  23
  24/*
  25 * Notes:
  26 *
  27 * This driver is used to control the following SCSI adapters:
  28 *
  29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
  30 *
  31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
  32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
  33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
  34 *              Embedded SCSI adapter on p615 and p655 systems
  35 *
  36 * Supported Hardware Features:
  37 *      - Ultra 320 SCSI controller
  38 *      - PCI-X host interface
  39 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
  40 *      - Non-Volatile Write Cache
  41 *      - Supports attachment of non-RAID disks, tape, and optical devices
  42 *      - RAID Levels 0, 5, 10
  43 *      - Hot spare
  44 *      - Background Parity Checking
  45 *      - Background Data Scrubbing
  46 *      - Ability to increase the capacity of an existing RAID 5 disk array
  47 *              by adding disks
  48 *
  49 * Driver Features:
  50 *      - Tagged command queuing
  51 *      - Adapter microcode download
  52 *      - PCI hot plug
  53 *      - SCSI device hot plug
  54 *
  55 */
  56
  57#include <linux/fs.h>
  58#include <linux/init.h>
  59#include <linux/types.h>
  60#include <linux/errno.h>
  61#include <linux/kernel.h>
  62#include <linux/slab.h>
  63#include <linux/vmalloc.h>
  64#include <linux/ioport.h>
  65#include <linux/delay.h>
  66#include <linux/pci.h>
  67#include <linux/wait.h>
  68#include <linux/spinlock.h>
  69#include <linux/sched.h>
  70#include <linux/interrupt.h>
  71#include <linux/blkdev.h>
  72#include <linux/firmware.h>
  73#include <linux/module.h>
  74#include <linux/moduleparam.h>
  75#include <linux/libata.h>
  76#include <linux/hdreg.h>
  77#include <linux/reboot.h>
  78#include <linux/stringify.h>
  79#include <asm/io.h>
  80#include <asm/irq.h>
  81#include <asm/processor.h>
  82#include <scsi/scsi.h>
  83#include <scsi/scsi_host.h>
  84#include <scsi/scsi_tcq.h>
  85#include <scsi/scsi_eh.h>
  86#include <scsi/scsi_cmnd.h>
  87#include "ipr.h"
  88
  89/*
  90 *   Global Data
  91 */
  92static LIST_HEAD(ipr_ioa_head);
  93static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
  94static unsigned int ipr_max_speed = 1;
  95static int ipr_testmode = 0;
  96static unsigned int ipr_fastfail = 0;
  97static unsigned int ipr_transop_timeout = 0;
  98static unsigned int ipr_debug = 0;
  99static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
 100static unsigned int ipr_dual_ioa_raid = 1;
 101static unsigned int ipr_number_of_msix = 2;
 102static unsigned int ipr_fast_reboot;
 103static DEFINE_SPINLOCK(ipr_driver_lock);
 104
 105/* This table describes the differences between DMA controller chips */
 106static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 107        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
 108                .mailbox = 0x0042C,
 109                .max_cmds = 100,
 110                .cache_line_size = 0x20,
 111                .clear_isr = 1,
 112                .iopoll_weight = 0,
 113                {
 114                        .set_interrupt_mask_reg = 0x0022C,
 115                        .clr_interrupt_mask_reg = 0x00230,
 116                        .clr_interrupt_mask_reg32 = 0x00230,
 117                        .sense_interrupt_mask_reg = 0x0022C,
 118                        .sense_interrupt_mask_reg32 = 0x0022C,
 119                        .clr_interrupt_reg = 0x00228,
 120                        .clr_interrupt_reg32 = 0x00228,
 121                        .sense_interrupt_reg = 0x00224,
 122                        .sense_interrupt_reg32 = 0x00224,
 123                        .ioarrin_reg = 0x00404,
 124                        .sense_uproc_interrupt_reg = 0x00214,
 125                        .sense_uproc_interrupt_reg32 = 0x00214,
 126                        .set_uproc_interrupt_reg = 0x00214,
 127                        .set_uproc_interrupt_reg32 = 0x00214,
 128                        .clr_uproc_interrupt_reg = 0x00218,
 129                        .clr_uproc_interrupt_reg32 = 0x00218
 130                }
 131        },
 132        { /* Snipe and Scamp */
 133                .mailbox = 0x0052C,
 134                .max_cmds = 100,
 135                .cache_line_size = 0x20,
 136                .clear_isr = 1,
 137                .iopoll_weight = 0,
 138                {
 139                        .set_interrupt_mask_reg = 0x00288,
 140                        .clr_interrupt_mask_reg = 0x0028C,
 141                        .clr_interrupt_mask_reg32 = 0x0028C,
 142                        .sense_interrupt_mask_reg = 0x00288,
 143                        .sense_interrupt_mask_reg32 = 0x00288,
 144                        .clr_interrupt_reg = 0x00284,
 145                        .clr_interrupt_reg32 = 0x00284,
 146                        .sense_interrupt_reg = 0x00280,
 147                        .sense_interrupt_reg32 = 0x00280,
 148                        .ioarrin_reg = 0x00504,
 149                        .sense_uproc_interrupt_reg = 0x00290,
 150                        .sense_uproc_interrupt_reg32 = 0x00290,
 151                        .set_uproc_interrupt_reg = 0x00290,
 152                        .set_uproc_interrupt_reg32 = 0x00290,
 153                        .clr_uproc_interrupt_reg = 0x00294,
 154                        .clr_uproc_interrupt_reg32 = 0x00294
 155                }
 156        },
 157        { /* CRoC */
 158                .mailbox = 0x00044,
 159                .max_cmds = 1000,
 160                .cache_line_size = 0x20,
 161                .clear_isr = 0,
 162                .iopoll_weight = 64,
 163                {
 164                        .set_interrupt_mask_reg = 0x00010,
 165                        .clr_interrupt_mask_reg = 0x00018,
 166                        .clr_interrupt_mask_reg32 = 0x0001C,
 167                        .sense_interrupt_mask_reg = 0x00010,
 168                        .sense_interrupt_mask_reg32 = 0x00014,
 169                        .clr_interrupt_reg = 0x00008,
 170                        .clr_interrupt_reg32 = 0x0000C,
 171                        .sense_interrupt_reg = 0x00000,
 172                        .sense_interrupt_reg32 = 0x00004,
 173                        .ioarrin_reg = 0x00070,
 174                        .sense_uproc_interrupt_reg = 0x00020,
 175                        .sense_uproc_interrupt_reg32 = 0x00024,
 176                        .set_uproc_interrupt_reg = 0x00020,
 177                        .set_uproc_interrupt_reg32 = 0x00024,
 178                        .clr_uproc_interrupt_reg = 0x00028,
 179                        .clr_uproc_interrupt_reg32 = 0x0002C,
 180                        .init_feedback_reg = 0x0005C,
 181                        .dump_addr_reg = 0x00064,
 182                        .dump_data_reg = 0x00068,
 183                        .endian_swap_reg = 0x00084
 184                }
 185        },
 186};
 187
 188static const struct ipr_chip_t ipr_chip[] = {
 189        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
 190        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
 191        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
 192        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
 193        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
 194        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
 195        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
 196        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
 197        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
 198};
 199
 200static int ipr_max_bus_speeds[] = {
 201        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
 202};
 203
 204MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
 205MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
 206module_param_named(max_speed, ipr_max_speed, uint, 0);
 207MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
 208module_param_named(log_level, ipr_log_level, uint, 0);
 209MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
 210module_param_named(testmode, ipr_testmode, int, 0);
 211MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
 212module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
 213MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
 214module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
 215MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
 216module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
 217MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
 218module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
 219MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
 220module_param_named(max_devs, ipr_max_devs, int, 0);
 221MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
 222                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
 223module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
 224MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:2)");
 225module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
 226MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
 227MODULE_LICENSE("GPL");
 228MODULE_VERSION(IPR_DRIVER_VERSION);
 229
 230/*  A constant array of IOASCs/URCs/Error Messages */
 231static const
 232struct ipr_error_table_t ipr_error_table[] = {
 233        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
 234        "8155: An unknown error was received"},
 235        {0x00330000, 0, 0,
 236        "Soft underlength error"},
 237        {0x005A0000, 0, 0,
 238        "Command to be cancelled not found"},
 239        {0x00808000, 0, 0,
 240        "Qualified success"},
 241        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
 242        "FFFE: Soft device bus error recovered by the IOA"},
 243        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
 244        "4101: Soft device bus fabric error"},
 245        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
 246        "FFFC: Logical block guard error recovered by the device"},
 247        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
 248        "FFFC: Logical block reference tag error recovered by the device"},
 249        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
 250        "4171: Recovered scatter list tag / sequence number error"},
 251        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
 252        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
 253        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
 254        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
 255        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
 256        "FFFD: Recovered logical block reference tag error detected by the IOA"},
 257        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
 258        "FFFD: Logical block guard error recovered by the IOA"},
 259        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
 260        "FFF9: Device sector reassign successful"},
 261        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
 262        "FFF7: Media error recovered by device rewrite procedures"},
 263        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
 264        "7001: IOA sector reassignment successful"},
 265        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
 266        "FFF9: Soft media error. Sector reassignment recommended"},
 267        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
 268        "FFF7: Media error recovered by IOA rewrite procedures"},
 269        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
 270        "FF3D: Soft PCI bus error recovered by the IOA"},
 271        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
 272        "FFF6: Device hardware error recovered by the IOA"},
 273        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
 274        "FFF6: Device hardware error recovered by the device"},
 275        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
 276        "FF3D: Soft IOA error recovered by the IOA"},
 277        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
 278        "FFFA: Undefined device response recovered by the IOA"},
 279        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
 280        "FFF6: Device bus error, message or command phase"},
 281        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
 282        "FFFE: Task Management Function failed"},
 283        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
 284        "FFF6: Failure prediction threshold exceeded"},
 285        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
 286        "8009: Impending cache battery pack failure"},
 287        {0x02040100, 0, 0,
 288        "Logical Unit in process of becoming ready"},
 289        {0x02040200, 0, 0,
 290        "Initializing command required"},
 291        {0x02040400, 0, 0,
 292        "34FF: Disk device format in progress"},
 293        {0x02040C00, 0, 0,
 294        "Logical unit not accessible, target port in unavailable state"},
 295        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
 296        "9070: IOA requested reset"},
 297        {0x023F0000, 0, 0,
 298        "Synchronization required"},
 299        {0x02408500, 0, 0,
 300        "IOA microcode download required"},
 301        {0x02408600, 0, 0,
 302        "Device bus connection is prohibited by host"},
 303        {0x024E0000, 0, 0,
 304        "No ready, IOA shutdown"},
 305        {0x025A0000, 0, 0,
 306        "Not ready, IOA has been shutdown"},
 307        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
 308        "3020: Storage subsystem configuration error"},
 309        {0x03110B00, 0, 0,
 310        "FFF5: Medium error, data unreadable, recommend reassign"},
 311        {0x03110C00, 0, 0,
 312        "7000: Medium error, data unreadable, do not reassign"},
 313        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
 314        "FFF3: Disk media format bad"},
 315        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
 316        "3002: Addressed device failed to respond to selection"},
 317        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
 318        "3100: Device bus error"},
 319        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
 320        "3109: IOA timed out a device command"},
 321        {0x04088000, 0, 0,
 322        "3120: SCSI bus is not operational"},
 323        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
 324        "4100: Hard device bus fabric error"},
 325        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
 326        "310C: Logical block guard error detected by the device"},
 327        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
 328        "310C: Logical block reference tag error detected by the device"},
 329        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
 330        "4170: Scatter list tag / sequence number error"},
 331        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
 332        "8150: Logical block CRC error on IOA to Host transfer"},
 333        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
 334        "4170: Logical block sequence number error on IOA to Host transfer"},
 335        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
 336        "310D: Logical block reference tag error detected by the IOA"},
 337        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
 338        "310D: Logical block guard error detected by the IOA"},
 339        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
 340        "9000: IOA reserved area data check"},
 341        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
 342        "9001: IOA reserved area invalid data pattern"},
 343        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
 344        "9002: IOA reserved area LRC error"},
 345        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
 346        "Hardware Error, IOA metadata access error"},
 347        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
 348        "102E: Out of alternate sectors for disk storage"},
 349        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
 350        "FFF4: Data transfer underlength error"},
 351        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
 352        "FFF4: Data transfer overlength error"},
 353        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
 354        "3400: Logical unit failure"},
 355        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
 356        "FFF4: Device microcode is corrupt"},
 357        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
 358        "8150: PCI bus error"},
 359        {0x04430000, 1, 0,
 360        "Unsupported device bus message received"},
 361        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
 362        "FFF4: Disk device problem"},
 363        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
 364        "8150: Permanent IOA failure"},
 365        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
 366        "3010: Disk device returned wrong response to IOA"},
 367        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
 368        "8151: IOA microcode error"},
 369        {0x04448500, 0, 0,
 370        "Device bus status error"},
 371        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
 372        "8157: IOA error requiring IOA reset to recover"},
 373        {0x04448700, 0, 0,
 374        "ATA device status error"},
 375        {0x04490000, 0, 0,
 376        "Message reject received from the device"},
 377        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
 378        "8008: A permanent cache battery pack failure occurred"},
 379        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
 380        "9090: Disk unit has been modified after the last known status"},
 381        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
 382        "9081: IOA detected device error"},
 383        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
 384        "9082: IOA detected device error"},
 385        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
 386        "3110: Device bus error, message or command phase"},
 387        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
 388        "3110: SAS Command / Task Management Function failed"},
 389        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
 390        "9091: Incorrect hardware configuration change has been detected"},
 391        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
 392        "9073: Invalid multi-adapter configuration"},
 393        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
 394        "4010: Incorrect connection between cascaded expanders"},
 395        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
 396        "4020: Connections exceed IOA design limits"},
 397        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
 398        "4030: Incorrect multipath connection"},
 399        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
 400        "4110: Unsupported enclosure function"},
 401        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
 402        "4120: SAS cable VPD cannot be read"},
 403        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
 404        "FFF4: Command to logical unit failed"},
 405        {0x05240000, 1, 0,
 406        "Illegal request, invalid request type or request packet"},
 407        {0x05250000, 0, 0,
 408        "Illegal request, invalid resource handle"},
 409        {0x05258000, 0, 0,
 410        "Illegal request, commands not allowed to this device"},
 411        {0x05258100, 0, 0,
 412        "Illegal request, command not allowed to a secondary adapter"},
 413        {0x05258200, 0, 0,
 414        "Illegal request, command not allowed to a non-optimized resource"},
 415        {0x05260000, 0, 0,
 416        "Illegal request, invalid field in parameter list"},
 417        {0x05260100, 0, 0,
 418        "Illegal request, parameter not supported"},
 419        {0x05260200, 0, 0,
 420        "Illegal request, parameter value invalid"},
 421        {0x052C0000, 0, 0,
 422        "Illegal request, command sequence error"},
 423        {0x052C8000, 1, 0,
 424        "Illegal request, dual adapter support not enabled"},
 425        {0x052C8100, 1, 0,
 426        "Illegal request, another cable connector was physically disabled"},
 427        {0x054E8000, 1, 0,
 428        "Illegal request, inconsistent group id/group count"},
 429        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
 430        "9031: Array protection temporarily suspended, protection resuming"},
 431        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
 432        "9040: Array protection temporarily suspended, protection resuming"},
 433        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
 434        "4080: IOA exceeded maximum operating temperature"},
 435        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
 436        "4085: Service required"},
 437        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
 438        "3140: Device bus not ready to ready transition"},
 439        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
 440        "FFFB: SCSI bus was reset"},
 441        {0x06290500, 0, 0,
 442        "FFFE: SCSI bus transition to single ended"},
 443        {0x06290600, 0, 0,
 444        "FFFE: SCSI bus transition to LVD"},
 445        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
 446        "FFFB: SCSI bus was reset by another initiator"},
 447        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
 448        "3029: A device replacement has occurred"},
 449        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
 450        "4102: Device bus fabric performance degradation"},
 451        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
 452        "9051: IOA cache data exists for a missing or failed device"},
 453        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
 454        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
 455        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
 456        "9025: Disk unit is not supported at its physical location"},
 457        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
 458        "3020: IOA detected a SCSI bus configuration error"},
 459        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
 460        "3150: SCSI bus configuration error"},
 461        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
 462        "9074: Asymmetric advanced function disk configuration"},
 463        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
 464        "4040: Incomplete multipath connection between IOA and enclosure"},
 465        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
 466        "4041: Incomplete multipath connection between enclosure and device"},
 467        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
 468        "9075: Incomplete multipath connection between IOA and remote IOA"},
 469        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
 470        "9076: Configuration error, missing remote IOA"},
 471        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
 472        "4050: Enclosure does not support a required multipath function"},
 473        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
 474        "4121: Configuration error, required cable is missing"},
 475        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
 476        "4122: Cable is not plugged into the correct location on remote IOA"},
 477        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
 478        "4123: Configuration error, invalid cable vital product data"},
 479        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
 480        "4124: Configuration error, both cable ends are plugged into the same IOA"},
 481        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
 482        "4070: Logically bad block written on device"},
 483        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
 484        "9041: Array protection temporarily suspended"},
 485        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
 486        "9042: Corrupt array parity detected on specified device"},
 487        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
 488        "9030: Array no longer protected due to missing or failed disk unit"},
 489        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
 490        "9071: Link operational transition"},
 491        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
 492        "9072: Link not operational transition"},
 493        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
 494        "9032: Array exposed but still protected"},
 495        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
 496        "70DD: Device forced failed by disrupt device command"},
 497        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
 498        "4061: Multipath redundancy level got better"},
 499        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
 500        "4060: Multipath redundancy level got worse"},
 501        {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
 502        "9083: Device raw mode enabled"},
 503        {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
 504        "9084: Device raw mode disabled"},
 505        {0x07270000, 0, 0,
 506        "Failure due to other device"},
 507        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
 508        "9008: IOA does not support functions expected by devices"},
 509        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
 510        "9010: Cache data associated with attached devices cannot be found"},
 511        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
 512        "9011: Cache data belongs to devices other than those attached"},
 513        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
 514        "9020: Array missing 2 or more devices with only 1 device present"},
 515        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
 516        "9021: Array missing 2 or more devices with 2 or more devices present"},
 517        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
 518        "9022: Exposed array is missing a required device"},
 519        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
 520        "9023: Array member(s) not at required physical locations"},
 521        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
 522        "9024: Array not functional due to present hardware configuration"},
 523        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
 524        "9026: Array not functional due to present hardware configuration"},
 525        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
 526        "9027: Array is missing a device and parity is out of sync"},
 527        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
 528        "9028: Maximum number of arrays already exist"},
 529        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
 530        "9050: Required cache data cannot be located for a disk unit"},
 531        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
 532        "9052: Cache data exists for a device that has been modified"},
 533        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
 534        "9054: IOA resources not available due to previous problems"},
 535        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
 536        "9092: Disk unit requires initialization before use"},
 537        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
 538        "9029: Incorrect hardware configuration change has been detected"},
 539        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
 540        "9060: One or more disk pairs are missing from an array"},
 541        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
 542        "9061: One or more disks are missing from an array"},
 543        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
 544        "9062: One or more disks are missing from an array"},
 545        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
 546        "9063: Maximum number of functional arrays has been exceeded"},
 547        {0x07279A00, 0, 0,
 548        "Data protect, other volume set problem"},
 549        {0x0B260000, 0, 0,
 550        "Aborted command, invalid descriptor"},
 551        {0x0B3F9000, 0, 0,
 552        "Target operating conditions have changed, dual adapter takeover"},
 553        {0x0B530200, 0, 0,
 554        "Aborted command, medium removal prevented"},
 555        {0x0B5A0000, 0, 0,
 556        "Command terminated by host"},
 557        {0x0B5B8000, 0, 0,
 558        "Aborted command, command terminated by host"}
 559};
 560
 561static const struct ipr_ses_table_entry ipr_ses_table[] = {
 562        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
 563        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
 564        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
 565        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
 566        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
 567        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
 568        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
 569        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
 570        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
 571        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
 572        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
 573        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
 574        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
 575};
 576
 577/*
 578 *  Function Prototypes
 579 */
 580static int ipr_reset_alert(struct ipr_cmnd *);
 581static void ipr_process_ccn(struct ipr_cmnd *);
 582static void ipr_process_error(struct ipr_cmnd *);
 583static void ipr_reset_ioa_job(struct ipr_cmnd *);
 584static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
 585                                   enum ipr_shutdown_type);
 586
 587#ifdef CONFIG_SCSI_IPR_TRACE
 588/**
 589 * ipr_trc_hook - Add a trace entry to the driver trace
 590 * @ipr_cmd:    ipr command struct
 591 * @type:               trace type
 592 * @add_data:   additional data
 593 *
 594 * Return value:
 595 *      none
 596 **/
 597static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 598                         u8 type, u32 add_data)
 599{
 600        struct ipr_trace_entry *trace_entry;
 601        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 602        unsigned int trace_index;
 603
 604        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
 605        trace_entry = &ioa_cfg->trace[trace_index];
 606        trace_entry->time = jiffies;
 607        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 608        trace_entry->type = type;
 609        if (ipr_cmd->ioa_cfg->sis64)
 610                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
 611        else
 612                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
 613        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
 614        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
 615        trace_entry->u.add_data = add_data;
 616        wmb();
 617}
 618#else
 619#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
 620#endif
 621
 622/**
 623 * ipr_lock_and_done - Acquire lock and complete command
 624 * @ipr_cmd:    ipr command struct
 625 *
 626 * Return value:
 627 *      none
 628 **/
 629static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
 630{
 631        unsigned long lock_flags;
 632        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 633
 634        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 635        ipr_cmd->done(ipr_cmd);
 636        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 637}
 638
 639/**
 640 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 641 * @ipr_cmd:    ipr command struct
 642 *
 643 * Return value:
 644 *      none
 645 **/
 646static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 647{
 648        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 649        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
 650        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
 651        dma_addr_t dma_addr = ipr_cmd->dma_addr;
 652        int hrrq_id;
 653
 654        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
 655        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
 656        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
 657        ioarcb->data_transfer_length = 0;
 658        ioarcb->read_data_transfer_length = 0;
 659        ioarcb->ioadl_len = 0;
 660        ioarcb->read_ioadl_len = 0;
 661
 662        if (ipr_cmd->ioa_cfg->sis64) {
 663                ioarcb->u.sis64_addr_data.data_ioadl_addr =
 664                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
 665                ioasa64->u.gata.status = 0;
 666        } else {
 667                ioarcb->write_ioadl_addr =
 668                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
 669                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
 670                ioasa->u.gata.status = 0;
 671        }
 672
 673        ioasa->hdr.ioasc = 0;
 674        ioasa->hdr.residual_data_len = 0;
 675        ipr_cmd->scsi_cmd = NULL;
 676        ipr_cmd->qc = NULL;
 677        ipr_cmd->sense_buffer[0] = 0;
 678        ipr_cmd->dma_use_sg = 0;
 679}
 680
 681/**
 682 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 683 * @ipr_cmd:    ipr command struct
 684 *
 685 * Return value:
 686 *      none
 687 **/
 688static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
 689                              void (*fast_done) (struct ipr_cmnd *))
 690{
 691        ipr_reinit_ipr_cmnd(ipr_cmd);
 692        ipr_cmd->u.scratch = 0;
 693        ipr_cmd->sibling = NULL;
 694        ipr_cmd->eh_comp = NULL;
 695        ipr_cmd->fast_done = fast_done;
 696        init_timer(&ipr_cmd->timer);
 697}
 698
 699/**
 700 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 701 * @ioa_cfg:    ioa config struct
 702 *
 703 * Return value:
 704 *      pointer to ipr command struct
 705 **/
 706static
 707struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
 708{
 709        struct ipr_cmnd *ipr_cmd = NULL;
 710
 711        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
 712                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
 713                        struct ipr_cmnd, queue);
 714                list_del(&ipr_cmd->queue);
 715        }
 716
 717
 718        return ipr_cmd;
 719}
 720
 721/**
 722 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 723 * @ioa_cfg:    ioa config struct
 724 *
 725 * Return value:
 726 *      pointer to ipr command struct
 727 **/
 728static
 729struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 730{
 731        struct ipr_cmnd *ipr_cmd =
 732                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
 733        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 734        return ipr_cmd;
 735}
 736
 737/**
 738 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 739 * @ioa_cfg:    ioa config struct
 740 * @clr_ints:     interrupts to clear
 741 *
 742 * This function masks all interrupts on the adapter, then clears the
 743 * interrupts specified in the mask
 744 *
 745 * Return value:
 746 *      none
 747 **/
 748static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
 749                                          u32 clr_ints)
 750{
 751        volatile u32 int_reg;
 752        int i;
 753
 754        /* Stop new interrupts */
 755        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
 756                spin_lock(&ioa_cfg->hrrq[i]._lock);
 757                ioa_cfg->hrrq[i].allow_interrupts = 0;
 758                spin_unlock(&ioa_cfg->hrrq[i]._lock);
 759        }
 760        wmb();
 761
 762        /* Set interrupt mask to stop all new interrupts */
 763        if (ioa_cfg->sis64)
 764                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
 765        else
 766                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
 767
 768        /* Clear any pending interrupts */
 769        if (ioa_cfg->sis64)
 770                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
 771        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
 772        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 773}
 774
 775/**
 776 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 777 * @ioa_cfg:    ioa config struct
 778 *
 779 * Return value:
 780 *      0 on success / -EIO on failure
 781 **/
 782static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
 783{
 784        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
 785
 786        if (pcix_cmd_reg == 0)
 787                return 0;
 788
 789        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
 790                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
 791                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
 792                return -EIO;
 793        }
 794
 795        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
 796        return 0;
 797}
 798
 799/**
 800 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 801 * @ioa_cfg:    ioa config struct
 802 *
 803 * Return value:
 804 *      0 on success / -EIO on failure
 805 **/
 806static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
 807{
 808        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
 809
 810        if (pcix_cmd_reg) {
 811                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
 812                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
 813                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
 814                        return -EIO;
 815                }
 816        }
 817
 818        return 0;
 819}
 820
 821/**
 822 * ipr_sata_eh_done - done function for aborted SATA commands
 823 * @ipr_cmd:    ipr command struct
 824 *
 825 * This function is invoked for ops generated to SATA
 826 * devices which are being aborted.
 827 *
 828 * Return value:
 829 *      none
 830 **/
 831static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
 832{
 833        struct ata_queued_cmd *qc = ipr_cmd->qc;
 834        struct ipr_sata_port *sata_port = qc->ap->private_data;
 835
 836        qc->err_mask |= AC_ERR_OTHER;
 837        sata_port->ioasa.status |= ATA_BUSY;
 838        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 839        ata_qc_complete(qc);
 840}
 841
 842/**
 843 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 844 * @ipr_cmd:    ipr command struct
 845 *
 846 * This function is invoked by the interrupt handler for
 847 * ops generated by the SCSI mid-layer which are being aborted.
 848 *
 849 * Return value:
 850 *      none
 851 **/
 852static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 853{
 854        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 855
 856        scsi_cmd->result |= (DID_ERROR << 16);
 857
 858        scsi_dma_unmap(ipr_cmd->scsi_cmd);
 859        scsi_cmd->scsi_done(scsi_cmd);
 860        if (ipr_cmd->eh_comp)
 861                complete(ipr_cmd->eh_comp);
 862        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 863}
 864
 865/**
 866 * ipr_fail_all_ops - Fails all outstanding ops.
 867 * @ioa_cfg:    ioa config struct
 868 *
 869 * This function fails all outstanding ops.
 870 *
 871 * Return value:
 872 *      none
 873 **/
 874static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 875{
 876        struct ipr_cmnd *ipr_cmd, *temp;
 877        struct ipr_hrr_queue *hrrq;
 878
 879        ENTER;
 880        for_each_hrrq(hrrq, ioa_cfg) {
 881                spin_lock(&hrrq->_lock);
 882                list_for_each_entry_safe(ipr_cmd,
 883                                        temp, &hrrq->hrrq_pending_q, queue) {
 884                        list_del(&ipr_cmd->queue);
 885
 886                        ipr_cmd->s.ioasa.hdr.ioasc =
 887                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
 888                        ipr_cmd->s.ioasa.hdr.ilid =
 889                                cpu_to_be32(IPR_DRIVER_ILID);
 890
 891                        if (ipr_cmd->scsi_cmd)
 892                                ipr_cmd->done = ipr_scsi_eh_done;
 893                        else if (ipr_cmd->qc)
 894                                ipr_cmd->done = ipr_sata_eh_done;
 895
 896                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
 897                                     IPR_IOASC_IOA_WAS_RESET);
 898                        del_timer(&ipr_cmd->timer);
 899                        ipr_cmd->done(ipr_cmd);
 900                }
 901                spin_unlock(&hrrq->_lock);
 902        }
 903        LEAVE;
 904}
 905
 906/**
 907 * ipr_send_command -  Send driver initiated requests.
 908 * @ipr_cmd:            ipr command struct
 909 *
 910 * This function sends a command to the adapter using the correct write call.
 911 * In the case of sis64, calculate the ioarcb size required. Then or in the
 912 * appropriate bits.
 913 *
 914 * Return value:
 915 *      none
 916 **/
 917static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
 918{
 919        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 920        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
 921
 922        if (ioa_cfg->sis64) {
 923                /* The default size is 256 bytes */
 924                send_dma_addr |= 0x1;
 925
 926                /* If the number of ioadls * size of ioadl > 128 bytes,
 927                   then use a 512 byte ioarcb */
 928                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
 929                        send_dma_addr |= 0x4;
 930                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
 931        } else
 932                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
 933}
 934
 935/**
 936 * ipr_do_req -  Send driver initiated requests.
 937 * @ipr_cmd:            ipr command struct
 938 * @done:                       done function
 939 * @timeout_func:       timeout function
 940 * @timeout:            timeout value
 941 *
 942 * This function sends the specified command to the adapter with the
 943 * timeout given. The done function is invoked on command completion.
 944 *
 945 * Return value:
 946 *      none
 947 **/
 948static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
 949                       void (*done) (struct ipr_cmnd *),
 950                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
 951{
 952        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 953
 954        ipr_cmd->done = done;
 955
 956        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
 957        ipr_cmd->timer.expires = jiffies + timeout;
 958        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
 959
 960        add_timer(&ipr_cmd->timer);
 961
 962        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 963
 964        ipr_send_command(ipr_cmd);
 965}
 966
 967/**
 968 * ipr_internal_cmd_done - Op done function for an internally generated op.
 969 * @ipr_cmd:    ipr command struct
 970 *
 971 * This function is the op done function for an internally generated,
 972 * blocking op. It simply wakes the sleeping thread.
 973 *
 974 * Return value:
 975 *      none
 976 **/
 977static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
 978{
 979        if (ipr_cmd->sibling)
 980                ipr_cmd->sibling = NULL;
 981        else
 982                complete(&ipr_cmd->completion);
 983}
 984
 985/**
 986 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 987 * @ipr_cmd:    ipr command struct
 988 * @dma_addr:   dma address
 989 * @len:        transfer length
 990 * @flags:      ioadl flag value
 991 *
 992 * This function initializes an ioadl in the case where there is only a single
 993 * descriptor.
 994 *
 995 * Return value:
 996 *      nothing
 997 **/
 998static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
 999                           u32 len, int flags)
1000{
1001        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1002        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1003
1004        ipr_cmd->dma_use_sg = 1;
1005
1006        if (ipr_cmd->ioa_cfg->sis64) {
1007                ioadl64->flags = cpu_to_be32(flags);
1008                ioadl64->data_len = cpu_to_be32(len);
1009                ioadl64->address = cpu_to_be64(dma_addr);
1010
1011                ipr_cmd->ioarcb.ioadl_len =
1012                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1013                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1014        } else {
1015                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1016                ioadl->address = cpu_to_be32(dma_addr);
1017
1018                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1019                        ipr_cmd->ioarcb.read_ioadl_len =
1020                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1021                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1022                } else {
1023                        ipr_cmd->ioarcb.ioadl_len =
1024                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1025                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1026                }
1027        }
1028}
1029
1030/**
1031 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1032 * @ipr_cmd:    ipr command struct
1033 * @timeout_func:       function to invoke if command times out
1034 * @timeout:    timeout
1035 *
1036 * Return value:
1037 *      none
1038 **/
1039static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1040                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1041                                  u32 timeout)
1042{
1043        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1044
1045        init_completion(&ipr_cmd->completion);
1046        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1047
1048        spin_unlock_irq(ioa_cfg->host->host_lock);
1049        wait_for_completion(&ipr_cmd->completion);
1050        spin_lock_irq(ioa_cfg->host->host_lock);
1051}
1052
1053static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1054{
1055        unsigned int hrrq;
1056
1057        if (ioa_cfg->hrrq_num == 1)
1058                hrrq = 0;
1059        else {
1060                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1061                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1062        }
1063        return hrrq;
1064}
1065
1066/**
1067 * ipr_send_hcam - Send an HCAM to the adapter.
1068 * @ioa_cfg:    ioa config struct
1069 * @type:               HCAM type
1070 * @hostrcb:    hostrcb struct
1071 *
1072 * This function will send a Host Controlled Async command to the adapter.
1073 * If HCAMs are currently not allowed to be issued to the adapter, it will
1074 * place the hostrcb on the free queue.
1075 *
1076 * Return value:
1077 *      none
1078 **/
1079static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1080                          struct ipr_hostrcb *hostrcb)
1081{
1082        struct ipr_cmnd *ipr_cmd;
1083        struct ipr_ioarcb *ioarcb;
1084
1085        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1086                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1087                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1088                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1089
1090                ipr_cmd->u.hostrcb = hostrcb;
1091                ioarcb = &ipr_cmd->ioarcb;
1092
1093                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1094                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1095                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1096                ioarcb->cmd_pkt.cdb[1] = type;
1097                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1098                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1099
1100                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1101                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1102
1103                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1104                        ipr_cmd->done = ipr_process_ccn;
1105                else
1106                        ipr_cmd->done = ipr_process_error;
1107
1108                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1109
1110                ipr_send_command(ipr_cmd);
1111        } else {
1112                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1113        }
1114}
1115
1116/**
1117 * ipr_update_ata_class - Update the ata class in the resource entry
1118 * @res:        resource entry struct
1119 * @proto:      cfgte device bus protocol value
1120 *
1121 * Return value:
1122 *      none
1123 **/
1124static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1125{
1126        switch (proto) {
1127        case IPR_PROTO_SATA:
1128        case IPR_PROTO_SAS_STP:
1129                res->ata_class = ATA_DEV_ATA;
1130                break;
1131        case IPR_PROTO_SATA_ATAPI:
1132        case IPR_PROTO_SAS_STP_ATAPI:
1133                res->ata_class = ATA_DEV_ATAPI;
1134                break;
1135        default:
1136                res->ata_class = ATA_DEV_UNKNOWN;
1137                break;
1138        };
1139}
1140
1141/**
1142 * ipr_init_res_entry - Initialize a resource entry struct.
1143 * @res:        resource entry struct
1144 * @cfgtew:     config table entry wrapper struct
1145 *
1146 * Return value:
1147 *      none
1148 **/
1149static void ipr_init_res_entry(struct ipr_resource_entry *res,
1150                               struct ipr_config_table_entry_wrapper *cfgtew)
1151{
1152        int found = 0;
1153        unsigned int proto;
1154        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1155        struct ipr_resource_entry *gscsi_res = NULL;
1156
1157        res->needs_sync_complete = 0;
1158        res->in_erp = 0;
1159        res->add_to_ml = 0;
1160        res->del_from_ml = 0;
1161        res->resetting_device = 0;
1162        res->reset_occurred = 0;
1163        res->sdev = NULL;
1164        res->sata_port = NULL;
1165
1166        if (ioa_cfg->sis64) {
1167                proto = cfgtew->u.cfgte64->proto;
1168                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1169                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1170                res->qmodel = IPR_QUEUEING_MODEL64(res);
1171                res->type = cfgtew->u.cfgte64->res_type;
1172
1173                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1174                        sizeof(res->res_path));
1175
1176                res->bus = 0;
1177                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1178                        sizeof(res->dev_lun.scsi_lun));
1179                res->lun = scsilun_to_int(&res->dev_lun);
1180
1181                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1182                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1183                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1184                                        found = 1;
1185                                        res->target = gscsi_res->target;
1186                                        break;
1187                                }
1188                        }
1189                        if (!found) {
1190                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
1191                                                                  ioa_cfg->max_devs_supported);
1192                                set_bit(res->target, ioa_cfg->target_ids);
1193                        }
1194                } else if (res->type == IPR_RES_TYPE_IOAFP) {
1195                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
1196                        res->target = 0;
1197                } else if (res->type == IPR_RES_TYPE_ARRAY) {
1198                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
1199                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
1200                                                          ioa_cfg->max_devs_supported);
1201                        set_bit(res->target, ioa_cfg->array_ids);
1202                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1203                        res->bus = IPR_VSET_VIRTUAL_BUS;
1204                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1205                                                          ioa_cfg->max_devs_supported);
1206                        set_bit(res->target, ioa_cfg->vset_ids);
1207                } else {
1208                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
1209                                                          ioa_cfg->max_devs_supported);
1210                        set_bit(res->target, ioa_cfg->target_ids);
1211                }
1212        } else {
1213                proto = cfgtew->u.cfgte->proto;
1214                res->qmodel = IPR_QUEUEING_MODEL(res);
1215                res->flags = cfgtew->u.cfgte->flags;
1216                if (res->flags & IPR_IS_IOA_RESOURCE)
1217                        res->type = IPR_RES_TYPE_IOAFP;
1218                else
1219                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1220
1221                res->bus = cfgtew->u.cfgte->res_addr.bus;
1222                res->target = cfgtew->u.cfgte->res_addr.target;
1223                res->lun = cfgtew->u.cfgte->res_addr.lun;
1224                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1225        }
1226
1227        ipr_update_ata_class(res, proto);
1228}
1229
1230/**
1231 * ipr_is_same_device - Determine if two devices are the same.
1232 * @res:        resource entry struct
1233 * @cfgtew:     config table entry wrapper struct
1234 *
1235 * Return value:
1236 *      1 if the devices are the same / 0 otherwise
1237 **/
1238static int ipr_is_same_device(struct ipr_resource_entry *res,
1239                              struct ipr_config_table_entry_wrapper *cfgtew)
1240{
1241        if (res->ioa_cfg->sis64) {
1242                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1243                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
1244                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1245                                        sizeof(cfgtew->u.cfgte64->lun))) {
1246                        return 1;
1247                }
1248        } else {
1249                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1250                    res->target == cfgtew->u.cfgte->res_addr.target &&
1251                    res->lun == cfgtew->u.cfgte->res_addr.lun)
1252                        return 1;
1253        }
1254
1255        return 0;
1256}
1257
1258/**
1259 * __ipr_format_res_path - Format the resource path for printing.
1260 * @res_path:   resource path
1261 * @buf:        buffer
1262 * @len:        length of buffer provided
1263 *
1264 * Return value:
1265 *      pointer to buffer
1266 **/
1267static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1268{
1269        int i;
1270        char *p = buffer;
1271
1272        *p = '\0';
1273        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1274        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1275                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1276
1277        return buffer;
1278}
1279
1280/**
1281 * ipr_format_res_path - Format the resource path for printing.
1282 * @ioa_cfg:    ioa config struct
1283 * @res_path:   resource path
1284 * @buf:        buffer
1285 * @len:        length of buffer provided
1286 *
1287 * Return value:
1288 *      pointer to buffer
1289 **/
1290static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1291                                 u8 *res_path, char *buffer, int len)
1292{
1293        char *p = buffer;
1294
1295        *p = '\0';
1296        p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1297        __ipr_format_res_path(res_path, p, len - (buffer - p));
1298        return buffer;
1299}
1300
1301/**
1302 * ipr_update_res_entry - Update the resource entry.
1303 * @res:        resource entry struct
1304 * @cfgtew:     config table entry wrapper struct
1305 *
1306 * Return value:
1307 *      none
1308 **/
1309static void ipr_update_res_entry(struct ipr_resource_entry *res,
1310                                 struct ipr_config_table_entry_wrapper *cfgtew)
1311{
1312        char buffer[IPR_MAX_RES_PATH_LENGTH];
1313        unsigned int proto;
1314        int new_path = 0;
1315
1316        if (res->ioa_cfg->sis64) {
1317                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1318                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1319                res->type = cfgtew->u.cfgte64->res_type;
1320
1321                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1322                        sizeof(struct ipr_std_inq_data));
1323
1324                res->qmodel = IPR_QUEUEING_MODEL64(res);
1325                proto = cfgtew->u.cfgte64->proto;
1326                res->res_handle = cfgtew->u.cfgte64->res_handle;
1327                res->dev_id = cfgtew->u.cfgte64->dev_id;
1328
1329                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1330                        sizeof(res->dev_lun.scsi_lun));
1331
1332                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1333                                        sizeof(res->res_path))) {
1334                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1335                                sizeof(res->res_path));
1336                        new_path = 1;
1337                }
1338
1339                if (res->sdev && new_path)
1340                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1341                                    ipr_format_res_path(res->ioa_cfg,
1342                                        res->res_path, buffer, sizeof(buffer)));
1343        } else {
1344                res->flags = cfgtew->u.cfgte->flags;
1345                if (res->flags & IPR_IS_IOA_RESOURCE)
1346                        res->type = IPR_RES_TYPE_IOAFP;
1347                else
1348                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1349
1350                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1351                        sizeof(struct ipr_std_inq_data));
1352
1353                res->qmodel = IPR_QUEUEING_MODEL(res);
1354                proto = cfgtew->u.cfgte->proto;
1355                res->res_handle = cfgtew->u.cfgte->res_handle;
1356        }
1357
1358        ipr_update_ata_class(res, proto);
1359}
1360
1361/**
1362 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1363 *                        for the resource.
1364 * @res:        resource entry struct
1365 * @cfgtew:     config table entry wrapper struct
1366 *
1367 * Return value:
1368 *      none
1369 **/
1370static void ipr_clear_res_target(struct ipr_resource_entry *res)
1371{
1372        struct ipr_resource_entry *gscsi_res = NULL;
1373        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1374
1375        if (!ioa_cfg->sis64)
1376                return;
1377
1378        if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1379                clear_bit(res->target, ioa_cfg->array_ids);
1380        else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1381                clear_bit(res->target, ioa_cfg->vset_ids);
1382        else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1383                list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1384                        if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1385                                return;
1386                clear_bit(res->target, ioa_cfg->target_ids);
1387
1388        } else if (res->bus == 0)
1389                clear_bit(res->target, ioa_cfg->target_ids);
1390}
1391
1392/**
1393 * ipr_handle_config_change - Handle a config change from the adapter
1394 * @ioa_cfg:    ioa config struct
1395 * @hostrcb:    hostrcb
1396 *
1397 * Return value:
1398 *      none
1399 **/
1400static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1401                                     struct ipr_hostrcb *hostrcb)
1402{
1403        struct ipr_resource_entry *res = NULL;
1404        struct ipr_config_table_entry_wrapper cfgtew;
1405        __be32 cc_res_handle;
1406
1407        u32 is_ndn = 1;
1408
1409        if (ioa_cfg->sis64) {
1410                cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1411                cc_res_handle = cfgtew.u.cfgte64->res_handle;
1412        } else {
1413                cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1414                cc_res_handle = cfgtew.u.cfgte->res_handle;
1415        }
1416
1417        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1418                if (res->res_handle == cc_res_handle) {
1419                        is_ndn = 0;
1420                        break;
1421                }
1422        }
1423
1424        if (is_ndn) {
1425                if (list_empty(&ioa_cfg->free_res_q)) {
1426                        ipr_send_hcam(ioa_cfg,
1427                                      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1428                                      hostrcb);
1429                        return;
1430                }
1431
1432                res = list_entry(ioa_cfg->free_res_q.next,
1433                                 struct ipr_resource_entry, queue);
1434
1435                list_del(&res->queue);
1436                ipr_init_res_entry(res, &cfgtew);
1437                list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1438        }
1439
1440        ipr_update_res_entry(res, &cfgtew);
1441
1442        if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1443                if (res->sdev) {
1444                        res->del_from_ml = 1;
1445                        res->res_handle = IPR_INVALID_RES_HANDLE;
1446                        schedule_work(&ioa_cfg->work_q);
1447                } else {
1448                        ipr_clear_res_target(res);
1449                        list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1450                }
1451        } else if (!res->sdev || res->del_from_ml) {
1452                res->add_to_ml = 1;
1453                schedule_work(&ioa_cfg->work_q);
1454        }
1455
1456        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1457}
1458
1459/**
1460 * ipr_process_ccn - Op done function for a CCN.
1461 * @ipr_cmd:    ipr command struct
1462 *
1463 * This function is the op done function for a configuration
1464 * change notification host controlled async from the adapter.
1465 *
1466 * Return value:
1467 *      none
1468 **/
1469static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1470{
1471        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1472        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1473        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1474
1475        list_del(&hostrcb->queue);
1476        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1477
1478        if (ioasc) {
1479                if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1480                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1481                        dev_err(&ioa_cfg->pdev->dev,
1482                                "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1483
1484                ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1485        } else {
1486                ipr_handle_config_change(ioa_cfg, hostrcb);
1487        }
1488}
1489
1490/**
1491 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1492 * @i:          index into buffer
1493 * @buf:                string to modify
1494 *
1495 * This function will strip all trailing whitespace, pad the end
1496 * of the string with a single space, and NULL terminate the string.
1497 *
1498 * Return value:
1499 *      new length of string
1500 **/
1501static int strip_and_pad_whitespace(int i, char *buf)
1502{
1503        while (i && buf[i] == ' ')
1504                i--;
1505        buf[i+1] = ' ';
1506        buf[i+2] = '\0';
1507        return i + 2;
1508}
1509
1510/**
1511 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1512 * @prefix:             string to print at start of printk
1513 * @hostrcb:    hostrcb pointer
1514 * @vpd:                vendor/product id/sn struct
1515 *
1516 * Return value:
1517 *      none
1518 **/
1519static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1520                                struct ipr_vpd *vpd)
1521{
1522        char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1523        int i = 0;
1524
1525        memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1526        i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1527
1528        memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1529        i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1530
1531        memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1532        buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1533
1534        ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1535}
1536
1537/**
1538 * ipr_log_vpd - Log the passed VPD to the error log.
1539 * @vpd:                vendor/product id/sn struct
1540 *
1541 * Return value:
1542 *      none
1543 **/
1544static void ipr_log_vpd(struct ipr_vpd *vpd)
1545{
1546        char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1547                    + IPR_SERIAL_NUM_LEN];
1548
1549        memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1550        memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1551               IPR_PROD_ID_LEN);
1552        buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1553        ipr_err("Vendor/Product ID: %s\n", buffer);
1554
1555        memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1556        buffer[IPR_SERIAL_NUM_LEN] = '\0';
1557        ipr_err("    Serial Number: %s\n", buffer);
1558}
1559
1560/**
1561 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1562 * @prefix:             string to print at start of printk
1563 * @hostrcb:    hostrcb pointer
1564 * @vpd:                vendor/product id/sn/wwn struct
1565 *
1566 * Return value:
1567 *      none
1568 **/
1569static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1570                                    struct ipr_ext_vpd *vpd)
1571{
1572        ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1573        ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1574                     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1575}
1576
1577/**
1578 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1579 * @vpd:                vendor/product id/sn/wwn struct
1580 *
1581 * Return value:
1582 *      none
1583 **/
1584static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1585{
1586        ipr_log_vpd(&vpd->vpd);
1587        ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1588                be32_to_cpu(vpd->wwid[1]));
1589}
1590
1591/**
1592 * ipr_log_enhanced_cache_error - Log a cache error.
1593 * @ioa_cfg:    ioa config struct
1594 * @hostrcb:    hostrcb struct
1595 *
1596 * Return value:
1597 *      none
1598 **/
1599static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1600                                         struct ipr_hostrcb *hostrcb)
1601{
1602        struct ipr_hostrcb_type_12_error *error;
1603
1604        if (ioa_cfg->sis64)
1605                error = &hostrcb->hcam.u.error64.u.type_12_error;
1606        else
1607                error = &hostrcb->hcam.u.error.u.type_12_error;
1608
1609        ipr_err("-----Current Configuration-----\n");
1610        ipr_err("Cache Directory Card Information:\n");
1611        ipr_log_ext_vpd(&error->ioa_vpd);
1612        ipr_err("Adapter Card Information:\n");
1613        ipr_log_ext_vpd(&error->cfc_vpd);
1614
1615        ipr_err("-----Expected Configuration-----\n");
1616        ipr_err("Cache Directory Card Information:\n");
1617        ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1618        ipr_err("Adapter Card Information:\n");
1619        ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1620
1621        ipr_err("Additional IOA Data: %08X %08X %08X\n",
1622                     be32_to_cpu(error->ioa_data[0]),
1623                     be32_to_cpu(error->ioa_data[1]),
1624                     be32_to_cpu(error->ioa_data[2]));
1625}
1626
1627/**
1628 * ipr_log_cache_error - Log a cache error.
1629 * @ioa_cfg:    ioa config struct
1630 * @hostrcb:    hostrcb struct
1631 *
1632 * Return value:
1633 *      none
1634 **/
1635static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1636                                struct ipr_hostrcb *hostrcb)
1637{
1638        struct ipr_hostrcb_type_02_error *error =
1639                &hostrcb->hcam.u.error.u.type_02_error;
1640
1641        ipr_err("-----Current Configuration-----\n");
1642        ipr_err("Cache Directory Card Information:\n");
1643        ipr_log_vpd(&error->ioa_vpd);
1644        ipr_err("Adapter Card Information:\n");
1645        ipr_log_vpd(&error->cfc_vpd);
1646
1647        ipr_err("-----Expected Configuration-----\n");
1648        ipr_err("Cache Directory Card Information:\n");
1649        ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1650        ipr_err("Adapter Card Information:\n");
1651        ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1652
1653        ipr_err("Additional IOA Data: %08X %08X %08X\n",
1654                     be32_to_cpu(error->ioa_data[0]),
1655                     be32_to_cpu(error->ioa_data[1]),
1656                     be32_to_cpu(error->ioa_data[2]));
1657}
1658
1659/**
1660 * ipr_log_enhanced_config_error - Log a configuration error.
1661 * @ioa_cfg:    ioa config struct
1662 * @hostrcb:    hostrcb struct
1663 *
1664 * Return value:
1665 *      none
1666 **/
1667static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1668                                          struct ipr_hostrcb *hostrcb)
1669{
1670        int errors_logged, i;
1671        struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1672        struct ipr_hostrcb_type_13_error *error;
1673
1674        error = &hostrcb->hcam.u.error.u.type_13_error;
1675        errors_logged = be32_to_cpu(error->errors_logged);
1676
1677        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1678                be32_to_cpu(error->errors_detected), errors_logged);
1679
1680        dev_entry = error->dev;
1681
1682        for (i = 0; i < errors_logged; i++, dev_entry++) {
1683                ipr_err_separator;
1684
1685                ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1686                ipr_log_ext_vpd(&dev_entry->vpd);
1687
1688                ipr_err("-----New Device Information-----\n");
1689                ipr_log_ext_vpd(&dev_entry->new_vpd);
1690
1691                ipr_err("Cache Directory Card Information:\n");
1692                ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1693
1694                ipr_err("Adapter Card Information:\n");
1695                ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1696        }
1697}
1698
1699/**
1700 * ipr_log_sis64_config_error - Log a device error.
1701 * @ioa_cfg:    ioa config struct
1702 * @hostrcb:    hostrcb struct
1703 *
1704 * Return value:
1705 *      none
1706 **/
1707static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1708                                       struct ipr_hostrcb *hostrcb)
1709{
1710        int errors_logged, i;
1711        struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1712        struct ipr_hostrcb_type_23_error *error;
1713        char buffer[IPR_MAX_RES_PATH_LENGTH];
1714
1715        error = &hostrcb->hcam.u.error64.u.type_23_error;
1716        errors_logged = be32_to_cpu(error->errors_logged);
1717
1718        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1719                be32_to_cpu(error->errors_detected), errors_logged);
1720
1721        dev_entry = error->dev;
1722
1723        for (i = 0; i < errors_logged; i++, dev_entry++) {
1724                ipr_err_separator;
1725
1726                ipr_err("Device %d : %s", i + 1,
1727                        __ipr_format_res_path(dev_entry->res_path,
1728                                              buffer, sizeof(buffer)));
1729                ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731                ipr_err("-----New Device Information-----\n");
1732                ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734                ipr_err("Cache Directory Card Information:\n");
1735                ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737                ipr_err("Adapter Card Information:\n");
1738                ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739        }
1740}
1741
1742/**
1743 * ipr_log_config_error - Log a configuration error.
1744 * @ioa_cfg:    ioa config struct
1745 * @hostrcb:    hostrcb struct
1746 *
1747 * Return value:
1748 *      none
1749 **/
1750static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751                                 struct ipr_hostrcb *hostrcb)
1752{
1753        int errors_logged, i;
1754        struct ipr_hostrcb_device_data_entry *dev_entry;
1755        struct ipr_hostrcb_type_03_error *error;
1756
1757        error = &hostrcb->hcam.u.error.u.type_03_error;
1758        errors_logged = be32_to_cpu(error->errors_logged);
1759
1760        ipr_err("Device Errors Detected/Logged: %d/%d\n",
1761                be32_to_cpu(error->errors_detected), errors_logged);
1762
1763        dev_entry = error->dev;
1764
1765        for (i = 0; i < errors_logged; i++, dev_entry++) {
1766                ipr_err_separator;
1767
1768                ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1769                ipr_log_vpd(&dev_entry->vpd);
1770
1771                ipr_err("-----New Device Information-----\n");
1772                ipr_log_vpd(&dev_entry->new_vpd);
1773
1774                ipr_err("Cache Directory Card Information:\n");
1775                ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1776
1777                ipr_err("Adapter Card Information:\n");
1778                ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1779
1780                ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1781                        be32_to_cpu(dev_entry->ioa_data[0]),
1782                        be32_to_cpu(dev_entry->ioa_data[1]),
1783                        be32_to_cpu(dev_entry->ioa_data[2]),
1784                        be32_to_cpu(dev_entry->ioa_data[3]),
1785                        be32_to_cpu(dev_entry->ioa_data[4]));
1786        }
1787}
1788
1789/**
1790 * ipr_log_enhanced_array_error - Log an array configuration error.
1791 * @ioa_cfg:    ioa config struct
1792 * @hostrcb:    hostrcb struct
1793 *
1794 * Return value:
1795 *      none
1796 **/
1797static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1798                                         struct ipr_hostrcb *hostrcb)
1799{
1800        int i, num_entries;
1801        struct ipr_hostrcb_type_14_error *error;
1802        struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1803        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1804
1805        error = &hostrcb->hcam.u.error.u.type_14_error;
1806
1807        ipr_err_separator;
1808
1809        ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1810                error->protection_level,
1811                ioa_cfg->host->host_no,
1812                error->last_func_vset_res_addr.bus,
1813                error->last_func_vset_res_addr.target,
1814                error->last_func_vset_res_addr.lun);
1815
1816        ipr_err_separator;
1817
1818        array_entry = error->array_member;
1819        num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1820                            ARRAY_SIZE(error->array_member));
1821
1822        for (i = 0; i < num_entries; i++, array_entry++) {
1823                if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1824                        continue;
1825
1826                if (be32_to_cpu(error->exposed_mode_adn) == i)
1827                        ipr_err("Exposed Array Member %d:\n", i);
1828                else
1829                        ipr_err("Array Member %d:\n", i);
1830
1831                ipr_log_ext_vpd(&array_entry->vpd);
1832                ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1833                ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1834                                 "Expected Location");
1835
1836                ipr_err_separator;
1837        }
1838}
1839
1840/**
1841 * ipr_log_array_error - Log an array configuration error.
1842 * @ioa_cfg:    ioa config struct
1843 * @hostrcb:    hostrcb struct
1844 *
1845 * Return value:
1846 *      none
1847 **/
1848static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1849                                struct ipr_hostrcb *hostrcb)
1850{
1851        int i;
1852        struct ipr_hostrcb_type_04_error *error;
1853        struct ipr_hostrcb_array_data_entry *array_entry;
1854        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1855
1856        error = &hostrcb->hcam.u.error.u.type_04_error;
1857
1858        ipr_err_separator;
1859
1860        ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1861                error->protection_level,
1862                ioa_cfg->host->host_no,
1863                error->last_func_vset_res_addr.bus,
1864                error->last_func_vset_res_addr.target,
1865                error->last_func_vset_res_addr.lun);
1866
1867        ipr_err_separator;
1868
1869        array_entry = error->array_member;
1870
1871        for (i = 0; i < 18; i++) {
1872                if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1873                        continue;
1874
1875                if (be32_to_cpu(error->exposed_mode_adn) == i)
1876                        ipr_err("Exposed Array Member %d:\n", i);
1877                else
1878                        ipr_err("Array Member %d:\n", i);
1879
1880                ipr_log_vpd(&array_entry->vpd);
1881
1882                ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1883                ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1884                                 "Expected Location");
1885
1886                ipr_err_separator;
1887
1888                if (i == 9)
1889                        array_entry = error->array_member2;
1890                else
1891                        array_entry++;
1892        }
1893}
1894
1895/**
1896 * ipr_log_hex_data - Log additional hex IOA error data.
1897 * @ioa_cfg:    ioa config struct
1898 * @data:               IOA error data
1899 * @len:                data length
1900 *
1901 * Return value:
1902 *      none
1903 **/
1904static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1905{
1906        int i;
1907
1908        if (len == 0)
1909                return;
1910
1911        if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1912                len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1913
1914        for (i = 0; i < len / 4; i += 4) {
1915                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1916                        be32_to_cpu(data[i]),
1917                        be32_to_cpu(data[i+1]),
1918                        be32_to_cpu(data[i+2]),
1919                        be32_to_cpu(data[i+3]));
1920        }
1921}
1922
1923/**
1924 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1925 * @ioa_cfg:    ioa config struct
1926 * @hostrcb:    hostrcb struct
1927 *
1928 * Return value:
1929 *      none
1930 **/
1931static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1932                                            struct ipr_hostrcb *hostrcb)
1933{
1934        struct ipr_hostrcb_type_17_error *error;
1935
1936        if (ioa_cfg->sis64)
1937                error = &hostrcb->hcam.u.error64.u.type_17_error;
1938        else
1939                error = &hostrcb->hcam.u.error.u.type_17_error;
1940
1941        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1942        strim(error->failure_reason);
1943
1944        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1945                     be32_to_cpu(hostrcb->hcam.u.error.prc));
1946        ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1947        ipr_log_hex_data(ioa_cfg, error->data,
1948                         be32_to_cpu(hostrcb->hcam.length) -
1949                         (offsetof(struct ipr_hostrcb_error, u) +
1950                          offsetof(struct ipr_hostrcb_type_17_error, data)));
1951}
1952
1953/**
1954 * ipr_log_dual_ioa_error - Log a dual adapter error.
1955 * @ioa_cfg:    ioa config struct
1956 * @hostrcb:    hostrcb struct
1957 *
1958 * Return value:
1959 *      none
1960 **/
1961static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1962                                   struct ipr_hostrcb *hostrcb)
1963{
1964        struct ipr_hostrcb_type_07_error *error;
1965
1966        error = &hostrcb->hcam.u.error.u.type_07_error;
1967        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1968        strim(error->failure_reason);
1969
1970        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1971                     be32_to_cpu(hostrcb->hcam.u.error.prc));
1972        ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1973        ipr_log_hex_data(ioa_cfg, error->data,
1974                         be32_to_cpu(hostrcb->hcam.length) -
1975                         (offsetof(struct ipr_hostrcb_error, u) +
1976                          offsetof(struct ipr_hostrcb_type_07_error, data)));
1977}
1978
1979static const struct {
1980        u8 active;
1981        char *desc;
1982} path_active_desc[] = {
1983        { IPR_PATH_NO_INFO, "Path" },
1984        { IPR_PATH_ACTIVE, "Active path" },
1985        { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1986};
1987
1988static const struct {
1989        u8 state;
1990        char *desc;
1991} path_state_desc[] = {
1992        { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1993        { IPR_PATH_HEALTHY, "is healthy" },
1994        { IPR_PATH_DEGRADED, "is degraded" },
1995        { IPR_PATH_FAILED, "is failed" }
1996};
1997
1998/**
1999 * ipr_log_fabric_path - Log a fabric path error
2000 * @hostrcb:    hostrcb struct
2001 * @fabric:             fabric descriptor
2002 *
2003 * Return value:
2004 *      none
2005 **/
2006static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2007                                struct ipr_hostrcb_fabric_desc *fabric)
2008{
2009        int i, j;
2010        u8 path_state = fabric->path_state;
2011        u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2012        u8 state = path_state & IPR_PATH_STATE_MASK;
2013
2014        for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2015                if (path_active_desc[i].active != active)
2016                        continue;
2017
2018                for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2019                        if (path_state_desc[j].state != state)
2020                                continue;
2021
2022                        if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2023                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2024                                             path_active_desc[i].desc, path_state_desc[j].desc,
2025                                             fabric->ioa_port);
2026                        } else if (fabric->cascaded_expander == 0xff) {
2027                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2028                                             path_active_desc[i].desc, path_state_desc[j].desc,
2029                                             fabric->ioa_port, fabric->phy);
2030                        } else if (fabric->phy == 0xff) {
2031                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2032                                             path_active_desc[i].desc, path_state_desc[j].desc,
2033                                             fabric->ioa_port, fabric->cascaded_expander);
2034                        } else {
2035                                ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2036                                             path_active_desc[i].desc, path_state_desc[j].desc,
2037                                             fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2038                        }
2039                        return;
2040                }
2041        }
2042
2043        ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2044                fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2045}
2046
2047/**
2048 * ipr_log64_fabric_path - Log a fabric path error
2049 * @hostrcb:    hostrcb struct
2050 * @fabric:             fabric descriptor
2051 *
2052 * Return value:
2053 *      none
2054 **/
2055static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2056                                  struct ipr_hostrcb64_fabric_desc *fabric)
2057{
2058        int i, j;
2059        u8 path_state = fabric->path_state;
2060        u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2061        u8 state = path_state & IPR_PATH_STATE_MASK;
2062        char buffer[IPR_MAX_RES_PATH_LENGTH];
2063
2064        for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2065                if (path_active_desc[i].active != active)
2066                        continue;
2067
2068                for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2069                        if (path_state_desc[j].state != state)
2070                                continue;
2071
2072                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2073                                     path_active_desc[i].desc, path_state_desc[j].desc,
2074                                     ipr_format_res_path(hostrcb->ioa_cfg,
2075                                                fabric->res_path,
2076                                                buffer, sizeof(buffer)));
2077                        return;
2078                }
2079        }
2080
2081        ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2082                ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2083                                    buffer, sizeof(buffer)));
2084}
2085
2086static const struct {
2087        u8 type;
2088        char *desc;
2089} path_type_desc[] = {
2090        { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2091        { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2092        { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2093        { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2094};
2095
2096static const struct {
2097        u8 status;
2098        char *desc;
2099} path_status_desc[] = {
2100        { IPR_PATH_CFG_NO_PROB, "Functional" },
2101        { IPR_PATH_CFG_DEGRADED, "Degraded" },
2102        { IPR_PATH_CFG_FAILED, "Failed" },
2103        { IPR_PATH_CFG_SUSPECT, "Suspect" },
2104        { IPR_PATH_NOT_DETECTED, "Missing" },
2105        { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2106};
2107
2108static const char *link_rate[] = {
2109        "unknown",
2110        "disabled",
2111        "phy reset problem",
2112        "spinup hold",
2113        "port selector",
2114        "unknown",
2115        "unknown",
2116        "unknown",
2117        "1.5Gbps",
2118        "3.0Gbps",
2119        "unknown",
2120        "unknown",
2121        "unknown",
2122        "unknown",
2123        "unknown",
2124        "unknown"
2125};
2126
2127/**
2128 * ipr_log_path_elem - Log a fabric path element.
2129 * @hostrcb:    hostrcb struct
2130 * @cfg:                fabric path element struct
2131 *
2132 * Return value:
2133 *      none
2134 **/
2135static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2136                              struct ipr_hostrcb_config_element *cfg)
2137{
2138        int i, j;
2139        u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2140        u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2141
2142        if (type == IPR_PATH_CFG_NOT_EXIST)
2143                return;
2144
2145        for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2146                if (path_type_desc[i].type != type)
2147                        continue;
2148
2149                for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2150                        if (path_status_desc[j].status != status)
2151                                continue;
2152
2153                        if (type == IPR_PATH_CFG_IOA_PORT) {
2154                                ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2155                                             path_status_desc[j].desc, path_type_desc[i].desc,
2156                                             cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2157                                             be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2158                        } else {
2159                                if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2160                                        ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2161                                                     path_status_desc[j].desc, path_type_desc[i].desc,
2162                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2163                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2164                                } else if (cfg->cascaded_expander == 0xff) {
2165                                        ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2166                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2167                                                     path_type_desc[i].desc, cfg->phy,
2168                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2169                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2170                                } else if (cfg->phy == 0xff) {
2171                                        ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2172                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2173                                                     path_type_desc[i].desc, cfg->cascaded_expander,
2174                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176                                } else {
2177                                        ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2178                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
2179                                                     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2180                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2181                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2182                                }
2183                        }
2184                        return;
2185                }
2186        }
2187
2188        ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2189                     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2190                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2191                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2192}
2193
2194/**
2195 * ipr_log64_path_elem - Log a fabric path element.
2196 * @hostrcb:    hostrcb struct
2197 * @cfg:                fabric path element struct
2198 *
2199 * Return value:
2200 *      none
2201 **/
2202static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2203                                struct ipr_hostrcb64_config_element *cfg)
2204{
2205        int i, j;
2206        u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2207        u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2208        u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2209        char buffer[IPR_MAX_RES_PATH_LENGTH];
2210
2211        if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2212                return;
2213
2214        for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2215                if (path_type_desc[i].type != type)
2216                        continue;
2217
2218                for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2219                        if (path_status_desc[j].status != status)
2220                                continue;
2221
2222                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2223                                     path_status_desc[j].desc, path_type_desc[i].desc,
2224                                     ipr_format_res_path(hostrcb->ioa_cfg,
2225                                        cfg->res_path, buffer, sizeof(buffer)),
2226                                        link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2227                                        be32_to_cpu(cfg->wwid[0]),
2228                                        be32_to_cpu(cfg->wwid[1]));
2229                        return;
2230                }
2231        }
2232        ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2233                     "WWN=%08X%08X\n", cfg->type_status,
2234                     ipr_format_res_path(hostrcb->ioa_cfg,
2235                        cfg->res_path, buffer, sizeof(buffer)),
2236                        link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2237                        be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2238}
2239
2240/**
2241 * ipr_log_fabric_error - Log a fabric error.
2242 * @ioa_cfg:    ioa config struct
2243 * @hostrcb:    hostrcb struct
2244 *
2245 * Return value:
2246 *      none
2247 **/
2248static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2249                                 struct ipr_hostrcb *hostrcb)
2250{
2251        struct ipr_hostrcb_type_20_error *error;
2252        struct ipr_hostrcb_fabric_desc *fabric;
2253        struct ipr_hostrcb_config_element *cfg;
2254        int i, add_len;
2255
2256        error = &hostrcb->hcam.u.error.u.type_20_error;
2257        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2258        ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2259
2260        add_len = be32_to_cpu(hostrcb->hcam.length) -
2261                (offsetof(struct ipr_hostrcb_error, u) +
2262                 offsetof(struct ipr_hostrcb_type_20_error, desc));
2263
2264        for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2265                ipr_log_fabric_path(hostrcb, fabric);
2266                for_each_fabric_cfg(fabric, cfg)
2267                        ipr_log_path_elem(hostrcb, cfg);
2268
2269                add_len -= be16_to_cpu(fabric->length);
2270                fabric = (struct ipr_hostrcb_fabric_desc *)
2271                        ((unsigned long)fabric + be16_to_cpu(fabric->length));
2272        }
2273
2274        ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2275}
2276
2277/**
2278 * ipr_log_sis64_array_error - Log a sis64 array error.
2279 * @ioa_cfg:    ioa config struct
2280 * @hostrcb:    hostrcb struct
2281 *
2282 * Return value:
2283 *      none
2284 **/
2285static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2286                                      struct ipr_hostrcb *hostrcb)
2287{
2288        int i, num_entries;
2289        struct ipr_hostrcb_type_24_error *error;
2290        struct ipr_hostrcb64_array_data_entry *array_entry;
2291        char buffer[IPR_MAX_RES_PATH_LENGTH];
2292        const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2293
2294        error = &hostrcb->hcam.u.error64.u.type_24_error;
2295
2296        ipr_err_separator;
2297
2298        ipr_err("RAID %s Array Configuration: %s\n",
2299                error->protection_level,
2300                ipr_format_res_path(ioa_cfg, error->last_res_path,
2301                        buffer, sizeof(buffer)));
2302
2303        ipr_err_separator;
2304
2305        array_entry = error->array_member;
2306        num_entries = min_t(u32, error->num_entries,
2307                            ARRAY_SIZE(error->array_member));
2308
2309        for (i = 0; i < num_entries; i++, array_entry++) {
2310
2311                if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2312                        continue;
2313
2314                if (error->exposed_mode_adn == i)
2315                        ipr_err("Exposed Array Member %d:\n", i);
2316                else
2317                        ipr_err("Array Member %d:\n", i);
2318
2319                ipr_err("Array Member %d:\n", i);
2320                ipr_log_ext_vpd(&array_entry->vpd);
2321                ipr_err("Current Location: %s\n",
2322                         ipr_format_res_path(ioa_cfg, array_entry->res_path,
2323                                buffer, sizeof(buffer)));
2324                ipr_err("Expected Location: %s\n",
2325                         ipr_format_res_path(ioa_cfg,
2326                                array_entry->expected_res_path,
2327                                buffer, sizeof(buffer)));
2328
2329                ipr_err_separator;
2330        }
2331}
2332
2333/**
2334 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2335 * @ioa_cfg:    ioa config struct
2336 * @hostrcb:    hostrcb struct
2337 *
2338 * Return value:
2339 *      none
2340 **/
2341static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2342                                       struct ipr_hostrcb *hostrcb)
2343{
2344        struct ipr_hostrcb_type_30_error *error;
2345        struct ipr_hostrcb64_fabric_desc *fabric;
2346        struct ipr_hostrcb64_config_element *cfg;
2347        int i, add_len;
2348
2349        error = &hostrcb->hcam.u.error64.u.type_30_error;
2350
2351        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2352        ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2353
2354        add_len = be32_to_cpu(hostrcb->hcam.length) -
2355                (offsetof(struct ipr_hostrcb64_error, u) +
2356                 offsetof(struct ipr_hostrcb_type_30_error, desc));
2357
2358        for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2359                ipr_log64_fabric_path(hostrcb, fabric);
2360                for_each_fabric_cfg(fabric, cfg)
2361                        ipr_log64_path_elem(hostrcb, cfg);
2362
2363                add_len -= be16_to_cpu(fabric->length);
2364                fabric = (struct ipr_hostrcb64_fabric_desc *)
2365                        ((unsigned long)fabric + be16_to_cpu(fabric->length));
2366        }
2367
2368        ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2369}
2370
2371/**
2372 * ipr_log_generic_error - Log an adapter error.
2373 * @ioa_cfg:    ioa config struct
2374 * @hostrcb:    hostrcb struct
2375 *
2376 * Return value:
2377 *      none
2378 **/
2379static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2380                                  struct ipr_hostrcb *hostrcb)
2381{
2382        ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2383                         be32_to_cpu(hostrcb->hcam.length));
2384}
2385
2386/**
2387 * ipr_log_sis64_device_error - Log a cache error.
2388 * @ioa_cfg:    ioa config struct
2389 * @hostrcb:    hostrcb struct
2390 *
2391 * Return value:
2392 *      none
2393 **/
2394static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2395                                         struct ipr_hostrcb *hostrcb)
2396{
2397        struct ipr_hostrcb_type_21_error *error;
2398        char buffer[IPR_MAX_RES_PATH_LENGTH];
2399
2400        error = &hostrcb->hcam.u.error64.u.type_21_error;
2401
2402        ipr_err("-----Failing Device Information-----\n");
2403        ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2404                be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2405                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2406        ipr_err("Device Resource Path: %s\n",
2407                __ipr_format_res_path(error->res_path,
2408                                      buffer, sizeof(buffer)));
2409        error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2410        error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2411        ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2412        ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2413        ipr_err("SCSI Sense Data:\n");
2414        ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2415        ipr_err("SCSI Command Descriptor Block: \n");
2416        ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2417
2418        ipr_err("Additional IOA Data:\n");
2419        ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2420}
2421
2422/**
2423 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2424 * @ioasc:      IOASC
2425 *
2426 * This function will return the index of into the ipr_error_table
2427 * for the specified IOASC. If the IOASC is not in the table,
2428 * 0 will be returned, which points to the entry used for unknown errors.
2429 *
2430 * Return value:
2431 *      index into the ipr_error_table
2432 **/
2433static u32 ipr_get_error(u32 ioasc)
2434{
2435        int i;
2436
2437        for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2438                if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2439                        return i;
2440
2441        return 0;
2442}
2443
2444/**
2445 * ipr_handle_log_data - Log an adapter error.
2446 * @ioa_cfg:    ioa config struct
2447 * @hostrcb:    hostrcb struct
2448 *
2449 * This function logs an adapter error to the system.
2450 *
2451 * Return value:
2452 *      none
2453 **/
2454static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2455                                struct ipr_hostrcb *hostrcb)
2456{
2457        u32 ioasc;
2458        int error_index;
2459        struct ipr_hostrcb_type_21_error *error;
2460
2461        if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2462                return;
2463
2464        if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2465                dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2466
2467        if (ioa_cfg->sis64)
2468                ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2469        else
2470                ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2471
2472        if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2473            ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2474                /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2475                scsi_report_bus_reset(ioa_cfg->host,
2476                                      hostrcb->hcam.u.error.fd_res_addr.bus);
2477        }
2478
2479        error_index = ipr_get_error(ioasc);
2480
2481        if (!ipr_error_table[error_index].log_hcam)
2482                return;
2483
2484        if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2485            hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2486                error = &hostrcb->hcam.u.error64.u.type_21_error;
2487
2488                if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2489                        ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2490                                return;
2491        }
2492
2493        ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2494
2495        /* Set indication we have logged an error */
2496        ioa_cfg->errors_logged++;
2497
2498        if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2499                return;
2500        if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2501                hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2502
2503        switch (hostrcb->hcam.overlay_id) {
2504        case IPR_HOST_RCB_OVERLAY_ID_2:
2505                ipr_log_cache_error(ioa_cfg, hostrcb);
2506                break;
2507        case IPR_HOST_RCB_OVERLAY_ID_3:
2508                ipr_log_config_error(ioa_cfg, hostrcb);
2509                break;
2510        case IPR_HOST_RCB_OVERLAY_ID_4:
2511        case IPR_HOST_RCB_OVERLAY_ID_6:
2512                ipr_log_array_error(ioa_cfg, hostrcb);
2513                break;
2514        case IPR_HOST_RCB_OVERLAY_ID_7:
2515                ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2516                break;
2517        case IPR_HOST_RCB_OVERLAY_ID_12:
2518                ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2519                break;
2520        case IPR_HOST_RCB_OVERLAY_ID_13:
2521                ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2522                break;
2523        case IPR_HOST_RCB_OVERLAY_ID_14:
2524        case IPR_HOST_RCB_OVERLAY_ID_16:
2525                ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2526                break;
2527        case IPR_HOST_RCB_OVERLAY_ID_17:
2528                ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2529                break;
2530        case IPR_HOST_RCB_OVERLAY_ID_20:
2531                ipr_log_fabric_error(ioa_cfg, hostrcb);
2532                break;
2533        case IPR_HOST_RCB_OVERLAY_ID_21:
2534                ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2535                break;
2536        case IPR_HOST_RCB_OVERLAY_ID_23:
2537                ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2538                break;
2539        case IPR_HOST_RCB_OVERLAY_ID_24:
2540        case IPR_HOST_RCB_OVERLAY_ID_26:
2541                ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2542                break;
2543        case IPR_HOST_RCB_OVERLAY_ID_30:
2544                ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2545                break;
2546        case IPR_HOST_RCB_OVERLAY_ID_1:
2547        case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2548        default:
2549                ipr_log_generic_error(ioa_cfg, hostrcb);
2550                break;
2551        }
2552}
2553
2554/**
2555 * ipr_process_error - Op done function for an adapter error log.
2556 * @ipr_cmd:    ipr command struct
2557 *
2558 * This function is the op done function for an error log host
2559 * controlled async from the adapter. It will log the error and
2560 * send the HCAM back to the adapter.
2561 *
2562 * Return value:
2563 *      none
2564 **/
2565static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2566{
2567        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2568        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2569        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2570        u32 fd_ioasc;
2571
2572        if (ioa_cfg->sis64)
2573                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2574        else
2575                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2576
2577        list_del(&hostrcb->queue);
2578        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2579
2580        if (!ioasc) {
2581                ipr_handle_log_data(ioa_cfg, hostrcb);
2582                if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2583                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2584        } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2585                   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2586                dev_err(&ioa_cfg->pdev->dev,
2587                        "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2588        }
2589
2590        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2591}
2592
2593/**
2594 * ipr_timeout -  An internally generated op has timed out.
2595 * @ipr_cmd:    ipr command struct
2596 *
2597 * This function blocks host requests and initiates an
2598 * adapter reset.
2599 *
2600 * Return value:
2601 *      none
2602 **/
2603static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2604{
2605        unsigned long lock_flags = 0;
2606        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2607
2608        ENTER;
2609        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2610
2611        ioa_cfg->errors_logged++;
2612        dev_err(&ioa_cfg->pdev->dev,
2613                "Adapter being reset due to command timeout.\n");
2614
2615        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2616                ioa_cfg->sdt_state = GET_DUMP;
2617
2618        if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2619                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2620
2621        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2622        LEAVE;
2623}
2624
2625/**
2626 * ipr_oper_timeout -  Adapter timed out transitioning to operational
2627 * @ipr_cmd:    ipr command struct
2628 *
2629 * This function blocks host requests and initiates an
2630 * adapter reset.
2631 *
2632 * Return value:
2633 *      none
2634 **/
2635static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2636{
2637        unsigned long lock_flags = 0;
2638        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2639
2640        ENTER;
2641        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2642
2643        ioa_cfg->errors_logged++;
2644        dev_err(&ioa_cfg->pdev->dev,
2645                "Adapter timed out transitioning to operational.\n");
2646
2647        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2648                ioa_cfg->sdt_state = GET_DUMP;
2649
2650        if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2651                if (ipr_fastfail)
2652                        ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2653                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2654        }
2655
2656        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2657        LEAVE;
2658}
2659
2660/**
2661 * ipr_find_ses_entry - Find matching SES in SES table
2662 * @res:        resource entry struct of SES
2663 *
2664 * Return value:
2665 *      pointer to SES table entry / NULL on failure
2666 **/
2667static const struct ipr_ses_table_entry *
2668ipr_find_ses_entry(struct ipr_resource_entry *res)
2669{
2670        int i, j, matches;
2671        struct ipr_std_inq_vpids *vpids;
2672        const struct ipr_ses_table_entry *ste = ipr_ses_table;
2673
2674        for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2675                for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2676                        if (ste->compare_product_id_byte[j] == 'X') {
2677                                vpids = &res->std_inq_data.vpids;
2678                                if (vpids->product_id[j] == ste->product_id[j])
2679                                        matches++;
2680                                else
2681                                        break;
2682                        } else
2683                                matches++;
2684                }
2685
2686                if (matches == IPR_PROD_ID_LEN)
2687                        return ste;
2688        }
2689
2690        return NULL;
2691}
2692
2693/**
2694 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2695 * @ioa_cfg:    ioa config struct
2696 * @bus:                SCSI bus
2697 * @bus_width:  bus width
2698 *
2699 * Return value:
2700 *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2701 *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2702 *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2703 *      max 160MHz = max 320MB/sec).
2704 **/
2705static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2706{
2707        struct ipr_resource_entry *res;
2708        const struct ipr_ses_table_entry *ste;
2709        u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2710
2711        /* Loop through each config table entry in the config table buffer */
2712        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2713                if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2714                        continue;
2715
2716                if (bus != res->bus)
2717                        continue;
2718
2719                if (!(ste = ipr_find_ses_entry(res)))
2720                        continue;
2721
2722                max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2723        }
2724
2725        return max_xfer_rate;
2726}
2727
2728/**
2729 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2730 * @ioa_cfg:            ioa config struct
2731 * @max_delay:          max delay in micro-seconds to wait
2732 *
2733 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2734 *
2735 * Return value:
2736 *      0 on success / other on failure
2737 **/
2738static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2739{
2740        volatile u32 pcii_reg;
2741        int delay = 1;
2742
2743        /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2744        while (delay < max_delay) {
2745                pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2746
2747                if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2748                        return 0;
2749
2750                /* udelay cannot be used if delay is more than a few milliseconds */
2751                if ((delay / 1000) > MAX_UDELAY_MS)
2752                        mdelay(delay / 1000);
2753                else
2754                        udelay(delay);
2755
2756                delay += delay;
2757        }
2758        return -EIO;
2759}
2760
2761/**
2762 * ipr_get_sis64_dump_data_section - Dump IOA memory
2763 * @ioa_cfg:                    ioa config struct
2764 * @start_addr:                 adapter address to dump
2765 * @dest:                       destination kernel buffer
2766 * @length_in_words:            length to dump in 4 byte words
2767 *
2768 * Return value:
2769 *      0 on success
2770 **/
2771static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2772                                           u32 start_addr,
2773                                           __be32 *dest, u32 length_in_words)
2774{
2775        int i;
2776
2777        for (i = 0; i < length_in_words; i++) {
2778                writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2779                *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2780                dest++;
2781        }
2782
2783        return 0;
2784}
2785
2786/**
2787 * ipr_get_ldump_data_section - Dump IOA memory
2788 * @ioa_cfg:                    ioa config struct
2789 * @start_addr:                 adapter address to dump
2790 * @dest:                               destination kernel buffer
2791 * @length_in_words:    length to dump in 4 byte words
2792 *
2793 * Return value:
2794 *      0 on success / -EIO on failure
2795 **/
2796static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2797                                      u32 start_addr,
2798                                      __be32 *dest, u32 length_in_words)
2799{
2800        volatile u32 temp_pcii_reg;
2801        int i, delay = 0;
2802
2803        if (ioa_cfg->sis64)
2804                return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2805                                                       dest, length_in_words);
2806
2807        /* Write IOA interrupt reg starting LDUMP state  */
2808        writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2809               ioa_cfg->regs.set_uproc_interrupt_reg32);
2810
2811        /* Wait for IO debug acknowledge */
2812        if (ipr_wait_iodbg_ack(ioa_cfg,
2813                               IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2814                dev_err(&ioa_cfg->pdev->dev,
2815                        "IOA dump long data transfer timeout\n");
2816                return -EIO;
2817        }
2818
2819        /* Signal LDUMP interlocked - clear IO debug ack */
2820        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2821               ioa_cfg->regs.clr_interrupt_reg);
2822
2823        /* Write Mailbox with starting address */
2824        writel(start_addr, ioa_cfg->ioa_mailbox);
2825
2826        /* Signal address valid - clear IOA Reset alert */
2827        writel(IPR_UPROCI_RESET_ALERT,
2828               ioa_cfg->regs.clr_uproc_interrupt_reg32);
2829
2830        for (i = 0; i < length_in_words; i++) {
2831                /* Wait for IO debug acknowledge */
2832                if (ipr_wait_iodbg_ack(ioa_cfg,
2833                                       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2834                        dev_err(&ioa_cfg->pdev->dev,
2835                                "IOA dump short data transfer timeout\n");
2836                        return -EIO;
2837                }
2838
2839                /* Read data from mailbox and increment destination pointer */
2840                *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2841                dest++;
2842
2843                /* For all but the last word of data, signal data received */
2844                if (i < (length_in_words - 1)) {
2845                        /* Signal dump data received - Clear IO debug Ack */
2846                        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2847                               ioa_cfg->regs.clr_interrupt_reg);
2848                }
2849        }
2850
2851        /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2852        writel(IPR_UPROCI_RESET_ALERT,
2853               ioa_cfg->regs.set_uproc_interrupt_reg32);
2854
2855        writel(IPR_UPROCI_IO_DEBUG_ALERT,
2856               ioa_cfg->regs.clr_uproc_interrupt_reg32);
2857
2858        /* Signal dump data received - Clear IO debug Ack */
2859        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2860               ioa_cfg->regs.clr_interrupt_reg);
2861
2862        /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2863        while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2864                temp_pcii_reg =
2865                    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2866
2867                if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2868                        return 0;
2869
2870                udelay(10);
2871                delay += 10;
2872        }
2873
2874        return 0;
2875}
2876
2877#ifdef CONFIG_SCSI_IPR_DUMP
2878/**
2879 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2880 * @ioa_cfg:            ioa config struct
2881 * @pci_address:        adapter address
2882 * @length:                     length of data to copy
2883 *
2884 * Copy data from PCI adapter to kernel buffer.
2885 * Note: length MUST be a 4 byte multiple
2886 * Return value:
2887 *      0 on success / other on failure
2888 **/
2889static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2890                        unsigned long pci_address, u32 length)
2891{
2892        int bytes_copied = 0;
2893        int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2894        __be32 *page;
2895        unsigned long lock_flags = 0;
2896        struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2897
2898        if (ioa_cfg->sis64)
2899                max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2900        else
2901                max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2902
2903        while (bytes_copied < length &&
2904               (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2905                if (ioa_dump->page_offset >= PAGE_SIZE ||
2906                    ioa_dump->page_offset == 0) {
2907                        page = (__be32 *)__get_free_page(GFP_ATOMIC);
2908
2909                        if (!page) {
2910                                ipr_trace;
2911                                return bytes_copied;
2912                        }
2913
2914                        ioa_dump->page_offset = 0;
2915                        ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2916                        ioa_dump->next_page_index++;
2917                } else
2918                        page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2919
2920                rem_len = length - bytes_copied;
2921                rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2922                cur_len = min(rem_len, rem_page_len);
2923
2924                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2925                if (ioa_cfg->sdt_state == ABORT_DUMP) {
2926                        rc = -EIO;
2927                } else {
2928                        rc = ipr_get_ldump_data_section(ioa_cfg,
2929                                                        pci_address + bytes_copied,
2930                                                        &page[ioa_dump->page_offset / 4],
2931                                                        (cur_len / sizeof(u32)));
2932                }
2933                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2934
2935                if (!rc) {
2936                        ioa_dump->page_offset += cur_len;
2937                        bytes_copied += cur_len;
2938                } else {
2939                        ipr_trace;
2940                        break;
2941                }
2942                schedule();
2943        }
2944
2945        return bytes_copied;
2946}
2947
2948/**
2949 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2950 * @hdr:        dump entry header struct
2951 *
2952 * Return value:
2953 *      nothing
2954 **/
2955static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2956{
2957        hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2958        hdr->num_elems = 1;
2959        hdr->offset = sizeof(*hdr);
2960        hdr->status = IPR_DUMP_STATUS_SUCCESS;
2961}
2962
2963/**
2964 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2965 * @ioa_cfg:    ioa config struct
2966 * @driver_dump:        driver dump struct
2967 *
2968 * Return value:
2969 *      nothing
2970 **/
2971static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2972                                   struct ipr_driver_dump *driver_dump)
2973{
2974        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2975
2976        ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2977        driver_dump->ioa_type_entry.hdr.len =
2978                sizeof(struct ipr_dump_ioa_type_entry) -
2979                sizeof(struct ipr_dump_entry_header);
2980        driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2981        driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2982        driver_dump->ioa_type_entry.type = ioa_cfg->type;
2983        driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2984                (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2985                ucode_vpd->minor_release[1];
2986        driver_dump->hdr.num_entries++;
2987}
2988
2989/**
2990 * ipr_dump_version_data - Fill in the driver version in the dump.
2991 * @ioa_cfg:    ioa config struct
2992 * @driver_dump:        driver dump struct
2993 *
2994 * Return value:
2995 *      nothing
2996 **/
2997static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2998                                  struct ipr_driver_dump *driver_dump)
2999{
3000        ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3001        driver_dump->version_entry.hdr.len =
3002                sizeof(struct ipr_dump_version_entry) -
3003                sizeof(struct ipr_dump_entry_header);
3004        driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3005        driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3006        strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3007        driver_dump->hdr.num_entries++;
3008}
3009
3010/**
3011 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3012 * @ioa_cfg:    ioa config struct
3013 * @driver_dump:        driver dump struct
3014 *
3015 * Return value:
3016 *      nothing
3017 **/
3018static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3019                                   struct ipr_driver_dump *driver_dump)
3020{
3021        ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3022        driver_dump->trace_entry.hdr.len =
3023                sizeof(struct ipr_dump_trace_entry) -
3024                sizeof(struct ipr_dump_entry_header);
3025        driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3026        driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3027        memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3028        driver_dump->hdr.num_entries++;
3029}
3030
3031/**
3032 * ipr_dump_location_data - Fill in the IOA location in the dump.
3033 * @ioa_cfg:    ioa config struct
3034 * @driver_dump:        driver dump struct
3035 *
3036 * Return value:
3037 *      nothing
3038 **/
3039static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3040                                   struct ipr_driver_dump *driver_dump)
3041{
3042        ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3043        driver_dump->location_entry.hdr.len =
3044                sizeof(struct ipr_dump_location_entry) -
3045                sizeof(struct ipr_dump_entry_header);
3046        driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3047        driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3048        strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3049        driver_dump->hdr.num_entries++;
3050}
3051
3052/**
3053 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3054 * @ioa_cfg:    ioa config struct
3055 * @dump:               dump struct
3056 *
3057 * Return value:
3058 *      nothing
3059 **/
3060static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3061{
3062        unsigned long start_addr, sdt_word;
3063        unsigned long lock_flags = 0;
3064        struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3065        struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3066        u32 num_entries, max_num_entries, start_off, end_off;
3067        u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3068        struct ipr_sdt *sdt;
3069        int valid = 1;
3070        int i;
3071
3072        ENTER;
3073
3074        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3075
3076        if (ioa_cfg->sdt_state != READ_DUMP) {
3077                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3078                return;
3079        }
3080
3081        if (ioa_cfg->sis64) {
3082                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3083                ssleep(IPR_DUMP_DELAY_SECONDS);
3084                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3085        }
3086
3087        start_addr = readl(ioa_cfg->ioa_mailbox);
3088
3089        if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3090                dev_err(&ioa_cfg->pdev->dev,
3091                        "Invalid dump table format: %lx\n", start_addr);
3092                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3093                return;
3094        }
3095
3096        dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3097
3098        driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3099
3100        /* Initialize the overall dump header */
3101        driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3102        driver_dump->hdr.num_entries = 1;
3103        driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3104        driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3105        driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3106        driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3107
3108        ipr_dump_version_data(ioa_cfg, driver_dump);
3109        ipr_dump_location_data(ioa_cfg, driver_dump);
3110        ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3111        ipr_dump_trace_data(ioa_cfg, driver_dump);
3112
3113        /* Update dump_header */
3114        driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3115
3116        /* IOA Dump entry */
3117        ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3118        ioa_dump->hdr.len = 0;
3119        ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3120        ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3121
3122        /* First entries in sdt are actually a list of dump addresses and
3123         lengths to gather the real dump data.  sdt represents the pointer
3124         to the ioa generated dump table.  Dump data will be extracted based
3125         on entries in this table */
3126        sdt = &ioa_dump->sdt;
3127
3128        if (ioa_cfg->sis64) {
3129                max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3130                max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3131        } else {
3132                max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3133                max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3134        }
3135
3136        bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3137                        (max_num_entries * sizeof(struct ipr_sdt_entry));
3138        rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3139                                        bytes_to_copy / sizeof(__be32));
3140
3141        /* Smart Dump table is ready to use and the first entry is valid */
3142        if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3143            (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3144                dev_err(&ioa_cfg->pdev->dev,
3145                        "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3146                        rc, be32_to_cpu(sdt->hdr.state));
3147                driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3148                ioa_cfg->sdt_state = DUMP_OBTAINED;
3149                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150                return;
3151        }
3152
3153        num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3154
3155        if (num_entries > max_num_entries)
3156                num_entries = max_num_entries;
3157
3158        /* Update dump length to the actual data to be copied */
3159        dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3160        if (ioa_cfg->sis64)
3161                dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3162        else
3163                dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3164
3165        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3166
3167        for (i = 0; i < num_entries; i++) {
3168                if (ioa_dump->hdr.len > max_dump_size) {
3169                        driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3170                        break;
3171                }
3172
3173                if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3174                        sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3175                        if (ioa_cfg->sis64)
3176                                bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3177                        else {
3178                                start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3179                                end_off = be32_to_cpu(sdt->entry[i].end_token);
3180
3181                                if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3182                                        bytes_to_copy = end_off - start_off;
3183                                else
3184                                        valid = 0;
3185                        }
3186                        if (valid) {
3187                                if (bytes_to_copy > max_dump_size) {
3188                                        sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3189                                        continue;
3190                                }
3191
3192                                /* Copy data from adapter to driver buffers */
3193                                bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3194                                                            bytes_to_copy);
3195
3196                                ioa_dump->hdr.len += bytes_copied;
3197
3198                                if (bytes_copied != bytes_to_copy) {
3199                                        driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3200                                        break;
3201                                }
3202                        }
3203                }
3204        }
3205
3206        dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3207
3208        /* Update dump_header */
3209        driver_dump->hdr.len += ioa_dump->hdr.len;
3210        wmb();
3211        ioa_cfg->sdt_state = DUMP_OBTAINED;
3212        LEAVE;
3213}
3214
3215#else
3216#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3217#endif
3218
3219/**
3220 * ipr_release_dump - Free adapter dump memory
3221 * @kref:       kref struct
3222 *
3223 * Return value:
3224 *      nothing
3225 **/
3226static void ipr_release_dump(struct kref *kref)
3227{
3228        struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3229        struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3230        unsigned long lock_flags = 0;
3231        int i;
3232
3233        ENTER;
3234        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3235        ioa_cfg->dump = NULL;
3236        ioa_cfg->sdt_state = INACTIVE;
3237        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3238
3239        for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3240                free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3241
3242        vfree(dump->ioa_dump.ioa_data);
3243        kfree(dump);
3244        LEAVE;
3245}
3246
3247/**
3248 * ipr_worker_thread - Worker thread
3249 * @work:               ioa config struct
3250 *
3251 * Called at task level from a work thread. This function takes care
3252 * of adding and removing device from the mid-layer as configuration
3253 * changes are detected by the adapter.
3254 *
3255 * Return value:
3256 *      nothing
3257 **/
3258static void ipr_worker_thread(struct work_struct *work)
3259{
3260        unsigned long lock_flags;
3261        struct ipr_resource_entry *res;
3262        struct scsi_device *sdev;
3263        struct ipr_dump *dump;
3264        struct ipr_ioa_cfg *ioa_cfg =
3265                container_of(work, struct ipr_ioa_cfg, work_q);
3266        u8 bus, target, lun;
3267        int did_work;
3268
3269        ENTER;
3270        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3271
3272        if (ioa_cfg->sdt_state == READ_DUMP) {
3273                dump = ioa_cfg->dump;
3274                if (!dump) {
3275                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276                        return;
3277                }
3278                kref_get(&dump->kref);
3279                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3280                ipr_get_ioa_dump(ioa_cfg, dump);
3281                kref_put(&dump->kref, ipr_release_dump);
3282
3283                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3284                if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3285                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3286                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3287                return;
3288        }
3289
3290restart:
3291        do {
3292                did_work = 0;
3293                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3294                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3295                        return;
3296                }
3297
3298                list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3299                        if (res->del_from_ml && res->sdev) {
3300                                did_work = 1;
3301                                sdev = res->sdev;
3302                                if (!scsi_device_get(sdev)) {
3303                                        if (!res->add_to_ml)
3304                                                list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3305                                        else
3306                                                res->del_from_ml = 0;
3307                                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308                                        scsi_remove_device(sdev);
3309                                        scsi_device_put(sdev);
3310                                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3311                                }
3312                                break;
3313                        }
3314                }
3315        } while (did_work);
3316
3317        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3318                if (res->add_to_ml) {
3319                        bus = res->bus;
3320                        target = res->target;
3321                        lun = res->lun;
3322                        res->add_to_ml = 0;
3323                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3324                        scsi_add_device(ioa_cfg->host, bus, target, lun);
3325                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326                        goto restart;
3327                }
3328        }
3329
3330        ioa_cfg->scan_done = 1;
3331        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3332        kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3333        LEAVE;
3334}
3335
3336#ifdef CONFIG_SCSI_IPR_TRACE
3337/**
3338 * ipr_read_trace - Dump the adapter trace
3339 * @filp:               open sysfs file
3340 * @kobj:               kobject struct
3341 * @bin_attr:           bin_attribute struct
3342 * @buf:                buffer
3343 * @off:                offset
3344 * @count:              buffer size
3345 *
3346 * Return value:
3347 *      number of bytes printed to buffer
3348 **/
3349static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3350                              struct bin_attribute *bin_attr,
3351                              char *buf, loff_t off, size_t count)
3352{
3353        struct device *dev = container_of(kobj, struct device, kobj);
3354        struct Scsi_Host *shost = class_to_shost(dev);
3355        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3356        unsigned long lock_flags = 0;
3357        ssize_t ret;
3358
3359        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3360        ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3361                                IPR_TRACE_SIZE);
3362        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3363
3364        return ret;
3365}
3366
3367static struct bin_attribute ipr_trace_attr = {
3368        .attr = {
3369                .name = "trace",
3370                .mode = S_IRUGO,
3371        },
3372        .size = 0,
3373        .read = ipr_read_trace,
3374};
3375#endif
3376
3377/**
3378 * ipr_show_fw_version - Show the firmware version
3379 * @dev:        class device struct
3380 * @buf:        buffer
3381 *
3382 * Return value:
3383 *      number of bytes printed to buffer
3384 **/
3385static ssize_t ipr_show_fw_version(struct device *dev,
3386                                   struct device_attribute *attr, char *buf)
3387{
3388        struct Scsi_Host *shost = class_to_shost(dev);
3389        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3390        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3391        unsigned long lock_flags = 0;
3392        int len;
3393
3394        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3395        len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3396                       ucode_vpd->major_release, ucode_vpd->card_type,
3397                       ucode_vpd->minor_release[0],
3398                       ucode_vpd->minor_release[1]);
3399        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3400        return len;
3401}
3402
3403static struct device_attribute ipr_fw_version_attr = {
3404        .attr = {
3405                .name =         "fw_version",
3406                .mode =         S_IRUGO,
3407        },
3408        .show = ipr_show_fw_version,
3409};
3410
3411/**
3412 * ipr_show_log_level - Show the adapter's error logging level
3413 * @dev:        class device struct
3414 * @buf:        buffer
3415 *
3416 * Return value:
3417 *      number of bytes printed to buffer
3418 **/
3419static ssize_t ipr_show_log_level(struct device *dev,
3420                                   struct device_attribute *attr, char *buf)
3421{
3422        struct Scsi_Host *shost = class_to_shost(dev);
3423        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3424        unsigned long lock_flags = 0;
3425        int len;
3426
3427        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3428        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3429        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3430        return len;
3431}
3432
3433/**
3434 * ipr_store_log_level - Change the adapter's error logging level
3435 * @dev:        class device struct
3436 * @buf:        buffer
3437 *
3438 * Return value:
3439 *      number of bytes printed to buffer
3440 **/
3441static ssize_t ipr_store_log_level(struct device *dev,
3442                                   struct device_attribute *attr,
3443                                   const char *buf, size_t count)
3444{
3445        struct Scsi_Host *shost = class_to_shost(dev);
3446        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3447        unsigned long lock_flags = 0;
3448
3449        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3450        ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3451        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3452        return strlen(buf);
3453}
3454
3455static struct device_attribute ipr_log_level_attr = {
3456        .attr = {
3457                .name =         "log_level",
3458                .mode =         S_IRUGO | S_IWUSR,
3459        },
3460        .show = ipr_show_log_level,
3461        .store = ipr_store_log_level
3462};
3463
3464/**
3465 * ipr_store_diagnostics - IOA Diagnostics interface
3466 * @dev:        device struct
3467 * @buf:        buffer
3468 * @count:      buffer size
3469 *
3470 * This function will reset the adapter and wait a reasonable
3471 * amount of time for any errors that the adapter might log.
3472 *
3473 * Return value:
3474 *      count on success / other on failure
3475 **/
3476static ssize_t ipr_store_diagnostics(struct device *dev,
3477                                     struct device_attribute *attr,
3478                                     const char *buf, size_t count)
3479{
3480        struct Scsi_Host *shost = class_to_shost(dev);
3481        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3482        unsigned long lock_flags = 0;
3483        int rc = count;
3484
3485        if (!capable(CAP_SYS_ADMIN))
3486                return -EACCES;
3487
3488        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3489        while (ioa_cfg->in_reset_reload) {
3490                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3491                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3492                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3493        }
3494
3495        ioa_cfg->errors_logged = 0;
3496        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3497
3498        if (ioa_cfg->in_reset_reload) {
3499                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3500                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3501
3502                /* Wait for a second for any errors to be logged */
3503                msleep(1000);
3504        } else {
3505                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3506                return -EIO;
3507        }
3508
3509        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3510        if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3511                rc = -EIO;
3512        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3513
3514        return rc;
3515}
3516
3517static struct device_attribute ipr_diagnostics_attr = {
3518        .attr = {
3519                .name =         "run_diagnostics",
3520                .mode =         S_IWUSR,
3521        },
3522        .store = ipr_store_diagnostics
3523};
3524
3525/**
3526 * ipr_show_adapter_state - Show the adapter's state
3527 * @class_dev:  device struct
3528 * @buf:        buffer
3529 *
3530 * Return value:
3531 *      number of bytes printed to buffer
3532 **/
3533static ssize_t ipr_show_adapter_state(struct device *dev,
3534                                      struct device_attribute *attr, char *buf)
3535{
3536        struct Scsi_Host *shost = class_to_shost(dev);
3537        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3538        unsigned long lock_flags = 0;
3539        int len;
3540
3541        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3542        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3543                len = snprintf(buf, PAGE_SIZE, "offline\n");
3544        else
3545                len = snprintf(buf, PAGE_SIZE, "online\n");
3546        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3547        return len;
3548}
3549
3550/**
3551 * ipr_store_adapter_state - Change adapter state
3552 * @dev:        device struct
3553 * @buf:        buffer
3554 * @count:      buffer size
3555 *
3556 * This function will change the adapter's state.
3557 *
3558 * Return value:
3559 *      count on success / other on failure
3560 **/
3561static ssize_t ipr_store_adapter_state(struct device *dev,
3562                                       struct device_attribute *attr,
3563                                       const char *buf, size_t count)
3564{
3565        struct Scsi_Host *shost = class_to_shost(dev);
3566        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3567        unsigned long lock_flags;
3568        int result = count, i;
3569
3570        if (!capable(CAP_SYS_ADMIN))
3571                return -EACCES;
3572
3573        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3574        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3575            !strncmp(buf, "online", 6)) {
3576                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3577                        spin_lock(&ioa_cfg->hrrq[i]._lock);
3578                        ioa_cfg->hrrq[i].ioa_is_dead = 0;
3579                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
3580                }
3581                wmb();
3582                ioa_cfg->reset_retries = 0;
3583                ioa_cfg->in_ioa_bringdown = 0;
3584                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3585        }
3586        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3587        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3588
3589        return result;
3590}
3591
3592static struct device_attribute ipr_ioa_state_attr = {
3593        .attr = {
3594                .name =         "online_state",
3595                .mode =         S_IRUGO | S_IWUSR,
3596        },
3597        .show = ipr_show_adapter_state,
3598        .store = ipr_store_adapter_state
3599};
3600
3601/**
3602 * ipr_store_reset_adapter - Reset the adapter
3603 * @dev:        device struct
3604 * @buf:        buffer
3605 * @count:      buffer size
3606 *
3607 * This function will reset the adapter.
3608 *
3609 * Return value:
3610 *      count on success / other on failure
3611 **/
3612static ssize_t ipr_store_reset_adapter(struct device *dev,
3613                                       struct device_attribute *attr,
3614                                       const char *buf, size_t count)
3615{
3616        struct Scsi_Host *shost = class_to_shost(dev);
3617        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3618        unsigned long lock_flags;
3619        int result = count;
3620
3621        if (!capable(CAP_SYS_ADMIN))
3622                return -EACCES;
3623
3624        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3625        if (!ioa_cfg->in_reset_reload)
3626                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3627        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3628        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3629
3630        return result;
3631}
3632
3633static struct device_attribute ipr_ioa_reset_attr = {
3634        .attr = {
3635                .name =         "reset_host",
3636                .mode =         S_IWUSR,
3637        },
3638        .store = ipr_store_reset_adapter
3639};
3640
3641static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3642 /**
3643 * ipr_show_iopoll_weight - Show ipr polling mode
3644 * @dev:        class device struct
3645 * @buf:        buffer
3646 *
3647 * Return value:
3648 *      number of bytes printed to buffer
3649 **/
3650static ssize_t ipr_show_iopoll_weight(struct device *dev,
3651                                   struct device_attribute *attr, char *buf)
3652{
3653        struct Scsi_Host *shost = class_to_shost(dev);
3654        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3655        unsigned long lock_flags = 0;
3656        int len;
3657
3658        spin_lock_irqsave(shost->host_lock, lock_flags);
3659        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3660        spin_unlock_irqrestore(shost->host_lock, lock_flags);
3661
3662        return len;
3663}
3664
3665/**
3666 * ipr_store_iopoll_weight - Change the adapter's polling mode
3667 * @dev:        class device struct
3668 * @buf:        buffer
3669 *
3670 * Return value:
3671 *      number of bytes printed to buffer
3672 **/
3673static ssize_t ipr_store_iopoll_weight(struct device *dev,
3674                                        struct device_attribute *attr,
3675                                        const char *buf, size_t count)
3676{
3677        struct Scsi_Host *shost = class_to_shost(dev);
3678        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3679        unsigned long user_iopoll_weight;
3680        unsigned long lock_flags = 0;
3681        int i;
3682
3683        if (!ioa_cfg->sis64) {
3684                dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3685                return -EINVAL;
3686        }
3687        if (kstrtoul(buf, 10, &user_iopoll_weight))
3688                return -EINVAL;
3689
3690        if (user_iopoll_weight > 256) {
3691                dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3692                return -EINVAL;
3693        }
3694
3695        if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3696                dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
3697                return strlen(buf);
3698        }
3699
3700        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3701                for (i = 1; i < ioa_cfg->hrrq_num; i++)
3702                        blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3703        }
3704
3705        spin_lock_irqsave(shost->host_lock, lock_flags);
3706        ioa_cfg->iopoll_weight = user_iopoll_weight;
3707        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3708                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3709                        blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3710                                        ioa_cfg->iopoll_weight, ipr_iopoll);
3711                        blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3712                }
3713        }
3714        spin_unlock_irqrestore(shost->host_lock, lock_flags);
3715
3716        return strlen(buf);
3717}
3718
3719static struct device_attribute ipr_iopoll_weight_attr = {
3720        .attr = {
3721                .name =         "iopoll_weight",
3722                .mode =         S_IRUGO | S_IWUSR,
3723        },
3724        .show = ipr_show_iopoll_weight,
3725        .store = ipr_store_iopoll_weight
3726};
3727
3728/**
3729 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3730 * @buf_len:            buffer length
3731 *
3732 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3733 * list to use for microcode download
3734 *
3735 * Return value:
3736 *      pointer to sglist / NULL on failure
3737 **/
3738static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3739{
3740        int sg_size, order, bsize_elem, num_elem, i, j;
3741        struct ipr_sglist *sglist;
3742        struct scatterlist *scatterlist;
3743        struct page *page;
3744
3745        /* Get the minimum size per scatter/gather element */
3746        sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3747
3748        /* Get the actual size per element */
3749        order = get_order(sg_size);
3750
3751        /* Determine the actual number of bytes per element */
3752        bsize_elem = PAGE_SIZE * (1 << order);
3753
3754        /* Determine the actual number of sg entries needed */
3755        if (buf_len % bsize_elem)
3756                num_elem = (buf_len / bsize_elem) + 1;
3757        else
3758                num_elem = buf_len / bsize_elem;
3759
3760        /* Allocate a scatter/gather list for the DMA */
3761        sglist = kzalloc(sizeof(struct ipr_sglist) +
3762                         (sizeof(struct scatterlist) * (num_elem - 1)),
3763                         GFP_KERNEL);
3764
3765        if (sglist == NULL) {
3766                ipr_trace;
3767                return NULL;
3768        }
3769
3770        scatterlist = sglist->scatterlist;
3771        sg_init_table(scatterlist, num_elem);
3772
3773        sglist->order = order;
3774        sglist->num_sg = num_elem;
3775
3776        /* Allocate a bunch of sg elements */
3777        for (i = 0; i < num_elem; i++) {
3778                page = alloc_pages(GFP_KERNEL, order);
3779                if (!page) {
3780                        ipr_trace;
3781
3782                        /* Free up what we already allocated */
3783                        for (j = i - 1; j >= 0; j--)
3784                                __free_pages(sg_page(&scatterlist[j]), order);
3785                        kfree(sglist);
3786                        return NULL;
3787                }
3788
3789                sg_set_page(&scatterlist[i], page, 0, 0);
3790        }
3791
3792        return sglist;
3793}
3794
3795/**
3796 * ipr_free_ucode_buffer - Frees a microcode download buffer
3797 * @p_dnld:             scatter/gather list pointer
3798 *
3799 * Free a DMA'able ucode download buffer previously allocated with
3800 * ipr_alloc_ucode_buffer
3801 *
3802 * Return value:
3803 *      nothing
3804 **/
3805static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3806{
3807        int i;
3808
3809        for (i = 0; i < sglist->num_sg; i++)
3810                __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3811
3812        kfree(sglist);
3813}
3814
3815/**
3816 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3817 * @sglist:             scatter/gather list pointer
3818 * @buffer:             buffer pointer
3819 * @len:                buffer length
3820 *
3821 * Copy a microcode image from a user buffer into a buffer allocated by
3822 * ipr_alloc_ucode_buffer
3823 *
3824 * Return value:
3825 *      0 on success / other on failure
3826 **/
3827static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3828                                 u8 *buffer, u32 len)
3829{
3830        int bsize_elem, i, result = 0;
3831        struct scatterlist *scatterlist;
3832        void *kaddr;
3833
3834        /* Determine the actual number of bytes per element */
3835        bsize_elem = PAGE_SIZE * (1 << sglist->order);
3836
3837        scatterlist = sglist->scatterlist;
3838
3839        for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3840                struct page *page = sg_page(&scatterlist[i]);
3841
3842                kaddr = kmap(page);
3843                memcpy(kaddr, buffer, bsize_elem);
3844                kunmap(page);
3845
3846                scatterlist[i].length = bsize_elem;
3847
3848                if (result != 0) {
3849                        ipr_trace;
3850                        return result;
3851                }
3852        }
3853
3854        if (len % bsize_elem) {
3855                struct page *page = sg_page(&scatterlist[i]);
3856
3857                kaddr = kmap(page);
3858                memcpy(kaddr, buffer, len % bsize_elem);
3859                kunmap(page);
3860
3861                scatterlist[i].length = len % bsize_elem;
3862        }
3863
3864        sglist->buffer_len = len;
3865        return result;
3866}
3867
3868/**
3869 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3870 * @ipr_cmd:            ipr command struct
3871 * @sglist:             scatter/gather list
3872 *
3873 * Builds a microcode download IOA data list (IOADL).
3874 *
3875 **/
3876static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3877                                    struct ipr_sglist *sglist)
3878{
3879        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3880        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3881        struct scatterlist *scatterlist = sglist->scatterlist;
3882        int i;
3883
3884        ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3885        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3886        ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3887
3888        ioarcb->ioadl_len =
3889                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3890        for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3891                ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3892                ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3893                ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3894        }
3895
3896        ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3897}
3898
3899/**
3900 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3901 * @ipr_cmd:    ipr command struct
3902 * @sglist:             scatter/gather list
3903 *
3904 * Builds a microcode download IOA data list (IOADL).
3905 *
3906 **/
3907static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3908                                  struct ipr_sglist *sglist)
3909{
3910        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3911        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3912        struct scatterlist *scatterlist = sglist->scatterlist;
3913        int i;
3914
3915        ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3916        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3917        ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3918
3919        ioarcb->ioadl_len =
3920                cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3921
3922        for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3923                ioadl[i].flags_and_data_len =
3924                        cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3925                ioadl[i].address =
3926                        cpu_to_be32(sg_dma_address(&scatterlist[i]));
3927        }
3928
3929        ioadl[i-1].flags_and_data_len |=
3930                cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3931}
3932
3933/**
3934 * ipr_update_ioa_ucode - Update IOA's microcode
3935 * @ioa_cfg:    ioa config struct
3936 * @sglist:             scatter/gather list
3937 *
3938 * Initiate an adapter reset to update the IOA's microcode
3939 *
3940 * Return value:
3941 *      0 on success / -EIO on failure
3942 **/
3943static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3944                                struct ipr_sglist *sglist)
3945{
3946        unsigned long lock_flags;
3947
3948        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3949        while (ioa_cfg->in_reset_reload) {
3950                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3951                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3952                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3953        }
3954
3955        if (ioa_cfg->ucode_sglist) {
3956                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3957                dev_err(&ioa_cfg->pdev->dev,
3958                        "Microcode download already in progress\n");
3959                return -EIO;
3960        }
3961
3962        sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3963                                        sglist->scatterlist, sglist->num_sg,
3964                                        DMA_TO_DEVICE);
3965
3966        if (!sglist->num_dma_sg) {
3967                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3968                dev_err(&ioa_cfg->pdev->dev,
3969                        "Failed to map microcode download buffer!\n");
3970                return -EIO;
3971        }
3972
3973        ioa_cfg->ucode_sglist = sglist;
3974        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3975        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3976        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3977
3978        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3979        ioa_cfg->ucode_sglist = NULL;
3980        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3981        return 0;
3982}
3983
3984/**
3985 * ipr_store_update_fw - Update the firmware on the adapter
3986 * @class_dev:  device struct
3987 * @buf:        buffer
3988 * @count:      buffer size
3989 *
3990 * This function will update the firmware on the adapter.
3991 *
3992 * Return value:
3993 *      count on success / other on failure
3994 **/
3995static ssize_t ipr_store_update_fw(struct device *dev,
3996                                   struct device_attribute *attr,
3997                                   const char *buf, size_t count)
3998{
3999        struct Scsi_Host *shost = class_to_shost(dev);
4000        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4001        struct ipr_ucode_image_header *image_hdr;
4002        const struct firmware *fw_entry;
4003        struct ipr_sglist *sglist;
4004        char fname[100];
4005        char *src;
4006        int len, result, dnld_size;
4007
4008        if (!capable(CAP_SYS_ADMIN))
4009                return -EACCES;
4010
4011        len = snprintf(fname, 99, "%s", buf);
4012        fname[len-1] = '\0';
4013
4014        if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4015                dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4016                return -EIO;
4017        }
4018
4019        image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4020
4021        src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4022        dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4023        sglist = ipr_alloc_ucode_buffer(dnld_size);
4024
4025        if (!sglist) {
4026                dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4027                release_firmware(fw_entry);
4028                return -ENOMEM;
4029        }
4030
4031        result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4032
4033        if (result) {
4034                dev_err(&ioa_cfg->pdev->dev,
4035                        "Microcode buffer copy to DMA buffer failed\n");
4036                goto out;
4037        }
4038
4039        ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4040
4041        result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4042
4043        if (!result)
4044                result = count;
4045out:
4046        ipr_free_ucode_buffer(sglist);
4047        release_firmware(fw_entry);
4048        return result;
4049}
4050
4051static struct device_attribute ipr_update_fw_attr = {
4052        .attr = {
4053                .name =         "update_fw",
4054                .mode =         S_IWUSR,
4055        },
4056        .store = ipr_store_update_fw
4057};
4058
4059/**
4060 * ipr_show_fw_type - Show the adapter's firmware type.
4061 * @dev:        class device struct
4062 * @buf:        buffer
4063 *
4064 * Return value:
4065 *      number of bytes printed to buffer
4066 **/
4067static ssize_t ipr_show_fw_type(struct device *dev,
4068                                struct device_attribute *attr, char *buf)
4069{
4070        struct Scsi_Host *shost = class_to_shost(dev);
4071        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4072        unsigned long lock_flags = 0;
4073        int len;
4074
4075        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4076        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4077        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4078        return len;
4079}
4080
4081static struct device_attribute ipr_ioa_fw_type_attr = {
4082        .attr = {
4083                .name =         "fw_type",
4084                .mode =         S_IRUGO,
4085        },
4086        .show = ipr_show_fw_type
4087};
4088
4089static struct device_attribute *ipr_ioa_attrs[] = {
4090        &ipr_fw_version_attr,
4091        &ipr_log_level_attr,
4092        &ipr_diagnostics_attr,
4093        &ipr_ioa_state_attr,
4094        &ipr_ioa_reset_attr,
4095        &ipr_update_fw_attr,
4096        &ipr_ioa_fw_type_attr,
4097        &ipr_iopoll_weight_attr,
4098        NULL,
4099};
4100
4101#ifdef CONFIG_SCSI_IPR_DUMP
4102/**
4103 * ipr_read_dump - Dump the adapter
4104 * @filp:               open sysfs file
4105 * @kobj:               kobject struct
4106 * @bin_attr:           bin_attribute struct
4107 * @buf:                buffer
4108 * @off:                offset
4109 * @count:              buffer size
4110 *
4111 * Return value:
4112 *      number of bytes printed to buffer
4113 **/
4114static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4115                             struct bin_attribute *bin_attr,
4116                             char *buf, loff_t off, size_t count)
4117{
4118        struct device *cdev = container_of(kobj, struct device, kobj);
4119        struct Scsi_Host *shost = class_to_shost(cdev);
4120        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4121        struct ipr_dump *dump;
4122        unsigned long lock_flags = 0;
4123        char *src;
4124        int len, sdt_end;
4125        size_t rc = count;
4126
4127        if (!capable(CAP_SYS_ADMIN))
4128                return -EACCES;
4129
4130        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4131        dump = ioa_cfg->dump;
4132
4133        if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4134                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4135                return 0;
4136        }
4137        kref_get(&dump->kref);
4138        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4139
4140        if (off > dump->driver_dump.hdr.len) {
4141                kref_put(&dump->kref, ipr_release_dump);
4142                return 0;
4143        }
4144
4145        if (off + count > dump->driver_dump.hdr.len) {
4146                count = dump->driver_dump.hdr.len - off;
4147                rc = count;
4148        }
4149
4150        if (count && off < sizeof(dump->driver_dump)) {
4151                if (off + count > sizeof(dump->driver_dump))
4152                        len = sizeof(dump->driver_dump) - off;
4153                else
4154                        len = count;
4155                src = (u8 *)&dump->driver_dump + off;
4156                memcpy(buf, src, len);
4157                buf += len;
4158                off += len;
4159                count -= len;
4160        }
4161
4162        off -= sizeof(dump->driver_dump);
4163
4164        if (ioa_cfg->sis64)
4165                sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4166                          (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4167                           sizeof(struct ipr_sdt_entry));
4168        else
4169                sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4170                          (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4171
4172        if (count && off < sdt_end) {
4173                if (off + count > sdt_end)
4174                        len = sdt_end - off;
4175                else
4176                        len = count;
4177                src = (u8 *)&dump->ioa_dump + off;
4178                memcpy(buf, src, len);
4179                buf += len;
4180                off += len;
4181                count -= len;
4182        }
4183
4184        off -= sdt_end;
4185
4186        while (count) {
4187                if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4188                        len = PAGE_ALIGN(off) - off;
4189                else
4190                        len = count;
4191                src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4192                src += off & ~PAGE_MASK;
4193                memcpy(buf, src, len);
4194                buf += len;
4195                off += len;
4196                count -= len;
4197        }
4198
4199        kref_put(&dump->kref, ipr_release_dump);
4200        return rc;
4201}
4202
4203/**
4204 * ipr_alloc_dump - Prepare for adapter dump
4205 * @ioa_cfg:    ioa config struct
4206 *
4207 * Return value:
4208 *      0 on success / other on failure
4209 **/
4210static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4211{
4212        struct ipr_dump *dump;
4213        __be32 **ioa_data;
4214        unsigned long lock_flags = 0;
4215
4216        dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4217
4218        if (!dump) {
4219                ipr_err("Dump memory allocation failed\n");
4220                return -ENOMEM;
4221        }
4222
4223        if (ioa_cfg->sis64)
4224                ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4225        else
4226                ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4227
4228        if (!ioa_data) {
4229                ipr_err("Dump memory allocation failed\n");
4230                kfree(dump);
4231                return -ENOMEM;
4232        }
4233
4234        dump->ioa_dump.ioa_data = ioa_data;
4235
4236        kref_init(&dump->kref);
4237        dump->ioa_cfg = ioa_cfg;
4238
4239        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4240
4241        if (INACTIVE != ioa_cfg->sdt_state) {
4242                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4243                vfree(dump->ioa_dump.ioa_data);
4244                kfree(dump);
4245                return 0;
4246        }
4247
4248        ioa_cfg->dump = dump;
4249        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4250        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4251                ioa_cfg->dump_taken = 1;
4252                schedule_work(&ioa_cfg->work_q);
4253        }
4254        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4255
4256        return 0;
4257}
4258
4259/**
4260 * ipr_free_dump - Free adapter dump memory
4261 * @ioa_cfg:    ioa config struct
4262 *
4263 * Return value:
4264 *      0 on success / other on failure
4265 **/
4266static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4267{
4268        struct ipr_dump *dump;
4269        unsigned long lock_flags = 0;
4270
4271        ENTER;
4272
4273        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4274        dump = ioa_cfg->dump;
4275        if (!dump) {
4276                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4277                return 0;
4278        }
4279
4280        ioa_cfg->dump = NULL;
4281        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4282
4283        kref_put(&dump->kref, ipr_release_dump);
4284
4285        LEAVE;
4286        return 0;
4287}
4288
4289/**
4290 * ipr_write_dump - Setup dump state of adapter
4291 * @filp:               open sysfs file
4292 * @kobj:               kobject struct
4293 * @bin_attr:           bin_attribute struct
4294 * @buf:                buffer
4295 * @off:                offset
4296 * @count:              buffer size
4297 *
4298 * Return value:
4299 *      number of bytes printed to buffer
4300 **/
4301static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4302                              struct bin_attribute *bin_attr,
4303                              char *buf, loff_t off, size_t count)
4304{
4305        struct device *cdev = container_of(kobj, struct device, kobj);
4306        struct Scsi_Host *shost = class_to_shost(cdev);
4307        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4308        int rc;
4309
4310        if (!capable(CAP_SYS_ADMIN))
4311                return -EACCES;
4312
4313        if (buf[0] == '1')
4314                rc = ipr_alloc_dump(ioa_cfg);
4315        else if (buf[0] == '0')
4316                rc = ipr_free_dump(ioa_cfg);
4317        else
4318                return -EINVAL;
4319
4320        if (rc)
4321                return rc;
4322        else
4323                return count;
4324}
4325
4326static struct bin_attribute ipr_dump_attr = {
4327        .attr = {
4328                .name = "dump",
4329                .mode = S_IRUSR | S_IWUSR,
4330        },
4331        .size = 0,
4332        .read = ipr_read_dump,
4333        .write = ipr_write_dump
4334};
4335#else
4336static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4337#endif
4338
4339/**
4340 * ipr_change_queue_depth - Change the device's queue depth
4341 * @sdev:       scsi device struct
4342 * @qdepth:     depth to set
4343 * @reason:     calling context
4344 *
4345 * Return value:
4346 *      actual depth set
4347 **/
4348static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4349{
4350        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4351        struct ipr_resource_entry *res;
4352        unsigned long lock_flags = 0;
4353
4354        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4355        res = (struct ipr_resource_entry *)sdev->hostdata;
4356
4357        if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4358                qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4359        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4360
4361        scsi_change_queue_depth(sdev, qdepth);
4362        return sdev->queue_depth;
4363}
4364
4365/**
4366 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4367 * @dev:        device struct
4368 * @attr:       device attribute structure
4369 * @buf:        buffer
4370 *
4371 * Return value:
4372 *      number of bytes printed to buffer
4373 **/
4374static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4375{
4376        struct scsi_device *sdev = to_scsi_device(dev);
4377        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4378        struct ipr_resource_entry *res;
4379        unsigned long lock_flags = 0;
4380        ssize_t len = -ENXIO;
4381
4382        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4383        res = (struct ipr_resource_entry *)sdev->hostdata;
4384        if (res)
4385                len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4386        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4387        return len;
4388}
4389
4390static struct device_attribute ipr_adapter_handle_attr = {
4391        .attr = {
4392                .name =         "adapter_handle",
4393                .mode =         S_IRUSR,
4394        },
4395        .show = ipr_show_adapter_handle
4396};
4397
4398/**
4399 * ipr_show_resource_path - Show the resource path or the resource address for
4400 *                          this device.
4401 * @dev:        device struct
4402 * @attr:       device attribute structure
4403 * @buf:        buffer
4404 *
4405 * Return value:
4406 *      number of bytes printed to buffer
4407 **/
4408static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4409{
4410        struct scsi_device *sdev = to_scsi_device(dev);
4411        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4412        struct ipr_resource_entry *res;
4413        unsigned long lock_flags = 0;
4414        ssize_t len = -ENXIO;
4415        char buffer[IPR_MAX_RES_PATH_LENGTH];
4416
4417        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4418        res = (struct ipr_resource_entry *)sdev->hostdata;
4419        if (res && ioa_cfg->sis64)
4420                len = snprintf(buf, PAGE_SIZE, "%s\n",
4421                               __ipr_format_res_path(res->res_path, buffer,
4422                                                     sizeof(buffer)));
4423        else if (res)
4424                len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4425                               res->bus, res->target, res->lun);
4426
4427        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4428        return len;
4429}
4430
4431static struct device_attribute ipr_resource_path_attr = {
4432        .attr = {
4433                .name =         "resource_path",
4434                .mode =         S_IRUGO,
4435        },
4436        .show = ipr_show_resource_path
4437};
4438
4439/**
4440 * ipr_show_device_id - Show the device_id for this device.
4441 * @dev:        device struct
4442 * @attr:       device attribute structure
4443 * @buf:        buffer
4444 *
4445 * Return value:
4446 *      number of bytes printed to buffer
4447 **/
4448static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4449{
4450        struct scsi_device *sdev = to_scsi_device(dev);
4451        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4452        struct ipr_resource_entry *res;
4453        unsigned long lock_flags = 0;
4454        ssize_t len = -ENXIO;
4455
4456        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4457        res = (struct ipr_resource_entry *)sdev->hostdata;
4458        if (res && ioa_cfg->sis64)
4459                len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4460        else if (res)
4461                len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4462
4463        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4464        return len;
4465}
4466
4467static struct device_attribute ipr_device_id_attr = {
4468        .attr = {
4469                .name =         "device_id",
4470                .mode =         S_IRUGO,
4471        },
4472        .show = ipr_show_device_id
4473};
4474
4475/**
4476 * ipr_show_resource_type - Show the resource type for this device.
4477 * @dev:        device struct
4478 * @attr:       device attribute structure
4479 * @buf:        buffer
4480 *
4481 * Return value:
4482 *      number of bytes printed to buffer
4483 **/
4484static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4485{
4486        struct scsi_device *sdev = to_scsi_device(dev);
4487        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4488        struct ipr_resource_entry *res;
4489        unsigned long lock_flags = 0;
4490        ssize_t len = -ENXIO;
4491
4492        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4493        res = (struct ipr_resource_entry *)sdev->hostdata;
4494
4495        if (res)
4496                len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4497
4498        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4499        return len;
4500}
4501
4502static struct device_attribute ipr_resource_type_attr = {
4503        .attr = {
4504                .name =         "resource_type",
4505                .mode =         S_IRUGO,
4506        },
4507        .show = ipr_show_resource_type
4508};
4509
4510/**
4511 * ipr_show_raw_mode - Show the adapter's raw mode
4512 * @dev:        class device struct
4513 * @buf:        buffer
4514 *
4515 * Return value:
4516 *      number of bytes printed to buffer
4517 **/
4518static ssize_t ipr_show_raw_mode(struct device *dev,
4519                                 struct device_attribute *attr, char *buf)
4520{
4521        struct scsi_device *sdev = to_scsi_device(dev);
4522        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4523        struct ipr_resource_entry *res;
4524        unsigned long lock_flags = 0;
4525        ssize_t len;
4526
4527        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4528        res = (struct ipr_resource_entry *)sdev->hostdata;
4529        if (res)
4530                len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4531        else
4532                len = -ENXIO;
4533        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4534        return len;
4535}
4536
4537/**
4538 * ipr_store_raw_mode - Change the adapter's raw mode
4539 * @dev:        class device struct
4540 * @buf:        buffer
4541 *
4542 * Return value:
4543 *      number of bytes printed to buffer
4544 **/
4545static ssize_t ipr_store_raw_mode(struct device *dev,
4546                                  struct device_attribute *attr,
4547                                  const char *buf, size_t count)
4548{
4549        struct scsi_device *sdev = to_scsi_device(dev);
4550        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4551        struct ipr_resource_entry *res;
4552        unsigned long lock_flags = 0;
4553        ssize_t len;
4554
4555        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4556        res = (struct ipr_resource_entry *)sdev->hostdata;
4557        if (res) {
4558                if (ipr_is_af_dasd_device(res)) {
4559                        res->raw_mode = simple_strtoul(buf, NULL, 10);
4560                        len = strlen(buf);
4561                        if (res->sdev)
4562                                sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4563                                        res->raw_mode ? "enabled" : "disabled");
4564                } else
4565                        len = -EINVAL;
4566        } else
4567                len = -ENXIO;
4568        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4569        return len;
4570}
4571
4572static struct device_attribute ipr_raw_mode_attr = {
4573        .attr = {
4574                .name =         "raw_mode",
4575                .mode =         S_IRUGO | S_IWUSR,
4576        },
4577        .show = ipr_show_raw_mode,
4578        .store = ipr_store_raw_mode
4579};
4580
4581static struct device_attribute *ipr_dev_attrs[] = {
4582        &ipr_adapter_handle_attr,
4583        &ipr_resource_path_attr,
4584        &ipr_device_id_attr,
4585        &ipr_resource_type_attr,
4586        &ipr_raw_mode_attr,
4587        NULL,
4588};
4589
4590/**
4591 * ipr_biosparam - Return the HSC mapping
4592 * @sdev:                       scsi device struct
4593 * @block_device:       block device pointer
4594 * @capacity:           capacity of the device
4595 * @parm:                       Array containing returned HSC values.
4596 *
4597 * This function generates the HSC parms that fdisk uses.
4598 * We want to make sure we return something that places partitions
4599 * on 4k boundaries for best performance with the IOA.
4600 *
4601 * Return value:
4602 *      0 on success
4603 **/
4604static int ipr_biosparam(struct scsi_device *sdev,
4605                         struct block_device *block_device,
4606                         sector_t capacity, int *parm)
4607{
4608        int heads, sectors;
4609        sector_t cylinders;
4610
4611        heads = 128;
4612        sectors = 32;
4613
4614        cylinders = capacity;
4615        sector_div(cylinders, (128 * 32));
4616
4617        /* return result */
4618        parm[0] = heads;
4619        parm[1] = sectors;
4620        parm[2] = cylinders;
4621
4622        return 0;
4623}
4624
4625/**
4626 * ipr_find_starget - Find target based on bus/target.
4627 * @starget:    scsi target struct
4628 *
4629 * Return value:
4630 *      resource entry pointer if found / NULL if not found
4631 **/
4632static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4633{
4634        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4635        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4636        struct ipr_resource_entry *res;
4637
4638        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4639                if ((res->bus == starget->channel) &&
4640                    (res->target == starget->id)) {
4641                        return res;
4642                }
4643        }
4644
4645        return NULL;
4646}
4647
4648static struct ata_port_info sata_port_info;
4649
4650/**
4651 * ipr_target_alloc - Prepare for commands to a SCSI target
4652 * @starget:    scsi target struct
4653 *
4654 * If the device is a SATA device, this function allocates an
4655 * ATA port with libata, else it does nothing.
4656 *
4657 * Return value:
4658 *      0 on success / non-0 on failure
4659 **/
4660static int ipr_target_alloc(struct scsi_target *starget)
4661{
4662        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4663        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4664        struct ipr_sata_port *sata_port;
4665        struct ata_port *ap;
4666        struct ipr_resource_entry *res;
4667        unsigned long lock_flags;
4668
4669        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4670        res = ipr_find_starget(starget);
4671        starget->hostdata = NULL;
4672
4673        if (res && ipr_is_gata(res)) {
4674                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4675                sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4676                if (!sata_port)
4677                        return -ENOMEM;
4678
4679                ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4680                if (ap) {
4681                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4682                        sata_port->ioa_cfg = ioa_cfg;
4683                        sata_port->ap = ap;
4684                        sata_port->res = res;
4685
4686                        res->sata_port = sata_port;
4687                        ap->private_data = sata_port;
4688                        starget->hostdata = sata_port;
4689                } else {
4690                        kfree(sata_port);
4691                        return -ENOMEM;
4692                }
4693        }
4694        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4695
4696        return 0;
4697}
4698
4699/**
4700 * ipr_target_destroy - Destroy a SCSI target
4701 * @starget:    scsi target struct
4702 *
4703 * If the device was a SATA device, this function frees the libata
4704 * ATA port, else it does nothing.
4705 *
4706 **/
4707static void ipr_target_destroy(struct scsi_target *starget)
4708{
4709        struct ipr_sata_port *sata_port = starget->hostdata;
4710        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4711        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4712
4713        if (ioa_cfg->sis64) {
4714                if (!ipr_find_starget(starget)) {
4715                        if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4716                                clear_bit(starget->id, ioa_cfg->array_ids);
4717                        else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4718                                clear_bit(starget->id, ioa_cfg->vset_ids);
4719                        else if (starget->channel == 0)
4720                                clear_bit(starget->id, ioa_cfg->target_ids);
4721                }
4722        }
4723
4724        if (sata_port) {
4725                starget->hostdata = NULL;
4726                ata_sas_port_destroy(sata_port->ap);
4727                kfree(sata_port);
4728        }
4729}
4730
4731/**
4732 * ipr_find_sdev - Find device based on bus/target/lun.
4733 * @sdev:       scsi device struct
4734 *
4735 * Return value:
4736 *      resource entry pointer if found / NULL if not found
4737 **/
4738static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4739{
4740        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4741        struct ipr_resource_entry *res;
4742
4743        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4744                if ((res->bus == sdev->channel) &&
4745                    (res->target == sdev->id) &&
4746                    (res->lun == sdev->lun))
4747                        return res;
4748        }
4749
4750        return NULL;
4751}
4752
4753/**
4754 * ipr_slave_destroy - Unconfigure a SCSI device
4755 * @sdev:       scsi device struct
4756 *
4757 * Return value:
4758 *      nothing
4759 **/
4760static void ipr_slave_destroy(struct scsi_device *sdev)
4761{
4762        struct ipr_resource_entry *res;
4763        struct ipr_ioa_cfg *ioa_cfg;
4764        unsigned long lock_flags = 0;
4765
4766        ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4767
4768        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4769        res = (struct ipr_resource_entry *) sdev->hostdata;
4770        if (res) {
4771                if (res->sata_port)
4772                        res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4773                sdev->hostdata = NULL;
4774                res->sdev = NULL;
4775                res->sata_port = NULL;
4776        }
4777        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4778}
4779
4780/**
4781 * ipr_slave_configure - Configure a SCSI device
4782 * @sdev:       scsi device struct
4783 *
4784 * This function configures the specified scsi device.
4785 *
4786 * Return value:
4787 *      0 on success
4788 **/
4789static int ipr_slave_configure(struct scsi_device *sdev)
4790{
4791        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4792        struct ipr_resource_entry *res;
4793        struct ata_port *ap = NULL;
4794        unsigned long lock_flags = 0;
4795        char buffer[IPR_MAX_RES_PATH_LENGTH];
4796
4797        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4798        res = sdev->hostdata;
4799        if (res) {
4800                if (ipr_is_af_dasd_device(res))
4801                        sdev->type = TYPE_RAID;
4802                if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4803                        sdev->scsi_level = 4;
4804                        sdev->no_uld_attach = 1;
4805                }
4806                if (ipr_is_vset_device(res)) {
4807                        sdev->scsi_level = SCSI_SPC_3;
4808                        blk_queue_rq_timeout(sdev->request_queue,
4809                                             IPR_VSET_RW_TIMEOUT);
4810                        blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4811                }
4812                if (ipr_is_gata(res) && res->sata_port)
4813                        ap = res->sata_port->ap;
4814                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4815
4816                if (ap) {
4817                        scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4818                        ata_sas_slave_configure(sdev, ap);
4819                }
4820
4821                if (ioa_cfg->sis64)
4822                        sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4823                                    ipr_format_res_path(ioa_cfg,
4824                                res->res_path, buffer, sizeof(buffer)));
4825                return 0;
4826        }
4827        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4828        return 0;
4829}
4830
4831/**
4832 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4833 * @sdev:       scsi device struct
4834 *
4835 * This function initializes an ATA port so that future commands
4836 * sent through queuecommand will work.
4837 *
4838 * Return value:
4839 *      0 on success
4840 **/
4841static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4842{
4843        struct ipr_sata_port *sata_port = NULL;
4844        int rc = -ENXIO;
4845
4846        ENTER;
4847        if (sdev->sdev_target)
4848                sata_port = sdev->sdev_target->hostdata;
4849        if (sata_port) {
4850                rc = ata_sas_port_init(sata_port->ap);
4851                if (rc == 0)
4852                        rc = ata_sas_sync_probe(sata_port->ap);
4853        }
4854
4855        if (rc)
4856                ipr_slave_destroy(sdev);
4857
4858        LEAVE;
4859        return rc;
4860}
4861
4862/**
4863 * ipr_slave_alloc - Prepare for commands to a device.
4864 * @sdev:       scsi device struct
4865 *
4866 * This function saves a pointer to the resource entry
4867 * in the scsi device struct if the device exists. We
4868 * can then use this pointer in ipr_queuecommand when
4869 * handling new commands.
4870 *
4871 * Return value:
4872 *      0 on success / -ENXIO if device does not exist
4873 **/
4874static int ipr_slave_alloc(struct scsi_device *sdev)
4875{
4876        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4877        struct ipr_resource_entry *res;
4878        unsigned long lock_flags;
4879        int rc = -ENXIO;
4880
4881        sdev->hostdata = NULL;
4882
4883        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4884
4885        res = ipr_find_sdev(sdev);
4886        if (res) {
4887                res->sdev = sdev;
4888                res->add_to_ml = 0;
4889                res->in_erp = 0;
4890                sdev->hostdata = res;
4891                if (!ipr_is_naca_model(res))
4892                        res->needs_sync_complete = 1;
4893                rc = 0;
4894                if (ipr_is_gata(res)) {
4895                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4896                        return ipr_ata_slave_alloc(sdev);
4897                }
4898        }
4899
4900        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4901
4902        return rc;
4903}
4904
4905/**
4906 * ipr_match_lun - Match function for specified LUN
4907 * @ipr_cmd:    ipr command struct
4908 * @device:             device to match (sdev)
4909 *
4910 * Returns:
4911 *      1 if command matches sdev / 0 if command does not match sdev
4912 **/
4913static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4914{
4915        if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4916                return 1;
4917        return 0;
4918}
4919
4920/**
4921 * ipr_wait_for_ops - Wait for matching commands to complete
4922 * @ipr_cmd:    ipr command struct
4923 * @device:             device to match (sdev)
4924 * @match:              match function to use
4925 *
4926 * Returns:
4927 *      SUCCESS / FAILED
4928 **/
4929static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4930                            int (*match)(struct ipr_cmnd *, void *))
4931{
4932        struct ipr_cmnd *ipr_cmd;
4933        int wait;
4934        unsigned long flags;
4935        struct ipr_hrr_queue *hrrq;
4936        signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4937        DECLARE_COMPLETION_ONSTACK(comp);
4938
4939        ENTER;
4940        do {
4941                wait = 0;
4942
4943                for_each_hrrq(hrrq, ioa_cfg) {
4944                        spin_lock_irqsave(hrrq->lock, flags);
4945                        list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4946                                if (match(ipr_cmd, device)) {
4947                                        ipr_cmd->eh_comp = &comp;
4948                                        wait++;
4949                                }
4950                        }
4951                        spin_unlock_irqrestore(hrrq->lock, flags);
4952                }
4953
4954                if (wait) {
4955                        timeout = wait_for_completion_timeout(&comp, timeout);
4956
4957                        if (!timeout) {
4958                                wait = 0;
4959
4960                                for_each_hrrq(hrrq, ioa_cfg) {
4961                                        spin_lock_irqsave(hrrq->lock, flags);
4962                                        list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4963                                                if (match(ipr_cmd, device)) {
4964                                                        ipr_cmd->eh_comp = NULL;
4965                                                        wait++;
4966                                                }
4967                                        }
4968                                        spin_unlock_irqrestore(hrrq->lock, flags);
4969                                }
4970
4971                                if (wait)
4972                                        dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4973                                LEAVE;
4974                                return wait ? FAILED : SUCCESS;
4975                        }
4976                }
4977        } while (wait);
4978
4979        LEAVE;
4980        return SUCCESS;
4981}
4982
4983static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4984{
4985        struct ipr_ioa_cfg *ioa_cfg;
4986        unsigned long lock_flags = 0;
4987        int rc = SUCCESS;
4988
4989        ENTER;
4990        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4991        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4992
4993        if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4994                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4995                dev_err(&ioa_cfg->pdev->dev,
4996                        "Adapter being reset as a result of error recovery.\n");
4997
4998                if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4999                        ioa_cfg->sdt_state = GET_DUMP;
5000        }
5001
5002        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5003        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5004        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5005
5006        /* If we got hit with a host reset while we were already resetting
5007         the adapter for some reason, and the reset failed. */
5008        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5009                ipr_trace;
5010                rc = FAILED;
5011        }
5012
5013        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5014        LEAVE;
5015        return rc;
5016}
5017
5018/**
5019 * ipr_device_reset - Reset the device
5020 * @ioa_cfg:    ioa config struct
5021 * @res:                resource entry struct
5022 *
5023 * This function issues a device reset to the affected device.
5024 * If the device is a SCSI device, a LUN reset will be sent
5025 * to the device first. If that does not work, a target reset
5026 * will be sent. If the device is a SATA device, a PHY reset will
5027 * be sent.
5028 *
5029 * Return value:
5030 *      0 on success / non-zero on failure
5031 **/
5032static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5033                            struct ipr_resource_entry *res)
5034{
5035        struct ipr_cmnd *ipr_cmd;
5036        struct ipr_ioarcb *ioarcb;
5037        struct ipr_cmd_pkt *cmd_pkt;
5038        struct ipr_ioarcb_ata_regs *regs;
5039        u32 ioasc;
5040
5041        ENTER;
5042        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5043        ioarcb = &ipr_cmd->ioarcb;
5044        cmd_pkt = &ioarcb->cmd_pkt;
5045
5046        if (ipr_cmd->ioa_cfg->sis64) {
5047                regs = &ipr_cmd->i.ata_ioadl.regs;
5048                ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5049        } else
5050                regs = &ioarcb->u.add_data.u.regs;
5051
5052        ioarcb->res_handle = res->res_handle;
5053        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5054        cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5055        if (ipr_is_gata(res)) {
5056                cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5057                ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5058                regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5059        }
5060
5061        ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5062        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5063        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5064        if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5065                if (ipr_cmd->ioa_cfg->sis64)
5066                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5067                               sizeof(struct ipr_ioasa_gata));
5068                else
5069                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5070                               sizeof(struct ipr_ioasa_gata));
5071        }
5072
5073        LEAVE;
5074        return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5075}
5076
5077/**
5078 * ipr_sata_reset - Reset the SATA port
5079 * @link:       SATA link to reset
5080 * @classes:    class of the attached device
5081 *
5082 * This function issues a SATA phy reset to the affected ATA link.
5083 *
5084 * Return value:
5085 *      0 on success / non-zero on failure
5086 **/
5087static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5088                                unsigned long deadline)
5089{
5090        struct ipr_sata_port *sata_port = link->ap->private_data;
5091        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5092        struct ipr_resource_entry *res;
5093        unsigned long lock_flags = 0;
5094        int rc = -ENXIO;
5095
5096        ENTER;
5097        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5098        while (ioa_cfg->in_reset_reload) {
5099                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5100                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5101                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5102        }
5103
5104        res = sata_port->res;
5105        if (res) {
5106                rc = ipr_device_reset(ioa_cfg, res);
5107                *classes = res->ata_class;
5108        }
5109
5110        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5111        LEAVE;
5112        return rc;
5113}
5114
5115/**
5116 * ipr_eh_dev_reset - Reset the device
5117 * @scsi_cmd:   scsi command struct
5118 *
5119 * This function issues a device reset to the affected device.
5120 * A LUN reset will be sent to the device first. If that does
5121 * not work, a target reset will be sent.
5122 *
5123 * Return value:
5124 *      SUCCESS / FAILED
5125 **/
5126static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5127{
5128        struct ipr_cmnd *ipr_cmd;
5129        struct ipr_ioa_cfg *ioa_cfg;
5130        struct ipr_resource_entry *res;
5131        struct ata_port *ap;
5132        int rc = 0;
5133        struct ipr_hrr_queue *hrrq;
5134
5135        ENTER;
5136        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5137        res = scsi_cmd->device->hostdata;
5138
5139        if (!res)
5140                return FAILED;
5141
5142        /*
5143         * If we are currently going through reset/reload, return failed. This will force the
5144         * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5145         * reset to complete
5146         */
5147        if (ioa_cfg->in_reset_reload)
5148                return FAILED;
5149        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5150                return FAILED;
5151
5152        for_each_hrrq(hrrq, ioa_cfg) {
5153                spin_lock(&hrrq->_lock);
5154                list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5155                        if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5156                                if (ipr_cmd->scsi_cmd)
5157                                        ipr_cmd->done = ipr_scsi_eh_done;
5158                                if (ipr_cmd->qc)
5159                                        ipr_cmd->done = ipr_sata_eh_done;
5160                                if (ipr_cmd->qc &&
5161                                    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5162                                        ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5163                                        ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5164                                }
5165                        }
5166                }
5167                spin_unlock(&hrrq->_lock);
5168        }
5169        res->resetting_device = 1;
5170        scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5171
5172        if (ipr_is_gata(res) && res->sata_port) {
5173                ap = res->sata_port->ap;
5174                spin_unlock_irq(scsi_cmd->device->host->host_lock);
5175                ata_std_error_handler(ap);
5176                spin_lock_irq(scsi_cmd->device->host->host_lock);
5177
5178                for_each_hrrq(hrrq, ioa_cfg) {
5179                        spin_lock(&hrrq->_lock);
5180                        list_for_each_entry(ipr_cmd,
5181                                            &hrrq->hrrq_pending_q, queue) {
5182                                if (ipr_cmd->ioarcb.res_handle ==
5183                                    res->res_handle) {
5184                                        rc = -EIO;
5185                                        break;
5186                                }
5187                        }
5188                        spin_unlock(&hrrq->_lock);
5189                }
5190        } else
5191                rc = ipr_device_reset(ioa_cfg, res);
5192        res->resetting_device = 0;
5193        res->reset_occurred = 1;
5194
5195        LEAVE;
5196        return rc ? FAILED : SUCCESS;
5197}
5198
5199static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5200{
5201        int rc;
5202        struct ipr_ioa_cfg *ioa_cfg;
5203
5204        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5205
5206        spin_lock_irq(cmd->device->host->host_lock);
5207        rc = __ipr_eh_dev_reset(cmd);
5208        spin_unlock_irq(cmd->device->host->host_lock);
5209
5210        if (rc == SUCCESS)
5211                rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5212
5213        return rc;
5214}
5215
5216/**
5217 * ipr_bus_reset_done - Op done function for bus reset.
5218 * @ipr_cmd:    ipr command struct
5219 *
5220 * This function is the op done function for a bus reset
5221 *
5222 * Return value:
5223 *      none
5224 **/
5225static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5226{
5227        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5228        struct ipr_resource_entry *res;
5229
5230        ENTER;
5231        if (!ioa_cfg->sis64)
5232                list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5233                        if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5234                                scsi_report_bus_reset(ioa_cfg->host, res->bus);
5235                                break;
5236                        }
5237                }
5238
5239        /*
5240         * If abort has not completed, indicate the reset has, else call the
5241         * abort's done function to wake the sleeping eh thread
5242         */
5243        if (ipr_cmd->sibling->sibling)
5244                ipr_cmd->sibling->sibling = NULL;
5245        else
5246                ipr_cmd->sibling->done(ipr_cmd->sibling);
5247
5248        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5249        LEAVE;
5250}
5251
5252/**
5253 * ipr_abort_timeout - An abort task has timed out
5254 * @ipr_cmd:    ipr command struct
5255 *
5256 * This function handles when an abort task times out. If this
5257 * happens we issue a bus reset since we have resources tied
5258 * up that must be freed before returning to the midlayer.
5259 *
5260 * Return value:
5261 *      none
5262 **/
5263static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5264{
5265        struct ipr_cmnd *reset_cmd;
5266        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5267        struct ipr_cmd_pkt *cmd_pkt;
5268        unsigned long lock_flags = 0;
5269
5270        ENTER;
5271        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5272        if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5273                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5274                return;
5275        }
5276
5277        sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5278        reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5279        ipr_cmd->sibling = reset_cmd;
5280        reset_cmd->sibling = ipr_cmd;
5281        reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5282        cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5283        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5284        cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5285        cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5286
5287        ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5288        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5289        LEAVE;
5290}
5291
5292/**
5293 * ipr_cancel_op - Cancel specified op
5294 * @scsi_cmd:   scsi command struct
5295 *
5296 * This function cancels specified op.
5297 *
5298 * Return value:
5299 *      SUCCESS / FAILED
5300 **/
5301static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5302{
5303        struct ipr_cmnd *ipr_cmd;
5304        struct ipr_ioa_cfg *ioa_cfg;
5305        struct ipr_resource_entry *res;
5306        struct ipr_cmd_pkt *cmd_pkt;
5307        u32 ioasc, int_reg;
5308        int op_found = 0;
5309        struct ipr_hrr_queue *hrrq;
5310
5311        ENTER;
5312        ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5313        res = scsi_cmd->device->hostdata;
5314
5315        /* If we are currently going through reset/reload, return failed.
5316         * This will force the mid-layer to call ipr_eh_host_reset,
5317         * which will then go to sleep and wait for the reset to complete
5318         */
5319        if (ioa_cfg->in_reset_reload ||
5320            ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5321                return FAILED;
5322        if (!res)
5323                return FAILED;
5324
5325        /*
5326         * If we are aborting a timed out op, chances are that the timeout was caused
5327         * by a still not detected EEH error. In such cases, reading a register will
5328         * trigger the EEH recovery infrastructure.
5329         */
5330        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5331
5332        if (!ipr_is_gscsi(res))
5333                return FAILED;
5334
5335        for_each_hrrq(hrrq, ioa_cfg) {
5336                spin_lock(&hrrq->_lock);
5337                list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5338                        if (ipr_cmd->scsi_cmd == scsi_cmd) {
5339                                ipr_cmd->done = ipr_scsi_eh_done;
5340                                op_found = 1;
5341                                break;
5342                        }
5343                }
5344                spin_unlock(&hrrq->_lock);
5345        }
5346
5347        if (!op_found)
5348                return SUCCESS;
5349
5350        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5351        ipr_cmd->ioarcb.res_handle = res->res_handle;
5352        cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5353        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5354        cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5355        ipr_cmd->u.sdev = scsi_cmd->device;
5356
5357        scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5358                    scsi_cmd->cmnd[0]);
5359        ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5360        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5361
5362        /*
5363         * If the abort task timed out and we sent a bus reset, we will get
5364         * one the following responses to the abort
5365         */
5366        if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5367                ioasc = 0;
5368                ipr_trace;
5369        }
5370
5371        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5372        if (!ipr_is_naca_model(res))
5373                res->needs_sync_complete = 1;
5374
5375        LEAVE;
5376        return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5377}
5378
5379/**
5380 * ipr_eh_abort - Abort a single op
5381 * @scsi_cmd:   scsi command struct
5382 *
5383 * Return value:
5384 *      0 if scan in progress / 1 if scan is complete
5385 **/
5386static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5387{
5388        unsigned long lock_flags;
5389        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5390        int rc = 0;
5391
5392        spin_lock_irqsave(shost->host_lock, lock_flags);
5393        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5394                rc = 1;
5395        if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5396                rc = 1;
5397        spin_unlock_irqrestore(shost->host_lock, lock_flags);
5398        return rc;
5399}
5400
5401/**
5402 * ipr_eh_host_reset - Reset the host adapter
5403 * @scsi_cmd:   scsi command struct
5404 *
5405 * Return value:
5406 *      SUCCESS / FAILED
5407 **/
5408static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5409{
5410        unsigned long flags;
5411        int rc;
5412        struct ipr_ioa_cfg *ioa_cfg;
5413
5414        ENTER;
5415
5416        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5417
5418        spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5419        rc = ipr_cancel_op(scsi_cmd);
5420        spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5421
5422        if (rc == SUCCESS)
5423                rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5424        LEAVE;
5425        return rc;
5426}
5427
5428/**
5429 * ipr_handle_other_interrupt - Handle "other" interrupts
5430 * @ioa_cfg:    ioa config struct
5431 * @int_reg:    interrupt register
5432 *
5433 * Return value:
5434 *      IRQ_NONE / IRQ_HANDLED
5435 **/
5436static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5437                                              u32 int_reg)
5438{
5439        irqreturn_t rc = IRQ_HANDLED;
5440        u32 int_mask_reg;
5441
5442        int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5443        int_reg &= ~int_mask_reg;
5444
5445        /* If an interrupt on the adapter did not occur, ignore it.
5446         * Or in the case of SIS 64, check for a stage change interrupt.
5447         */
5448        if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5449                if (ioa_cfg->sis64) {
5450                        int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5451                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5452                        if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5453
5454                                /* clear stage change */
5455                                writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5456                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5457                                list_del(&ioa_cfg->reset_cmd->queue);
5458                                del_timer(&ioa_cfg->reset_cmd->timer);
5459                                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5460                                return IRQ_HANDLED;
5461                        }
5462                }
5463
5464                return IRQ_NONE;
5465        }
5466
5467        if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5468                /* Mask the interrupt */
5469                writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5470                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5471
5472                list_del(&ioa_cfg->reset_cmd->queue);
5473                del_timer(&ioa_cfg->reset_cmd->timer);
5474                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5475        } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5476                if (ioa_cfg->clear_isr) {
5477                        if (ipr_debug && printk_ratelimit())
5478                                dev_err(&ioa_cfg->pdev->dev,
5479                                        "Spurious interrupt detected. 0x%08X\n", int_reg);
5480                        writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5481                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5482                        return IRQ_NONE;
5483                }
5484        } else {
5485                if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5486                        ioa_cfg->ioa_unit_checked = 1;
5487                else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5488                        dev_err(&ioa_cfg->pdev->dev,
5489                                "No Host RRQ. 0x%08X\n", int_reg);
5490                else
5491                        dev_err(&ioa_cfg->pdev->dev,
5492                                "Permanent IOA failure. 0x%08X\n", int_reg);
5493
5494                if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5495                        ioa_cfg->sdt_state = GET_DUMP;
5496
5497                ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5498                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5499        }
5500
5501        return rc;
5502}
5503
5504/**
5505 * ipr_isr_eh - Interrupt service routine error handler
5506 * @ioa_cfg:    ioa config struct
5507 * @msg:        message to log
5508 *
5509 * Return value:
5510 *      none
5511 **/
5512static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5513{
5514        ioa_cfg->errors_logged++;
5515        dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5516
5517        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5518                ioa_cfg->sdt_state = GET_DUMP;
5519
5520        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5521}
5522
5523static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5524                                                struct list_head *doneq)
5525{
5526        u32 ioasc;
5527        u16 cmd_index;
5528        struct ipr_cmnd *ipr_cmd;
5529        struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5530        int num_hrrq = 0;
5531
5532        /* If interrupts are disabled, ignore the interrupt */
5533        if (!hrr_queue->allow_interrupts)
5534                return 0;
5535
5536        while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5537               hrr_queue->toggle_bit) {
5538
5539                cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5540                             IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5541                             IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5542
5543                if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5544                             cmd_index < hrr_queue->min_cmd_id)) {
5545                        ipr_isr_eh(ioa_cfg,
5546                                "Invalid response handle from IOA: ",
5547                                cmd_index);
5548                        break;
5549                }
5550
5551                ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5552                ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5553
5554                ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5555
5556                list_move_tail(&ipr_cmd->queue, doneq);
5557
5558                if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5559                        hrr_queue->hrrq_curr++;
5560                } else {
5561                        hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5562                        hrr_queue->toggle_bit ^= 1u;
5563                }
5564                num_hrrq++;
5565                if (budget > 0 && num_hrrq >= budget)
5566                        break;
5567        }
5568
5569        return num_hrrq;
5570}
5571
5572static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5573{
5574        struct ipr_ioa_cfg *ioa_cfg;
5575        struct ipr_hrr_queue *hrrq;
5576        struct ipr_cmnd *ipr_cmd, *temp;
5577        unsigned long hrrq_flags;
5578        int completed_ops;
5579        LIST_HEAD(doneq);
5580
5581        hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5582        ioa_cfg = hrrq->ioa_cfg;
5583
5584        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5585        completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5586
5587        if (completed_ops < budget)
5588                blk_iopoll_complete(iop);
5589        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5590
5591        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5592                list_del(&ipr_cmd->queue);
5593                del_timer(&ipr_cmd->timer);
5594                ipr_cmd->fast_done(ipr_cmd);
5595        }
5596
5597        return completed_ops;
5598}
5599
5600/**
5601 * ipr_isr - Interrupt service routine
5602 * @irq:        irq number
5603 * @devp:       pointer to ioa config struct
5604 *
5605 * Return value:
5606 *      IRQ_NONE / IRQ_HANDLED
5607 **/
5608static irqreturn_t ipr_isr(int irq, void *devp)
5609{
5610        struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5611        struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5612        unsigned long hrrq_flags = 0;
5613        u32 int_reg = 0;
5614        int num_hrrq = 0;
5615        int irq_none = 0;
5616        struct ipr_cmnd *ipr_cmd, *temp;
5617        irqreturn_t rc = IRQ_NONE;
5618        LIST_HEAD(doneq);
5619
5620        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5621        /* If interrupts are disabled, ignore the interrupt */
5622        if (!hrrq->allow_interrupts) {
5623                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5624                return IRQ_NONE;
5625        }
5626
5627        while (1) {
5628                if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5629                        rc =  IRQ_HANDLED;
5630
5631                        if (!ioa_cfg->clear_isr)
5632                                break;
5633
5634                        /* Clear the PCI interrupt */
5635                        num_hrrq = 0;
5636                        do {
5637                                writel(IPR_PCII_HRRQ_UPDATED,
5638                                     ioa_cfg->regs.clr_interrupt_reg32);
5639                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5640                        } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5641                                num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5642
5643                } else if (rc == IRQ_NONE && irq_none == 0) {
5644                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5645                        irq_none++;
5646                } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5647                           int_reg & IPR_PCII_HRRQ_UPDATED) {
5648                        ipr_isr_eh(ioa_cfg,
5649                                "Error clearing HRRQ: ", num_hrrq);
5650                        rc = IRQ_HANDLED;
5651                        break;
5652                } else
5653                        break;
5654        }
5655
5656        if (unlikely(rc == IRQ_NONE))
5657                rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5658
5659        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5660        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5661                list_del(&ipr_cmd->queue);
5662                del_timer(&ipr_cmd->timer);
5663                ipr_cmd->fast_done(ipr_cmd);
5664        }
5665        return rc;
5666}
5667
5668/**
5669 * ipr_isr_mhrrq - Interrupt service routine
5670 * @irq:        irq number
5671 * @devp:       pointer to ioa config struct
5672 *
5673 * Return value:
5674 *      IRQ_NONE / IRQ_HANDLED
5675 **/
5676static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5677{
5678        struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5679        struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5680        unsigned long hrrq_flags = 0;
5681        struct ipr_cmnd *ipr_cmd, *temp;
5682        irqreturn_t rc = IRQ_NONE;
5683        LIST_HEAD(doneq);
5684
5685        spin_lock_irqsave(hrrq->lock, hrrq_flags);
5686
5687        /* If interrupts are disabled, ignore the interrupt */
5688        if (!hrrq->allow_interrupts) {
5689                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5690                return IRQ_NONE;
5691        }
5692
5693        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5694                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5695                       hrrq->toggle_bit) {
5696                        if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5697                                blk_iopoll_sched(&hrrq->iopoll);
5698                        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5699                        return IRQ_HANDLED;
5700                }
5701        } else {
5702                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5703                        hrrq->toggle_bit)
5704
5705                        if (ipr_process_hrrq(hrrq, -1, &doneq))
5706                                rc =  IRQ_HANDLED;
5707        }
5708
5709        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5710
5711        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5712                list_del(&ipr_cmd->queue);
5713                del_timer(&ipr_cmd->timer);
5714                ipr_cmd->fast_done(ipr_cmd);
5715        }
5716        return rc;
5717}
5718
5719/**
5720 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5721 * @ioa_cfg:    ioa config struct
5722 * @ipr_cmd:    ipr command struct
5723 *
5724 * Return value:
5725 *      0 on success / -1 on failure
5726 **/
5727static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5728                             struct ipr_cmnd *ipr_cmd)
5729{
5730        int i, nseg;
5731        struct scatterlist *sg;
5732        u32 length;
5733        u32 ioadl_flags = 0;
5734        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5735        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5736        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5737
5738        length = scsi_bufflen(scsi_cmd);
5739        if (!length)
5740                return 0;
5741
5742        nseg = scsi_dma_map(scsi_cmd);
5743        if (nseg < 0) {
5744                if (printk_ratelimit())
5745                        dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5746                return -1;
5747        }
5748
5749        ipr_cmd->dma_use_sg = nseg;
5750
5751        ioarcb->data_transfer_length = cpu_to_be32(length);
5752        ioarcb->ioadl_len =
5753                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5754
5755        if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5756                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5757                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5758        } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5759                ioadl_flags = IPR_IOADL_FLAGS_READ;
5760
5761        scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5762                ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5763                ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5764                ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5765        }
5766
5767        ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5768        return 0;
5769}
5770
5771/**
5772 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5773 * @ioa_cfg:    ioa config struct
5774 * @ipr_cmd:    ipr command struct
5775 *
5776 * Return value:
5777 *      0 on success / -1 on failure
5778 **/
5779static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5780                           struct ipr_cmnd *ipr_cmd)
5781{
5782        int i, nseg;
5783        struct scatterlist *sg;
5784        u32 length;
5785        u32 ioadl_flags = 0;
5786        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5787        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5788        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5789
5790        length = scsi_bufflen(scsi_cmd);
5791        if (!length)
5792                return 0;
5793
5794        nseg = scsi_dma_map(scsi_cmd);
5795        if (nseg < 0) {
5796                dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5797                return -1;
5798        }
5799
5800        ipr_cmd->dma_use_sg = nseg;
5801
5802        if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5803                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5804                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5805                ioarcb->data_transfer_length = cpu_to_be32(length);
5806                ioarcb->ioadl_len =
5807                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5808        } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5809                ioadl_flags = IPR_IOADL_FLAGS_READ;
5810                ioarcb->read_data_transfer_length = cpu_to_be32(length);
5811                ioarcb->read_ioadl_len =
5812                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5813        }
5814
5815        if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5816                ioadl = ioarcb->u.add_data.u.ioadl;
5817                ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5818                                    offsetof(struct ipr_ioarcb, u.add_data));
5819                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5820        }
5821
5822        scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5823                ioadl[i].flags_and_data_len =
5824                        cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5825                ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5826        }
5827
5828        ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5829        return 0;
5830}
5831
5832/**
5833 * ipr_erp_done - Process completion of ERP for a device
5834 * @ipr_cmd:            ipr command struct
5835 *
5836 * This function copies the sense buffer into the scsi_cmd
5837 * struct and pushes the scsi_done function.
5838 *
5839 * Return value:
5840 *      nothing
5841 **/
5842static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5843{
5844        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5845        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5846        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5847
5848        if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5849                scsi_cmd->result |= (DID_ERROR << 16);
5850                scmd_printk(KERN_ERR, scsi_cmd,
5851                            "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5852        } else {
5853                memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5854                       SCSI_SENSE_BUFFERSIZE);
5855        }
5856
5857        if (res) {
5858                if (!ipr_is_naca_model(res))
5859                        res->needs_sync_complete = 1;
5860                res->in_erp = 0;
5861        }
5862        scsi_dma_unmap(ipr_cmd->scsi_cmd);
5863        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5864        scsi_cmd->scsi_done(scsi_cmd);
5865}
5866
5867/**
5868 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5869 * @ipr_cmd:    ipr command struct
5870 *
5871 * Return value:
5872 *      none
5873 **/
5874static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5875{
5876        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5877        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5878        dma_addr_t dma_addr = ipr_cmd->dma_addr;
5879
5880        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5881        ioarcb->data_transfer_length = 0;
5882        ioarcb->read_data_transfer_length = 0;
5883        ioarcb->ioadl_len = 0;
5884        ioarcb->read_ioadl_len = 0;
5885        ioasa->hdr.ioasc = 0;
5886        ioasa->hdr.residual_data_len = 0;
5887
5888        if (ipr_cmd->ioa_cfg->sis64)
5889                ioarcb->u.sis64_addr_data.data_ioadl_addr =
5890                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5891        else {
5892                ioarcb->write_ioadl_addr =
5893                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5894                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5895        }
5896}
5897
5898/**
5899 * ipr_erp_request_sense - Send request sense to a device
5900 * @ipr_cmd:    ipr command struct
5901 *
5902 * This function sends a request sense to a device as a result
5903 * of a check condition.
5904 *
5905 * Return value:
5906 *      nothing
5907 **/
5908static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5909{
5910        struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5911        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5912
5913        if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5914                ipr_erp_done(ipr_cmd);
5915                return;
5916        }
5917
5918        ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5919
5920        cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5921        cmd_pkt->cdb[0] = REQUEST_SENSE;
5922        cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5923        cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5924        cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5925        cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5926
5927        ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5928                       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5929
5930        ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5931                   IPR_REQUEST_SENSE_TIMEOUT * 2);
5932}
5933
5934/**
5935 * ipr_erp_cancel_all - Send cancel all to a device
5936 * @ipr_cmd:    ipr command struct
5937 *
5938 * This function sends a cancel all to a device to clear the
5939 * queue. If we are running TCQ on the device, QERR is set to 1,
5940 * which means all outstanding ops have been dropped on the floor.
5941 * Cancel all will return them to us.
5942 *
5943 * Return value:
5944 *      nothing
5945 **/
5946static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5947{
5948        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5949        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5950        struct ipr_cmd_pkt *cmd_pkt;
5951
5952        res->in_erp = 1;
5953
5954        ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5955
5956        if (!scsi_cmd->device->simple_tags) {
5957                ipr_erp_request_sense(ipr_cmd);
5958                return;
5959        }
5960
5961        cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5962        cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5963        cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5964
5965        ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5966                   IPR_CANCEL_ALL_TIMEOUT);
5967}
5968
5969/**
5970 * ipr_dump_ioasa - Dump contents of IOASA
5971 * @ioa_cfg:    ioa config struct
5972 * @ipr_cmd:    ipr command struct
5973 * @res:                resource entry struct
5974 *
5975 * This function is invoked by the interrupt handler when ops
5976 * fail. It will log the IOASA if appropriate. Only called
5977 * for GPDD ops.
5978 *
5979 * Return value:
5980 *      none
5981 **/
5982static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5983                           struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5984{
5985        int i;
5986        u16 data_len;
5987        u32 ioasc, fd_ioasc;
5988        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5989        __be32 *ioasa_data = (__be32 *)ioasa;
5990        int error_index;
5991
5992        ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5993        fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5994
5995        if (0 == ioasc)
5996                return;
5997
5998        if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5999                return;
6000
6001        if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6002                error_index = ipr_get_error(fd_ioasc);
6003        else
6004                error_index = ipr_get_error(ioasc);
6005
6006        if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6007                /* Don't log an error if the IOA already logged one */
6008                if (ioasa->hdr.ilid != 0)
6009                        return;
6010
6011                if (!ipr_is_gscsi(res))
6012                        return;
6013
6014                if (ipr_error_table[error_index].log_ioasa == 0)
6015                        return;
6016        }
6017
6018        ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6019
6020        data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6021        if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6022                data_len = sizeof(struct ipr_ioasa64);
6023        else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6024                data_len = sizeof(struct ipr_ioasa);
6025
6026        ipr_err("IOASA Dump:\n");
6027
6028        for (i = 0; i < data_len / 4; i += 4) {
6029                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6030                        be32_to_cpu(ioasa_data[i]),
6031                        be32_to_cpu(ioasa_data[i+1]),
6032                        be32_to_cpu(ioasa_data[i+2]),
6033                        be32_to_cpu(ioasa_data[i+3]));
6034        }
6035}
6036
6037/**
6038 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6039 * @ioasa:              IOASA
6040 * @sense_buf:  sense data buffer
6041 *
6042 * Return value:
6043 *      none
6044 **/
6045static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6046{
6047        u32 failing_lba;
6048        u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6049        struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6050        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6051        u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6052
6053        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6054
6055        if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6056                return;
6057
6058        ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6059
6060        if (ipr_is_vset_device(res) &&
6061            ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6062            ioasa->u.vset.failing_lba_hi != 0) {
6063                sense_buf[0] = 0x72;
6064                sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6065                sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6066                sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6067
6068                sense_buf[7] = 12;
6069                sense_buf[8] = 0;
6070                sense_buf[9] = 0x0A;
6071                sense_buf[10] = 0x80;
6072
6073                failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6074
6075                sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6076                sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6077                sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6078                sense_buf[15] = failing_lba & 0x000000ff;
6079
6080                failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6081
6082                sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6083                sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6084                sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6085                sense_buf[19] = failing_lba & 0x000000ff;
6086        } else {
6087                sense_buf[0] = 0x70;
6088                sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6089                sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6090                sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6091
6092                /* Illegal request */
6093                if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6094                    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6095                        sense_buf[7] = 10;      /* additional length */
6096
6097                        /* IOARCB was in error */
6098                        if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6099                                sense_buf[15] = 0xC0;
6100                        else    /* Parameter data was invalid */
6101                                sense_buf[15] = 0x80;
6102
6103                        sense_buf[16] =
6104                            ((IPR_FIELD_POINTER_MASK &
6105                              be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6106                        sense_buf[17] =
6107                            (IPR_FIELD_POINTER_MASK &
6108                             be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6109                } else {
6110                        if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6111                                if (ipr_is_vset_device(res))
6112                                        failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6113                                else
6114                                        failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6115
6116                                sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6117                                sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6118                                sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6119                                sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6120                                sense_buf[6] = failing_lba & 0x000000ff;
6121                        }
6122
6123                        sense_buf[7] = 6;       /* additional length */
6124                }
6125        }
6126}
6127
6128/**
6129 * ipr_get_autosense - Copy autosense data to sense buffer
6130 * @ipr_cmd:    ipr command struct
6131 *
6132 * This function copies the autosense buffer to the buffer
6133 * in the scsi_cmd, if there is autosense available.
6134 *
6135 * Return value:
6136 *      1 if autosense was available / 0 if not
6137 **/
6138static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6139{
6140        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6141        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6142
6143        if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6144                return 0;
6145
6146        if (ipr_cmd->ioa_cfg->sis64)
6147                memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6148                       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6149                           SCSI_SENSE_BUFFERSIZE));
6150        else
6151                memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6152                       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6153                           SCSI_SENSE_BUFFERSIZE));
6154        return 1;
6155}
6156
6157/**
6158 * ipr_erp_start - Process an error response for a SCSI op
6159 * @ioa_cfg:    ioa config struct
6160 * @ipr_cmd:    ipr command struct
6161 *
6162 * This function determines whether or not to initiate ERP
6163 * on the affected device.
6164 *
6165 * Return value:
6166 *      nothing
6167 **/
6168static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6169                              struct ipr_cmnd *ipr_cmd)
6170{
6171        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6172        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6173        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6174        u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6175
6176        if (!res) {
6177                ipr_scsi_eh_done(ipr_cmd);
6178                return;
6179        }
6180
6181        if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6182                ipr_gen_sense(ipr_cmd);
6183
6184        ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6185
6186        switch (masked_ioasc) {
6187        case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6188                if (ipr_is_naca_model(res))
6189                        scsi_cmd->result |= (DID_ABORT << 16);
6190                else
6191                        scsi_cmd->result |= (DID_IMM_RETRY << 16);
6192                break;
6193        case IPR_IOASC_IR_RESOURCE_HANDLE:
6194        case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6195                scsi_cmd->result |= (DID_NO_CONNECT << 16);
6196                break;
6197        case IPR_IOASC_HW_SEL_TIMEOUT:
6198                scsi_cmd->result |= (DID_NO_CONNECT << 16);
6199                if (!ipr_is_naca_model(res))
6200                        res->needs_sync_complete = 1;
6201                break;
6202        case IPR_IOASC_SYNC_REQUIRED:
6203                if (!res->in_erp)
6204                        res->needs_sync_complete = 1;
6205                scsi_cmd->result |= (DID_IMM_RETRY << 16);
6206                break;
6207        case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6208        case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6209                scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6210                break;
6211        case IPR_IOASC_BUS_WAS_RESET:
6212        case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6213                /*
6214                 * Report the bus reset and ask for a retry. The device
6215                 * will give CC/UA the next command.
6216                 */
6217                if (!res->resetting_device)
6218                        scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6219                scsi_cmd->result |= (DID_ERROR << 16);
6220                if (!ipr_is_naca_model(res))
6221                        res->needs_sync_complete = 1;
6222                break;
6223        case IPR_IOASC_HW_DEV_BUS_STATUS:
6224                scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6225                if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6226                        if (!ipr_get_autosense(ipr_cmd)) {
6227                                if (!ipr_is_naca_model(res)) {
6228                                        ipr_erp_cancel_all(ipr_cmd);
6229                                        return;
6230                                }
6231                        }
6232                }
6233                if (!ipr_is_naca_model(res))
6234                        res->needs_sync_complete = 1;
6235                break;
6236        case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6237                break;
6238        case IPR_IOASC_IR_NON_OPTIMIZED:
6239                if (res->raw_mode) {
6240                        res->raw_mode = 0;
6241                        scsi_cmd->result |= (DID_IMM_RETRY << 16);
6242                } else
6243                        scsi_cmd->result |= (DID_ERROR << 16);
6244                break;
6245        default:
6246                if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6247                        scsi_cmd->result |= (DID_ERROR << 16);
6248                if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6249                        res->needs_sync_complete = 1;
6250                break;
6251        }
6252
6253        scsi_dma_unmap(ipr_cmd->scsi_cmd);
6254        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6255        scsi_cmd->scsi_done(scsi_cmd);
6256}
6257
6258/**
6259 * ipr_scsi_done - mid-layer done function
6260 * @ipr_cmd:    ipr command struct
6261 *
6262 * This function is invoked by the interrupt handler for
6263 * ops generated by the SCSI mid-layer
6264 *
6265 * Return value:
6266 *      none
6267 **/
6268static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6269{
6270        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6271        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6272        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6273        unsigned long lock_flags;
6274
6275        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6276
6277        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6278                scsi_dma_unmap(scsi_cmd);
6279
6280                spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6281                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6282                scsi_cmd->scsi_done(scsi_cmd);
6283                spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6284        } else {
6285                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6286                spin_lock(&ipr_cmd->hrrq->_lock);
6287                ipr_erp_start(ioa_cfg, ipr_cmd);
6288                spin_unlock(&ipr_cmd->hrrq->_lock);
6289                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6290        }
6291}
6292
6293/**
6294 * ipr_queuecommand - Queue a mid-layer request
6295 * @shost:              scsi host struct
6296 * @scsi_cmd:   scsi command struct
6297 *
6298 * This function queues a request generated by the mid-layer.
6299 *
6300 * Return value:
6301 *      0 on success
6302 *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6303 *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6304 **/
6305static int ipr_queuecommand(struct Scsi_Host *shost,
6306                            struct scsi_cmnd *scsi_cmd)
6307{
6308        struct ipr_ioa_cfg *ioa_cfg;
6309        struct ipr_resource_entry *res;
6310        struct ipr_ioarcb *ioarcb;
6311        struct ipr_cmnd *ipr_cmd;
6312        unsigned long hrrq_flags, lock_flags;
6313        int rc;
6314        struct ipr_hrr_queue *hrrq;
6315        int hrrq_id;
6316
6317        ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6318
6319        scsi_cmd->result = (DID_OK << 16);
6320        res = scsi_cmd->device->hostdata;
6321
6322        if (ipr_is_gata(res) && res->sata_port) {
6323                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6324                rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6325                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6326                return rc;
6327        }
6328
6329        hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6330        hrrq = &ioa_cfg->hrrq[hrrq_id];
6331
6332        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6333        /*
6334         * We are currently blocking all devices due to a host reset
6335         * We have told the host to stop giving us new requests, but
6336         * ERP ops don't count. FIXME
6337         */
6338        if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6339                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6340                return SCSI_MLQUEUE_HOST_BUSY;
6341        }
6342
6343        /*
6344         * FIXME - Create scsi_set_host_offline interface
6345         *  and the ioa_is_dead check can be removed
6346         */
6347        if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6348                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6349                goto err_nodev;
6350        }
6351
6352        ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6353        if (ipr_cmd == NULL) {
6354                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6355                return SCSI_MLQUEUE_HOST_BUSY;
6356        }
6357        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6358
6359        ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6360        ioarcb = &ipr_cmd->ioarcb;
6361
6362        memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6363        ipr_cmd->scsi_cmd = scsi_cmd;
6364        ipr_cmd->done = ipr_scsi_eh_done;
6365
6366        if (ipr_is_gscsi(res)) {
6367                if (scsi_cmd->underflow == 0)
6368                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6369
6370                if (res->reset_occurred) {
6371                        res->reset_occurred = 0;
6372                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6373                }
6374        }
6375
6376        if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6377                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6378
6379                ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6380                if (scsi_cmd->flags & SCMD_TAGGED)
6381                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6382                else
6383                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6384        }
6385
6386        if (scsi_cmd->cmnd[0] >= 0xC0 &&
6387            (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6388                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6389        }
6390        if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6391                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6392
6393                if (scsi_cmd->underflow == 0)
6394                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6395        }
6396
6397        if (ioa_cfg->sis64)
6398                rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6399        else
6400                rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6401
6402        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6403        if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6404                list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6405                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6406                if (!rc)
6407                        scsi_dma_unmap(scsi_cmd);
6408                return SCSI_MLQUEUE_HOST_BUSY;
6409        }
6410
6411        if (unlikely(hrrq->ioa_is_dead)) {
6412                list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6413                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6414                scsi_dma_unmap(scsi_cmd);
6415                goto err_nodev;
6416        }
6417
6418        ioarcb->res_handle = res->res_handle;
6419        if (res->needs_sync_complete) {
6420                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6421                res->needs_sync_complete = 0;
6422        }
6423        list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6424        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6425        ipr_send_command(ipr_cmd);
6426        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6427        return 0;
6428
6429err_nodev:
6430        spin_lock_irqsave(hrrq->lock, hrrq_flags);
6431        memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6432        scsi_cmd->result = (DID_NO_CONNECT << 16);
6433        scsi_cmd->scsi_done(scsi_cmd);
6434        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6435        return 0;
6436}
6437
6438/**
6439 * ipr_ioctl - IOCTL handler
6440 * @sdev:       scsi device struct
6441 * @cmd:        IOCTL cmd
6442 * @arg:        IOCTL arg
6443 *
6444 * Return value:
6445 *      0 on success / other on failure
6446 **/
6447static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6448{
6449        struct ipr_resource_entry *res;
6450
6451        res = (struct ipr_resource_entry *)sdev->hostdata;
6452        if (res && ipr_is_gata(res)) {
6453                if (cmd == HDIO_GET_IDENTITY)
6454                        return -ENOTTY;
6455                return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6456        }
6457
6458        return -EINVAL;
6459}
6460
6461/**
6462 * ipr_info - Get information about the card/driver
6463 * @scsi_host:  scsi host struct
6464 *
6465 * Return value:
6466 *      pointer to buffer with description string
6467 **/
6468static const char *ipr_ioa_info(struct Scsi_Host *host)
6469{
6470        static char buffer[512];
6471        struct ipr_ioa_cfg *ioa_cfg;
6472        unsigned long lock_flags = 0;
6473
6474        ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6475
6476        spin_lock_irqsave(host->host_lock, lock_flags);
6477        sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6478        spin_unlock_irqrestore(host->host_lock, lock_flags);
6479
6480        return buffer;
6481}
6482
6483static struct scsi_host_template driver_template = {
6484        .module = THIS_MODULE,
6485        .name = "IPR",
6486        .info = ipr_ioa_info,
6487        .ioctl = ipr_ioctl,
6488        .queuecommand = ipr_queuecommand,
6489        .eh_abort_handler = ipr_eh_abort,
6490        .eh_device_reset_handler = ipr_eh_dev_reset,
6491        .eh_host_reset_handler = ipr_eh_host_reset,
6492        .slave_alloc = ipr_slave_alloc,
6493        .slave_configure = ipr_slave_configure,
6494        .slave_destroy = ipr_slave_destroy,
6495        .scan_finished = ipr_scan_finished,
6496        .target_alloc = ipr_target_alloc,
6497        .target_destroy = ipr_target_destroy,
6498        .change_queue_depth = ipr_change_queue_depth,
6499        .bios_param = ipr_biosparam,
6500        .can_queue = IPR_MAX_COMMANDS,
6501        .this_id = -1,
6502        .sg_tablesize = IPR_MAX_SGLIST,
6503        .max_sectors = IPR_IOA_MAX_SECTORS,
6504        .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6505        .use_clustering = ENABLE_CLUSTERING,
6506        .shost_attrs = ipr_ioa_attrs,
6507        .sdev_attrs = ipr_dev_attrs,
6508        .proc_name = IPR_NAME,
6509};
6510
6511/**
6512 * ipr_ata_phy_reset - libata phy_reset handler
6513 * @ap:         ata port to reset
6514 *
6515 **/
6516static void ipr_ata_phy_reset(struct ata_port *ap)
6517{
6518        unsigned long flags;
6519        struct ipr_sata_port *sata_port = ap->private_data;
6520        struct ipr_resource_entry *res = sata_port->res;
6521        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6522        int rc;
6523
6524        ENTER;
6525        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6526        while (ioa_cfg->in_reset_reload) {
6527                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6528                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6529                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6530        }
6531
6532        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6533                goto out_unlock;
6534
6535        rc = ipr_device_reset(ioa_cfg, res);
6536
6537        if (rc) {
6538                ap->link.device[0].class = ATA_DEV_NONE;
6539                goto out_unlock;
6540        }
6541
6542        ap->link.device[0].class = res->ata_class;
6543        if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6544                ap->link.device[0].class = ATA_DEV_NONE;
6545
6546out_unlock:
6547        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6548        LEAVE;
6549}
6550
6551/**
6552 * ipr_ata_post_internal - Cleanup after an internal command
6553 * @qc: ATA queued command
6554 *
6555 * Return value:
6556 *      none
6557 **/
6558static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6559{
6560        struct ipr_sata_port *sata_port = qc->ap->private_data;
6561        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6562        struct ipr_cmnd *ipr_cmd;
6563        struct ipr_hrr_queue *hrrq;
6564        unsigned long flags;
6565
6566        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6567        while (ioa_cfg->in_reset_reload) {
6568                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6569                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6570                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6571        }
6572
6573        for_each_hrrq(hrrq, ioa_cfg) {
6574                spin_lock(&hrrq->_lock);
6575                list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6576                        if (ipr_cmd->qc == qc) {
6577                                ipr_device_reset(ioa_cfg, sata_port->res);
6578                                break;
6579                        }
6580                }
6581                spin_unlock(&hrrq->_lock);
6582        }
6583        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6584}
6585
6586/**
6587 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6588 * @regs:       destination
6589 * @tf: source ATA taskfile
6590 *
6591 * Return value:
6592 *      none
6593 **/
6594static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6595                             struct ata_taskfile *tf)
6596{
6597        regs->feature = tf->feature;
6598        regs->nsect = tf->nsect;
6599        regs->lbal = tf->lbal;
6600        regs->lbam = tf->lbam;
6601        regs->lbah = tf->lbah;
6602        regs->device = tf->device;
6603        regs->command = tf->command;
6604        regs->hob_feature = tf->hob_feature;
6605        regs->hob_nsect = tf->hob_nsect;
6606        regs->hob_lbal = tf->hob_lbal;
6607        regs->hob_lbam = tf->hob_lbam;
6608        regs->hob_lbah = tf->hob_lbah;
6609        regs->ctl = tf->ctl;
6610}
6611
6612/**
6613 * ipr_sata_done - done function for SATA commands
6614 * @ipr_cmd:    ipr command struct
6615 *
6616 * This function is invoked by the interrupt handler for
6617 * ops generated by the SCSI mid-layer to SATA devices
6618 *
6619 * Return value:
6620 *      none
6621 **/
6622static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6623{
6624        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6625        struct ata_queued_cmd *qc = ipr_cmd->qc;
6626        struct ipr_sata_port *sata_port = qc->ap->private_data;
6627        struct ipr_resource_entry *res = sata_port->res;
6628        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6629
6630        spin_lock(&ipr_cmd->hrrq->_lock);
6631        if (ipr_cmd->ioa_cfg->sis64)
6632                memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6633                       sizeof(struct ipr_ioasa_gata));
6634        else
6635                memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6636                       sizeof(struct ipr_ioasa_gata));
6637        ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6638
6639        if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6640                scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6641
6642        if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6643                qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6644        else
6645                qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6646        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6647        spin_unlock(&ipr_cmd->hrrq->_lock);
6648        ata_qc_complete(qc);
6649}
6650
6651/**
6652 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6653 * @ipr_cmd:    ipr command struct
6654 * @qc:         ATA queued command
6655 *
6656 **/
6657static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6658                                  struct ata_queued_cmd *qc)
6659{
6660        u32 ioadl_flags = 0;
6661        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6662        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6663        struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6664        int len = qc->nbytes;
6665        struct scatterlist *sg;
6666        unsigned int si;
6667        dma_addr_t dma_addr = ipr_cmd->dma_addr;
6668
6669        if (len == 0)
6670                return;
6671
6672        if (qc->dma_dir == DMA_TO_DEVICE) {
6673                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6674                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6675        } else if (qc->dma_dir == DMA_FROM_DEVICE)
6676                ioadl_flags = IPR_IOADL_FLAGS_READ;
6677
6678        ioarcb->data_transfer_length = cpu_to_be32(len);
6679        ioarcb->ioadl_len =
6680                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6681        ioarcb->u.sis64_addr_data.data_ioadl_addr =
6682                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6683
6684        for_each_sg(qc->sg, sg, qc->n_elem, si) {
6685                ioadl64->flags = cpu_to_be32(ioadl_flags);
6686                ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6687                ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6688
6689                last_ioadl64 = ioadl64;
6690                ioadl64++;
6691        }
6692
6693        if (likely(last_ioadl64))
6694                last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6695}
6696
6697/**
6698 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6699 * @ipr_cmd:    ipr command struct
6700 * @qc:         ATA queued command
6701 *
6702 **/
6703static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6704                                struct ata_queued_cmd *qc)
6705{
6706        u32 ioadl_flags = 0;
6707        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6708        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6709        struct ipr_ioadl_desc *last_ioadl = NULL;
6710        int len = qc->nbytes;
6711        struct scatterlist *sg;
6712        unsigned int si;
6713
6714        if (len == 0)
6715                return;
6716
6717        if (qc->dma_dir == DMA_TO_DEVICE) {
6718                ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6719                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6720                ioarcb->data_transfer_length = cpu_to_be32(len);
6721                ioarcb->ioadl_len =
6722                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6723        } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6724                ioadl_flags = IPR_IOADL_FLAGS_READ;
6725                ioarcb->read_data_transfer_length = cpu_to_be32(len);
6726                ioarcb->read_ioadl_len =
6727                        cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6728        }
6729
6730        for_each_sg(qc->sg, sg, qc->n_elem, si) {
6731                ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6732                ioadl->address = cpu_to_be32(sg_dma_address(sg));
6733
6734                last_ioadl = ioadl;
6735                ioadl++;
6736        }
6737
6738        if (likely(last_ioadl))
6739                last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6740}
6741
6742/**
6743 * ipr_qc_defer - Get a free ipr_cmd
6744 * @qc: queued command
6745 *
6746 * Return value:
6747 *      0 if success
6748 **/
6749static int ipr_qc_defer(struct ata_queued_cmd *qc)
6750{
6751        struct ata_port *ap = qc->ap;
6752        struct ipr_sata_port *sata_port = ap->private_data;
6753        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6754        struct ipr_cmnd *ipr_cmd;
6755        struct ipr_hrr_queue *hrrq;
6756        int hrrq_id;
6757
6758        hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6759        hrrq = &ioa_cfg->hrrq[hrrq_id];
6760
6761        qc->lldd_task = NULL;
6762        spin_lock(&hrrq->_lock);
6763        if (unlikely(hrrq->ioa_is_dead)) {
6764                spin_unlock(&hrrq->_lock);
6765                return 0;
6766        }
6767
6768        if (unlikely(!hrrq->allow_cmds)) {
6769                spin_unlock(&hrrq->_lock);
6770                return ATA_DEFER_LINK;
6771        }
6772
6773        ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6774        if (ipr_cmd == NULL) {
6775                spin_unlock(&hrrq->_lock);
6776                return ATA_DEFER_LINK;
6777        }
6778
6779        qc->lldd_task = ipr_cmd;
6780        spin_unlock(&hrrq->_lock);
6781        return 0;
6782}
6783
6784/**
6785 * ipr_qc_issue - Issue a SATA qc to a device
6786 * @qc: queued command
6787 *
6788 * Return value:
6789 *      0 if success
6790 **/
6791static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6792{
6793        struct ata_port *ap = qc->ap;
6794        struct ipr_sata_port *sata_port = ap->private_data;
6795        struct ipr_resource_entry *res = sata_port->res;
6796        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6797        struct ipr_cmnd *ipr_cmd;
6798        struct ipr_ioarcb *ioarcb;
6799        struct ipr_ioarcb_ata_regs *regs;
6800
6801        if (qc->lldd_task == NULL)
6802                ipr_qc_defer(qc);
6803
6804        ipr_cmd = qc->lldd_task;
6805        if (ipr_cmd == NULL)
6806                return AC_ERR_SYSTEM;
6807
6808        qc->lldd_task = NULL;
6809        spin_lock(&ipr_cmd->hrrq->_lock);
6810        if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6811                        ipr_cmd->hrrq->ioa_is_dead)) {
6812                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6813                spin_unlock(&ipr_cmd->hrrq->_lock);
6814                return AC_ERR_SYSTEM;
6815        }
6816
6817        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6818        ioarcb = &ipr_cmd->ioarcb;
6819
6820        if (ioa_cfg->sis64) {
6821                regs = &ipr_cmd->i.ata_ioadl.regs;
6822                ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6823        } else
6824                regs = &ioarcb->u.add_data.u.regs;
6825
6826        memset(regs, 0, sizeof(*regs));
6827        ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6828
6829        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6830        ipr_cmd->qc = qc;
6831        ipr_cmd->done = ipr_sata_done;
6832        ipr_cmd->ioarcb.res_handle = res->res_handle;
6833        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6834        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6835        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6836        ipr_cmd->dma_use_sg = qc->n_elem;
6837
6838        if (ioa_cfg->sis64)
6839                ipr_build_ata_ioadl64(ipr_cmd, qc);
6840        else
6841                ipr_build_ata_ioadl(ipr_cmd, qc);
6842
6843        regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6844        ipr_copy_sata_tf(regs, &qc->tf);
6845        memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6846        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6847
6848        switch (qc->tf.protocol) {
6849        case ATA_PROT_NODATA:
6850        case ATA_PROT_PIO:
6851                break;
6852
6853        case ATA_PROT_DMA:
6854                regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6855                break;
6856
6857        case ATAPI_PROT_PIO:
6858        case ATAPI_PROT_NODATA:
6859                regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6860                break;
6861
6862        case ATAPI_PROT_DMA:
6863                regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6864                regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6865                break;
6866
6867        default:
6868                WARN_ON(1);
6869                spin_unlock(&ipr_cmd->hrrq->_lock);
6870                return AC_ERR_INVALID;
6871        }
6872
6873        ipr_send_command(ipr_cmd);
6874        spin_unlock(&ipr_cmd->hrrq->_lock);
6875
6876        return 0;
6877}
6878
6879/**
6880 * ipr_qc_fill_rtf - Read result TF
6881 * @qc: ATA queued command
6882 *
6883 * Return value:
6884 *      true
6885 **/
6886static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6887{
6888        struct ipr_sata_port *sata_port = qc->ap->private_data;
6889        struct ipr_ioasa_gata *g = &sata_port->ioasa;
6890        struct ata_taskfile *tf = &qc->result_tf;
6891
6892        tf->feature = g->error;
6893        tf->nsect = g->nsect;
6894        tf->lbal = g->lbal;
6895        tf->lbam = g->lbam;
6896        tf->lbah = g->lbah;
6897        tf->device = g->device;
6898        tf->command = g->status;
6899        tf->hob_nsect = g->hob_nsect;
6900        tf->hob_lbal = g->hob_lbal;
6901        tf->hob_lbam = g->hob_lbam;
6902        tf->hob_lbah = g->hob_lbah;
6903
6904        return true;
6905}
6906
6907static struct ata_port_operations ipr_sata_ops = {
6908        .phy_reset = ipr_ata_phy_reset,
6909        .hardreset = ipr_sata_reset,
6910        .post_internal_cmd = ipr_ata_post_internal,
6911        .qc_prep = ata_noop_qc_prep,
6912        .qc_defer = ipr_qc_defer,
6913        .qc_issue = ipr_qc_issue,
6914        .qc_fill_rtf = ipr_qc_fill_rtf,
6915        .port_start = ata_sas_port_start,
6916        .port_stop = ata_sas_port_stop
6917};
6918
6919static struct ata_port_info sata_port_info = {
6920        .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6921                          ATA_FLAG_SAS_HOST,
6922        .pio_mask       = ATA_PIO4_ONLY,
6923        .mwdma_mask     = ATA_MWDMA2,
6924        .udma_mask      = ATA_UDMA6,
6925        .port_ops       = &ipr_sata_ops
6926};
6927
6928#ifdef CONFIG_PPC_PSERIES
6929static const u16 ipr_blocked_processors[] = {
6930        PVR_NORTHSTAR,
6931        PVR_PULSAR,
6932        PVR_POWER4,
6933        PVR_ICESTAR,
6934        PVR_SSTAR,
6935        PVR_POWER4p,
6936        PVR_630,
6937        PVR_630p
6938};
6939
6940/**
6941 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6942 * @ioa_cfg:    ioa cfg struct
6943 *
6944 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6945 * certain pSeries hardware. This function determines if the given
6946 * adapter is in one of these confgurations or not.
6947 *
6948 * Return value:
6949 *      1 if adapter is not supported / 0 if adapter is supported
6950 **/
6951static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6952{
6953        int i;
6954
6955        if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6956                for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6957                        if (pvr_version_is(ipr_blocked_processors[i]))
6958                                return 1;
6959                }
6960        }
6961        return 0;
6962}
6963#else
6964#define ipr_invalid_adapter(ioa_cfg) 0
6965#endif
6966
6967/**
6968 * ipr_ioa_bringdown_done - IOA bring down completion.
6969 * @ipr_cmd:    ipr command struct
6970 *
6971 * This function processes the completion of an adapter bring down.
6972 * It wakes any reset sleepers.
6973 *
6974 * Return value:
6975 *      IPR_RC_JOB_RETURN
6976 **/
6977static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6978{
6979        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6980        int i;
6981
6982        ENTER;
6983        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6984                ipr_trace;
6985                spin_unlock_irq(ioa_cfg->host->host_lock);
6986                scsi_unblock_requests(ioa_cfg->host);
6987                spin_lock_irq(ioa_cfg->host->host_lock);
6988        }
6989
6990        ioa_cfg->in_reset_reload = 0;
6991        ioa_cfg->reset_retries = 0;
6992        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6993                spin_lock(&ioa_cfg->hrrq[i]._lock);
6994                ioa_cfg->hrrq[i].ioa_is_dead = 1;
6995                spin_unlock(&ioa_cfg->hrrq[i]._lock);
6996        }
6997        wmb();
6998
6999        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7000        wake_up_all(&ioa_cfg->reset_wait_q);
7001        LEAVE;
7002
7003        return IPR_RC_JOB_RETURN;
7004}
7005
7006/**
7007 * ipr_ioa_reset_done - IOA reset completion.
7008 * @ipr_cmd:    ipr command struct
7009 *
7010 * This function processes the completion of an adapter reset.
7011 * It schedules any necessary mid-layer add/removes and
7012 * wakes any reset sleepers.
7013 *
7014 * Return value:
7015 *      IPR_RC_JOB_RETURN
7016 **/
7017static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7018{
7019        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7020        struct ipr_resource_entry *res;
7021        struct ipr_hostrcb *hostrcb, *temp;
7022        int i = 0, j;
7023
7024        ENTER;
7025        ioa_cfg->in_reset_reload = 0;
7026        for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7027                spin_lock(&ioa_cfg->hrrq[j]._lock);
7028                ioa_cfg->hrrq[j].allow_cmds = 1;
7029                spin_unlock(&ioa_cfg->hrrq[j]._lock);
7030        }
7031        wmb();
7032        ioa_cfg->reset_cmd = NULL;
7033        ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7034
7035        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7036                if (res->add_to_ml || res->del_from_ml) {
7037                        ipr_trace;
7038                        break;
7039                }
7040        }
7041        schedule_work(&ioa_cfg->work_q);
7042
7043        list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
7044                list_del(&hostrcb->queue);
7045                if (i++ < IPR_NUM_LOG_HCAMS)
7046                        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
7047                else
7048                        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
7049        }
7050
7051        scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7052        dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7053
7054        ioa_cfg->reset_retries = 0;
7055        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7056        wake_up_all(&ioa_cfg->reset_wait_q);
7057
7058        spin_unlock(ioa_cfg->host->host_lock);
7059        scsi_unblock_requests(ioa_cfg->host);
7060        spin_lock(ioa_cfg->host->host_lock);
7061
7062        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7063                scsi_block_requests(ioa_cfg->host);
7064
7065        schedule_work(&ioa_cfg->work_q);
7066        LEAVE;
7067        return IPR_RC_JOB_RETURN;
7068}
7069
7070/**
7071 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7072 * @supported_dev:      supported device struct
7073 * @vpids:                      vendor product id struct
7074 *
7075 * Return value:
7076 *      none
7077 **/
7078static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7079                                 struct ipr_std_inq_vpids *vpids)
7080{
7081        memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7082        memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7083        supported_dev->num_records = 1;
7084        supported_dev->data_length =
7085                cpu_to_be16(sizeof(struct ipr_supported_device));
7086        supported_dev->reserved = 0;
7087}
7088
7089/**
7090 * ipr_set_supported_devs - Send Set Supported Devices for a device
7091 * @ipr_cmd:    ipr command struct
7092 *
7093 * This function sends a Set Supported Devices to the adapter
7094 *
7095 * Return value:
7096 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7097 **/
7098static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7099{
7100        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7101        struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7102        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7103        struct ipr_resource_entry *res = ipr_cmd->u.res;
7104
7105        ipr_cmd->job_step = ipr_ioa_reset_done;
7106
7107        list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7108                if (!ipr_is_scsi_disk(res))
7109                        continue;
7110
7111                ipr_cmd->u.res = res;
7112                ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7113
7114                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7115                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7116                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7117
7118                ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7119                ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7120                ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7121                ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7122
7123                ipr_init_ioadl(ipr_cmd,
7124                               ioa_cfg->vpd_cbs_dma +
7125                                 offsetof(struct ipr_misc_cbs, supp_dev),
7126                               sizeof(struct ipr_supported_device),
7127                               IPR_IOADL_FLAGS_WRITE_LAST);
7128
7129                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7130                           IPR_SET_SUP_DEVICE_TIMEOUT);
7131
7132                if (!ioa_cfg->sis64)
7133                        ipr_cmd->job_step = ipr_set_supported_devs;
7134                LEAVE;
7135                return IPR_RC_JOB_RETURN;
7136        }
7137
7138        LEAVE;
7139        return IPR_RC_JOB_CONTINUE;
7140}
7141
7142/**
7143 * ipr_get_mode_page - Locate specified mode page
7144 * @mode_pages: mode page buffer
7145 * @page_code:  page code to find
7146 * @len:                minimum required length for mode page
7147 *
7148 * Return value:
7149 *      pointer to mode page / NULL on failure
7150 **/
7151static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7152                               u32 page_code, u32 len)
7153{
7154        struct ipr_mode_page_hdr *mode_hdr;
7155        u32 page_length;
7156        u32 length;
7157
7158        if (!mode_pages || (mode_pages->hdr.length == 0))
7159                return NULL;
7160
7161        length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7162        mode_hdr = (struct ipr_mode_page_hdr *)
7163                (mode_pages->data + mode_pages->hdr.block_desc_len);
7164
7165        while (length) {
7166                if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7167                        if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7168                                return mode_hdr;
7169                        break;
7170                } else {
7171                        page_length = (sizeof(struct ipr_mode_page_hdr) +
7172                                       mode_hdr->page_length);
7173                        length -= page_length;
7174                        mode_hdr = (struct ipr_mode_page_hdr *)
7175                                ((unsigned long)mode_hdr + page_length);
7176                }
7177        }
7178        return NULL;
7179}
7180
7181/**
7182 * ipr_check_term_power - Check for term power errors
7183 * @ioa_cfg:    ioa config struct
7184 * @mode_pages: IOAFP mode pages buffer
7185 *
7186 * Check the IOAFP's mode page 28 for term power errors
7187 *
7188 * Return value:
7189 *      nothing
7190 **/
7191static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7192                                 struct ipr_mode_pages *mode_pages)
7193{
7194        int i;
7195        int entry_length;
7196        struct ipr_dev_bus_entry *bus;
7197        struct ipr_mode_page28 *mode_page;
7198
7199        mode_page = ipr_get_mode_page(mode_pages, 0x28,
7200                                      sizeof(struct ipr_mode_page28));
7201
7202        entry_length = mode_page->entry_length;
7203
7204        bus = mode_page->bus;
7205
7206        for (i = 0; i < mode_page->num_entries; i++) {
7207                if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7208                        dev_err(&ioa_cfg->pdev->dev,
7209                                "Term power is absent on scsi bus %d\n",
7210                                bus->res_addr.bus);
7211                }
7212
7213                bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7214        }
7215}
7216
7217/**
7218 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7219 * @ioa_cfg:    ioa config struct
7220 *
7221 * Looks through the config table checking for SES devices. If
7222 * the SES device is in the SES table indicating a maximum SCSI
7223 * bus speed, the speed is limited for the bus.
7224 *
7225 * Return value:
7226 *      none
7227 **/
7228static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7229{
7230        u32 max_xfer_rate;
7231        int i;
7232
7233        for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7234                max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7235                                                       ioa_cfg->bus_attr[i].bus_width);
7236
7237                if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7238                        ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7239        }
7240}
7241
7242/**
7243 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7244 * @ioa_cfg:    ioa config struct
7245 * @mode_pages: mode page 28 buffer
7246 *
7247 * Updates mode page 28 based on driver configuration
7248 *
7249 * Return value:
7250 *      none
7251 **/
7252static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7253                                          struct ipr_mode_pages *mode_pages)
7254{
7255        int i, entry_length;
7256        struct ipr_dev_bus_entry *bus;
7257        struct ipr_bus_attributes *bus_attr;
7258        struct ipr_mode_page28 *mode_page;
7259
7260        mode_page = ipr_get_mode_page(mode_pages, 0x28,
7261                                      sizeof(struct ipr_mode_page28));
7262
7263        entry_length = mode_page->entry_length;
7264
7265        /* Loop for each device bus entry */
7266        for (i = 0, bus = mode_page->bus;
7267             i < mode_page->num_entries;
7268             i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7269                if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7270                        dev_err(&ioa_cfg->pdev->dev,
7271                                "Invalid resource address reported: 0x%08X\n",
7272                                IPR_GET_PHYS_LOC(bus->res_addr));
7273                        continue;
7274                }
7275
7276                bus_attr = &ioa_cfg->bus_attr[i];
7277                bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7278                bus->bus_width = bus_attr->bus_width;
7279                bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7280                bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7281                if (bus_attr->qas_enabled)
7282                        bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7283                else
7284                        bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7285        }
7286}
7287
7288/**
7289 * ipr_build_mode_select - Build a mode select command
7290 * @ipr_cmd:    ipr command struct
7291 * @res_handle: resource handle to send command to
7292 * @parm:               Byte 2 of Mode Sense command
7293 * @dma_addr:   DMA buffer address
7294 * @xfer_len:   data transfer length
7295 *
7296 * Return value:
7297 *      none
7298 **/
7299static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7300                                  __be32 res_handle, u8 parm,
7301                                  dma_addr_t dma_addr, u8 xfer_len)
7302{
7303        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7304
7305        ioarcb->res_handle = res_handle;
7306        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7307        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7308        ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7309        ioarcb->cmd_pkt.cdb[1] = parm;
7310        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7311
7312        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7313}
7314
7315/**
7316 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7317 * @ipr_cmd:    ipr command struct
7318 *
7319 * This function sets up the SCSI bus attributes and sends
7320 * a Mode Select for Page 28 to activate them.
7321 *
7322 * Return value:
7323 *      IPR_RC_JOB_RETURN
7324 **/
7325static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7326{
7327        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7328        struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7329        int length;
7330
7331        ENTER;
7332        ipr_scsi_bus_speed_limit(ioa_cfg);
7333        ipr_check_term_power(ioa_cfg, mode_pages);
7334        ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7335        length = mode_pages->hdr.length + 1;
7336        mode_pages->hdr.length = 0;
7337
7338        ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7339                              ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7340                              length);
7341
7342        ipr_cmd->job_step = ipr_set_supported_devs;
7343        ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7344                                    struct ipr_resource_entry, queue);
7345        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7346
7347        LEAVE;
7348        return IPR_RC_JOB_RETURN;
7349}
7350
7351/**
7352 * ipr_build_mode_sense - Builds a mode sense command
7353 * @ipr_cmd:    ipr command struct
7354 * @res:                resource entry struct
7355 * @parm:               Byte 2 of mode sense command
7356 * @dma_addr:   DMA address of mode sense buffer
7357 * @xfer_len:   Size of DMA buffer
7358 *
7359 * Return value:
7360 *      none
7361 **/
7362static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7363                                 __be32 res_handle,
7364                                 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7365{
7366        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7367
7368        ioarcb->res_handle = res_handle;
7369        ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7370        ioarcb->cmd_pkt.cdb[2] = parm;
7371        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7372        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7373
7374        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7375}
7376
7377/**
7378 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7379 * @ipr_cmd:    ipr command struct
7380 *
7381 * This function handles the failure of an IOA bringup command.
7382 *
7383 * Return value:
7384 *      IPR_RC_JOB_RETURN
7385 **/
7386static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7387{
7388        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7389        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7390
7391        dev_err(&ioa_cfg->pdev->dev,
7392                "0x%02X failed with IOASC: 0x%08X\n",
7393                ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7394
7395        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7396        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7397        return IPR_RC_JOB_RETURN;
7398}
7399
7400/**
7401 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7402 * @ipr_cmd:    ipr command struct
7403 *
7404 * This function handles the failure of a Mode Sense to the IOAFP.
7405 * Some adapters do not handle all mode pages.
7406 *
7407 * Return value:
7408 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7409 **/
7410static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7411{
7412        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7413        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7414
7415        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7416                ipr_cmd->job_step = ipr_set_supported_devs;
7417                ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7418                                            struct ipr_resource_entry, queue);
7419                return IPR_RC_JOB_CONTINUE;
7420        }
7421
7422        return ipr_reset_cmd_failed(ipr_cmd);
7423}
7424
7425/**
7426 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7427 * @ipr_cmd:    ipr command struct
7428 *
7429 * This function send a Page 28 mode sense to the IOA to
7430 * retrieve SCSI bus attributes.
7431 *
7432 * Return value:
7433 *      IPR_RC_JOB_RETURN
7434 **/
7435static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7436{
7437        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7438
7439        ENTER;
7440        ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7441                             0x28, ioa_cfg->vpd_cbs_dma +
7442                             offsetof(struct ipr_misc_cbs, mode_pages),
7443                             sizeof(struct ipr_mode_pages));
7444
7445        ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7446        ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7447
7448        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7449
7450        LEAVE;
7451        return IPR_RC_JOB_RETURN;
7452}
7453
7454/**
7455 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7456 * @ipr_cmd:    ipr command struct
7457 *
7458 * This function enables dual IOA RAID support if possible.
7459 *
7460 * Return value:
7461 *      IPR_RC_JOB_RETURN
7462 **/
7463static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7464{
7465        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7466        struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7467        struct ipr_mode_page24 *mode_page;
7468        int length;
7469
7470        ENTER;
7471        mode_page = ipr_get_mode_page(mode_pages, 0x24,
7472                                      sizeof(struct ipr_mode_page24));
7473
7474        if (mode_page)
7475                mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7476
7477        length = mode_pages->hdr.length + 1;
7478        mode_pages->hdr.length = 0;
7479
7480        ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7481                              ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7482                              length);
7483
7484        ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7485        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7486
7487        LEAVE;
7488        return IPR_RC_JOB_RETURN;
7489}
7490
7491/**
7492 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7493 * @ipr_cmd:    ipr command struct
7494 *
7495 * This function handles the failure of a Mode Sense to the IOAFP.
7496 * Some adapters do not handle all mode pages.
7497 *
7498 * Return value:
7499 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7500 **/
7501static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7502{
7503        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7504
7505        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7506                ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7507                return IPR_RC_JOB_CONTINUE;
7508        }
7509
7510        return ipr_reset_cmd_failed(ipr_cmd);
7511}
7512
7513/**
7514 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7515 * @ipr_cmd:    ipr command struct
7516 *
7517 * This function send a mode sense to the IOA to retrieve
7518 * the IOA Advanced Function Control mode page.
7519 *
7520 * Return value:
7521 *      IPR_RC_JOB_RETURN
7522 **/
7523static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7524{
7525        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7526
7527        ENTER;
7528        ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7529                             0x24, ioa_cfg->vpd_cbs_dma +
7530                             offsetof(struct ipr_misc_cbs, mode_pages),
7531                             sizeof(struct ipr_mode_pages));
7532
7533        ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7534        ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7535
7536        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7537
7538        LEAVE;
7539        return IPR_RC_JOB_RETURN;
7540}
7541
7542/**
7543 * ipr_init_res_table - Initialize the resource table
7544 * @ipr_cmd:    ipr command struct
7545 *
7546 * This function looks through the existing resource table, comparing
7547 * it with the config table. This function will take care of old/new
7548 * devices and schedule adding/removing them from the mid-layer
7549 * as appropriate.
7550 *
7551 * Return value:
7552 *      IPR_RC_JOB_CONTINUE
7553 **/
7554static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7555{
7556        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7557        struct ipr_resource_entry *res, *temp;
7558        struct ipr_config_table_entry_wrapper cfgtew;
7559        int entries, found, flag, i;
7560        LIST_HEAD(old_res);
7561
7562        ENTER;
7563        if (ioa_cfg->sis64)
7564                flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7565        else
7566                flag = ioa_cfg->u.cfg_table->hdr.flags;
7567
7568        if (flag & IPR_UCODE_DOWNLOAD_REQ)
7569                dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7570
7571        list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7572                list_move_tail(&res->queue, &old_res);
7573
7574        if (ioa_cfg->sis64)
7575                entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7576        else
7577                entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7578
7579        for (i = 0; i < entries; i++) {
7580                if (ioa_cfg->sis64)
7581                        cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7582                else
7583                        cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7584                found = 0;
7585
7586                list_for_each_entry_safe(res, temp, &old_res, queue) {
7587                        if (ipr_is_same_device(res, &cfgtew)) {
7588                                list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7589                                found = 1;
7590                                break;
7591                        }
7592                }
7593
7594                if (!found) {
7595                        if (list_empty(&ioa_cfg->free_res_q)) {
7596                                dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7597                                break;
7598                        }
7599
7600                        found = 1;
7601                        res = list_entry(ioa_cfg->free_res_q.next,
7602                                         struct ipr_resource_entry, queue);
7603                        list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7604                        ipr_init_res_entry(res, &cfgtew);
7605                        res->add_to_ml = 1;
7606                } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7607                        res->sdev->allow_restart = 1;
7608
7609                if (found)
7610                        ipr_update_res_entry(res, &cfgtew);
7611        }
7612
7613        list_for_each_entry_safe(res, temp, &old_res, queue) {
7614                if (res->sdev) {
7615                        res->del_from_ml = 1;
7616                        res->res_handle = IPR_INVALID_RES_HANDLE;
7617                        list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7618                }
7619        }
7620
7621        list_for_each_entry_safe(res, temp, &old_res, queue) {
7622                ipr_clear_res_target(res);
7623                list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7624        }
7625
7626        if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7627                ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7628        else
7629                ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7630
7631        LEAVE;
7632        return IPR_RC_JOB_CONTINUE;
7633}
7634
7635/**
7636 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7637 * @ipr_cmd:    ipr command struct
7638 *
7639 * This function sends a Query IOA Configuration command
7640 * to the adapter to retrieve the IOA configuration table.
7641 *
7642 * Return value:
7643 *      IPR_RC_JOB_RETURN
7644 **/
7645static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7646{
7647        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7648        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7649        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7650        struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7651
7652        ENTER;
7653        if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7654                ioa_cfg->dual_raid = 1;
7655        dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7656                 ucode_vpd->major_release, ucode_vpd->card_type,
7657                 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7658        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7659        ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7660
7661        ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7662        ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7663        ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7664        ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7665
7666        ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7667                       IPR_IOADL_FLAGS_READ_LAST);
7668
7669        ipr_cmd->job_step = ipr_init_res_table;
7670
7671        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7672
7673        LEAVE;
7674        return IPR_RC_JOB_RETURN;
7675}
7676
7677static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7678{
7679        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7680
7681        if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7682                return IPR_RC_JOB_CONTINUE;
7683
7684        return ipr_reset_cmd_failed(ipr_cmd);
7685}
7686
7687static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7688                                         __be32 res_handle, u8 sa_code)
7689{
7690        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7691
7692        ioarcb->res_handle = res_handle;
7693        ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7694        ioarcb->cmd_pkt.cdb[1] = sa_code;
7695        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7696}
7697
7698/**
7699 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7700 * action
7701 *
7702 * Return value:
7703 *      none
7704 **/
7705static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7706{
7707        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7708        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7709        struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7710
7711        ENTER;
7712
7713        ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7714
7715        if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7716                ipr_build_ioa_service_action(ipr_cmd,
7717                                             cpu_to_be32(IPR_IOA_RES_HANDLE),
7718                                             IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7719
7720                ioarcb->cmd_pkt.cdb[2] = 0x40;
7721
7722                ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7723                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7724                           IPR_SET_SUP_DEVICE_TIMEOUT);
7725
7726                LEAVE;
7727                return IPR_RC_JOB_RETURN;
7728        }
7729
7730        LEAVE;
7731        return IPR_RC_JOB_CONTINUE;
7732}
7733
7734/**
7735 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7736 * @ipr_cmd:    ipr command struct
7737 *
7738 * This utility function sends an inquiry to the adapter.
7739 *
7740 * Return value:
7741 *      none
7742 **/
7743static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7744                              dma_addr_t dma_addr, u8 xfer_len)
7745{
7746        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7747
7748        ENTER;
7749        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7750        ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7751
7752        ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7753        ioarcb->cmd_pkt.cdb[1] = flags;
7754        ioarcb->cmd_pkt.cdb[2] = page;
7755        ioarcb->cmd_pkt.cdb[4] = xfer_len;
7756
7757        ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7758
7759        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7760        LEAVE;
7761}
7762
7763/**
7764 * ipr_inquiry_page_supported - Is the given inquiry page supported
7765 * @page0:              inquiry page 0 buffer
7766 * @page:               page code.
7767 *
7768 * This function determines if the specified inquiry page is supported.
7769 *
7770 * Return value:
7771 *      1 if page is supported / 0 if not
7772 **/
7773static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7774{
7775        int i;
7776
7777        for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7778                if (page0->page[i] == page)
7779                        return 1;
7780
7781        return 0;
7782}
7783
7784/**
7785 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7786 * @ipr_cmd:    ipr command struct
7787 *
7788 * This function sends a Page 0xC4 inquiry to the adapter
7789 * to retrieve software VPD information.
7790 *
7791 * Return value:
7792 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7793 **/
7794static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7795{
7796        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7797        struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7798        struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7799
7800        ENTER;
7801        ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
7802        memset(pageC4, 0, sizeof(*pageC4));
7803
7804        if (ipr_inquiry_page_supported(page0, 0xC4)) {
7805                ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7806                                  (ioa_cfg->vpd_cbs_dma
7807                                   + offsetof(struct ipr_misc_cbs,
7808                                              pageC4_data)),
7809                                  sizeof(struct ipr_inquiry_pageC4));
7810                return IPR_RC_JOB_RETURN;
7811        }
7812
7813        LEAVE;
7814        return IPR_RC_JOB_CONTINUE;
7815}
7816
7817/**
7818 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7819 * @ipr_cmd:    ipr command struct
7820 *
7821 * This function sends a Page 0xD0 inquiry to the adapter
7822 * to retrieve adapter capabilities.
7823 *
7824 * Return value:
7825 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7826 **/
7827static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7828{
7829        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7830        struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7831        struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7832
7833        ENTER;
7834        ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
7835        memset(cap, 0, sizeof(*cap));
7836
7837        if (ipr_inquiry_page_supported(page0, 0xD0)) {
7838                ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7839                                  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7840                                  sizeof(struct ipr_inquiry_cap));
7841                return IPR_RC_JOB_RETURN;
7842        }
7843
7844        LEAVE;
7845        return IPR_RC_JOB_CONTINUE;
7846}
7847
7848/**
7849 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7850 * @ipr_cmd:    ipr command struct
7851 *
7852 * This function sends a Page 3 inquiry to the adapter
7853 * to retrieve software VPD information.
7854 *
7855 * Return value:
7856 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7857 **/
7858static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7859{
7860        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7861
7862        ENTER;
7863
7864        ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7865
7866        ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7867                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7868                          sizeof(struct ipr_inquiry_page3));
7869
7870        LEAVE;
7871        return IPR_RC_JOB_RETURN;
7872}
7873
7874/**
7875 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7876 * @ipr_cmd:    ipr command struct
7877 *
7878 * This function sends a Page 0 inquiry to the adapter
7879 * to retrieve supported inquiry pages.
7880 *
7881 * Return value:
7882 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7883 **/
7884static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7885{
7886        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7887        char type[5];
7888
7889        ENTER;
7890
7891        /* Grab the type out of the VPD and store it away */
7892        memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7893        type[4] = '\0';
7894        ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7895
7896        if (ipr_invalid_adapter(ioa_cfg)) {
7897                dev_err(&ioa_cfg->pdev->dev,
7898                        "Adapter not supported in this hardware configuration.\n");
7899
7900                if (!ipr_testmode) {
7901                        ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7902                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7903                        list_add_tail(&ipr_cmd->queue,
7904                                        &ioa_cfg->hrrq->hrrq_free_q);
7905                        return IPR_RC_JOB_RETURN;
7906                }
7907        }
7908
7909        ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7910
7911        ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7912                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7913                          sizeof(struct ipr_inquiry_page0));
7914
7915        LEAVE;
7916        return IPR_RC_JOB_RETURN;
7917}
7918
7919/**
7920 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7921 * @ipr_cmd:    ipr command struct
7922 *
7923 * This function sends a standard inquiry to the adapter.
7924 *
7925 * Return value:
7926 *      IPR_RC_JOB_RETURN
7927 **/
7928static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7929{
7930        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7931
7932        ENTER;
7933        ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7934
7935        ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7936                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7937                          sizeof(struct ipr_ioa_vpd));
7938
7939        LEAVE;
7940        return IPR_RC_JOB_RETURN;
7941}
7942
7943/**
7944 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7945 * @ipr_cmd:    ipr command struct
7946 *
7947 * This function send an Identify Host Request Response Queue
7948 * command to establish the HRRQ with the adapter.
7949 *
7950 * Return value:
7951 *      IPR_RC_JOB_RETURN
7952 **/
7953static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7954{
7955        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7956        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7957        struct ipr_hrr_queue *hrrq;
7958
7959        ENTER;
7960        ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7961        dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7962
7963        if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7964                hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7965
7966                ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7967                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7968
7969                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7970                if (ioa_cfg->sis64)
7971                        ioarcb->cmd_pkt.cdb[1] = 0x1;
7972
7973                if (ioa_cfg->nvectors == 1)
7974                        ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7975                else
7976                        ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7977
7978                ioarcb->cmd_pkt.cdb[2] =
7979                        ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7980                ioarcb->cmd_pkt.cdb[3] =
7981                        ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7982                ioarcb->cmd_pkt.cdb[4] =
7983                        ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7984                ioarcb->cmd_pkt.cdb[5] =
7985                        ((u64) hrrq->host_rrq_dma) & 0xff;
7986                ioarcb->cmd_pkt.cdb[7] =
7987                        ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7988                ioarcb->cmd_pkt.cdb[8] =
7989                        (sizeof(u32) * hrrq->size) & 0xff;
7990
7991                if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7992                        ioarcb->cmd_pkt.cdb[9] =
7993                                        ioa_cfg->identify_hrrq_index;
7994
7995                if (ioa_cfg->sis64) {
7996                        ioarcb->cmd_pkt.cdb[10] =
7997                                ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7998                        ioarcb->cmd_pkt.cdb[11] =
7999                                ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8000                        ioarcb->cmd_pkt.cdb[12] =
8001                                ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8002                        ioarcb->cmd_pkt.cdb[13] =
8003                                ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8004                }
8005
8006                if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8007                        ioarcb->cmd_pkt.cdb[14] =
8008                                        ioa_cfg->identify_hrrq_index;
8009
8010                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8011                           IPR_INTERNAL_TIMEOUT);
8012
8013                if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8014                        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8015
8016                LEAVE;
8017                return IPR_RC_JOB_RETURN;
8018        }
8019
8020        LEAVE;
8021        return IPR_RC_JOB_CONTINUE;
8022}
8023
8024/**
8025 * ipr_reset_timer_done - Adapter reset timer function
8026 * @ipr_cmd:    ipr command struct
8027 *
8028 * Description: This function is used in adapter reset processing
8029 * for timing events. If the reset_cmd pointer in the IOA
8030 * config struct is not this adapter's we are doing nested
8031 * resets and fail_all_ops will take care of freeing the
8032 * command block.
8033 *
8034 * Return value:
8035 *      none
8036 **/
8037static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8038{
8039        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8040        unsigned long lock_flags = 0;
8041
8042        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8043
8044        if (ioa_cfg->reset_cmd == ipr_cmd) {
8045                list_del(&ipr_cmd->queue);
8046                ipr_cmd->done(ipr_cmd);
8047        }
8048
8049        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8050}
8051
8052/**
8053 * ipr_reset_start_timer - Start a timer for adapter reset job
8054 * @ipr_cmd:    ipr command struct
8055 * @timeout:    timeout value
8056 *
8057 * Description: This function is used in adapter reset processing
8058 * for timing events. If the reset_cmd pointer in the IOA
8059 * config struct is not this adapter's we are doing nested
8060 * resets and fail_all_ops will take care of freeing the
8061 * command block.
8062 *
8063 * Return value:
8064 *      none
8065 **/
8066static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8067                                  unsigned long timeout)
8068{
8069
8070        ENTER;
8071        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8072        ipr_cmd->done = ipr_reset_ioa_job;
8073
8074        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8075        ipr_cmd->timer.expires = jiffies + timeout;
8076        ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8077        add_timer(&ipr_cmd->timer);
8078}
8079
8080/**
8081 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8082 * @ioa_cfg:    ioa cfg struct
8083 *
8084 * Return value:
8085 *      nothing
8086 **/
8087static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8088{
8089        struct ipr_hrr_queue *hrrq;
8090
8091        for_each_hrrq(hrrq, ioa_cfg) {
8092                spin_lock(&hrrq->_lock);
8093                memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8094
8095                /* Initialize Host RRQ pointers */
8096                hrrq->hrrq_start = hrrq->host_rrq;
8097                hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8098                hrrq->hrrq_curr = hrrq->hrrq_start;
8099                hrrq->toggle_bit = 1;
8100                spin_unlock(&hrrq->_lock);
8101        }
8102        wmb();
8103
8104        ioa_cfg->identify_hrrq_index = 0;
8105        if (ioa_cfg->hrrq_num == 1)
8106                atomic_set(&ioa_cfg->hrrq_index, 0);
8107        else
8108                atomic_set(&ioa_cfg->hrrq_index, 1);
8109
8110        /* Zero out config table */
8111        memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8112}
8113
8114/**
8115 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8116 * @ipr_cmd:    ipr command struct
8117 *
8118 * Return value:
8119 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8120 **/
8121static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8122{
8123        unsigned long stage, stage_time;
8124        u32 feedback;
8125        volatile u32 int_reg;
8126        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8127        u64 maskval = 0;
8128
8129        feedback = readl(ioa_cfg->regs.init_feedback_reg);
8130        stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8131        stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8132
8133        ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8134
8135        /* sanity check the stage_time value */
8136        if (stage_time == 0)
8137                stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8138        else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8139                stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8140        else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8141                stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8142
8143        if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8144                writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8145                int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8146                stage_time = ioa_cfg->transop_timeout;
8147                ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8148        } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8149                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8150                if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8151                        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8152                        maskval = IPR_PCII_IPL_STAGE_CHANGE;
8153                        maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8154                        writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8155                        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8156                        return IPR_RC_JOB_CONTINUE;
8157                }
8158        }
8159
8160        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8161        ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8162        ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8163        ipr_cmd->done = ipr_reset_ioa_job;
8164        add_timer(&ipr_cmd->timer);
8165
8166        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8167
8168        return IPR_RC_JOB_RETURN;
8169}
8170
8171/**
8172 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8173 * @ipr_cmd:    ipr command struct
8174 *
8175 * This function reinitializes some control blocks and
8176 * enables destructive diagnostics on the adapter.
8177 *
8178 * Return value:
8179 *      IPR_RC_JOB_RETURN
8180 **/
8181static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8182{
8183        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8184        volatile u32 int_reg;
8185        volatile u64 maskval;
8186        int i;
8187
8188        ENTER;
8189        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8190        ipr_init_ioa_mem(ioa_cfg);
8191
8192        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8193                spin_lock(&ioa_cfg->hrrq[i]._lock);
8194                ioa_cfg->hrrq[i].allow_interrupts = 1;
8195                spin_unlock(&ioa_cfg->hrrq[i]._lock);
8196        }
8197        wmb();
8198        if (ioa_cfg->sis64) {
8199                /* Set the adapter to the correct endian mode. */
8200                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8201                int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8202        }
8203
8204        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8205
8206        if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8207                writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8208                       ioa_cfg->regs.clr_interrupt_mask_reg32);
8209                int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8210                return IPR_RC_JOB_CONTINUE;
8211        }
8212
8213        /* Enable destructive diagnostics on IOA */
8214        writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8215
8216        if (ioa_cfg->sis64) {
8217                maskval = IPR_PCII_IPL_STAGE_CHANGE;
8218                maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8219                writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8220        } else
8221                writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8222
8223        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8224
8225        dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8226
8227        if (ioa_cfg->sis64) {
8228                ipr_cmd->job_step = ipr_reset_next_stage;
8229                return IPR_RC_JOB_CONTINUE;
8230        }
8231
8232        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8233        ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8234        ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8235        ipr_cmd->done = ipr_reset_ioa_job;
8236        add_timer(&ipr_cmd->timer);
8237        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8238
8239        LEAVE;
8240        return IPR_RC_JOB_RETURN;
8241}
8242
8243/**
8244 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8245 * @ipr_cmd:    ipr command struct
8246 *
8247 * This function is invoked when an adapter dump has run out
8248 * of processing time.
8249 *
8250 * Return value:
8251 *      IPR_RC_JOB_CONTINUE
8252 **/
8253static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8254{
8255        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8256
8257        if (ioa_cfg->sdt_state == GET_DUMP)
8258                ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8259        else if (ioa_cfg->sdt_state == READ_DUMP)
8260                ioa_cfg->sdt_state = ABORT_DUMP;
8261
8262        ioa_cfg->dump_timeout = 1;
8263        ipr_cmd->job_step = ipr_reset_alert;
8264
8265        return IPR_RC_JOB_CONTINUE;
8266}
8267
8268/**
8269 * ipr_unit_check_no_data - Log a unit check/no data error log
8270 * @ioa_cfg:            ioa config struct
8271 *
8272 * Logs an error indicating the adapter unit checked, but for some
8273 * reason, we were unable to fetch the unit check buffer.
8274 *
8275 * Return value:
8276 *      nothing
8277 **/
8278static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8279{
8280        ioa_cfg->errors_logged++;
8281        dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8282}
8283
8284/**
8285 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8286 * @ioa_cfg:            ioa config struct
8287 *
8288 * Fetches the unit check buffer from the adapter by clocking the data
8289 * through the mailbox register.
8290 *
8291 * Return value:
8292 *      nothing
8293 **/
8294static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8295{
8296        unsigned long mailbox;
8297        struct ipr_hostrcb *hostrcb;
8298        struct ipr_uc_sdt sdt;
8299        int rc, length;
8300        u32 ioasc;
8301
8302        mailbox = readl(ioa_cfg->ioa_mailbox);
8303
8304        if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8305                ipr_unit_check_no_data(ioa_cfg);
8306                return;
8307        }
8308
8309        memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8310        rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8311                                        (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8312
8313        if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8314            ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8315            (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8316                ipr_unit_check_no_data(ioa_cfg);
8317                return;
8318        }
8319
8320        /* Find length of the first sdt entry (UC buffer) */
8321        if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8322                length = be32_to_cpu(sdt.entry[0].end_token);
8323        else
8324                length = (be32_to_cpu(sdt.entry[0].end_token) -
8325                          be32_to_cpu(sdt.entry[0].start_token)) &
8326                          IPR_FMT2_MBX_ADDR_MASK;
8327
8328        hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8329                             struct ipr_hostrcb, queue);
8330        list_del(&hostrcb->queue);
8331        memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8332
8333        rc = ipr_get_ldump_data_section(ioa_cfg,
8334                                        be32_to_cpu(sdt.entry[0].start_token),
8335                                        (__be32 *)&hostrcb->hcam,
8336                                        min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8337
8338        if (!rc) {
8339                ipr_handle_log_data(ioa_cfg, hostrcb);
8340                ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8341                if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8342                    ioa_cfg->sdt_state == GET_DUMP)
8343                        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8344        } else
8345                ipr_unit_check_no_data(ioa_cfg);
8346
8347        list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8348}
8349
8350/**
8351 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8352 * @ipr_cmd:    ipr command struct
8353 *
8354 * Description: This function will call to get the unit check buffer.
8355 *
8356 * Return value:
8357 *      IPR_RC_JOB_RETURN
8358 **/
8359static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8360{
8361        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8362
8363        ENTER;
8364        ioa_cfg->ioa_unit_checked = 0;
8365        ipr_get_unit_check_buffer(ioa_cfg);
8366        ipr_cmd->job_step = ipr_reset_alert;
8367        ipr_reset_start_timer(ipr_cmd, 0);
8368
8369        LEAVE;
8370        return IPR_RC_JOB_RETURN;
8371}
8372
8373static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8374{
8375        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8376
8377        ENTER;
8378
8379        if (ioa_cfg->sdt_state != GET_DUMP)
8380                return IPR_RC_JOB_RETURN;
8381
8382        if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8383            (readl(ioa_cfg->regs.sense_interrupt_reg) &
8384             IPR_PCII_MAILBOX_STABLE)) {
8385
8386                if (!ipr_cmd->u.time_left)
8387                        dev_err(&ioa_cfg->pdev->dev,
8388                                "Timed out waiting for Mailbox register.\n");
8389
8390                ioa_cfg->sdt_state = READ_DUMP;
8391                ioa_cfg->dump_timeout = 0;
8392                if (ioa_cfg->sis64)
8393                        ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8394                else
8395                        ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8396                ipr_cmd->job_step = ipr_reset_wait_for_dump;
8397                schedule_work(&ioa_cfg->work_q);
8398
8399        } else {
8400                ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8401                ipr_reset_start_timer(ipr_cmd,
8402                                      IPR_CHECK_FOR_RESET_TIMEOUT);
8403        }
8404
8405        LEAVE;
8406        return IPR_RC_JOB_RETURN;
8407}
8408
8409/**
8410 * ipr_reset_restore_cfg_space - Restore PCI config space.
8411 * @ipr_cmd:    ipr command struct
8412 *
8413 * Description: This function restores the saved PCI config space of
8414 * the adapter, fails all outstanding ops back to the callers, and
8415 * fetches the dump/unit check if applicable to this reset.
8416 *
8417 * Return value:
8418 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8419 **/
8420static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8421{
8422        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8423        u32 int_reg;
8424
8425        ENTER;
8426        ioa_cfg->pdev->state_saved = true;
8427        pci_restore_state(ioa_cfg->pdev);
8428
8429        if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8430                ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8431                return IPR_RC_JOB_CONTINUE;
8432        }
8433
8434        ipr_fail_all_ops(ioa_cfg);
8435
8436        if (ioa_cfg->sis64) {
8437                /* Set the adapter to the correct endian mode. */
8438                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8439                int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8440        }
8441
8442        if (ioa_cfg->ioa_unit_checked) {
8443                if (ioa_cfg->sis64) {
8444                        ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8445                        ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8446                        return IPR_RC_JOB_RETURN;
8447                } else {
8448                        ioa_cfg->ioa_unit_checked = 0;
8449                        ipr_get_unit_check_buffer(ioa_cfg);
8450                        ipr_cmd->job_step = ipr_reset_alert;
8451                        ipr_reset_start_timer(ipr_cmd, 0);
8452                        return IPR_RC_JOB_RETURN;
8453                }
8454        }
8455
8456        if (ioa_cfg->in_ioa_bringdown) {
8457                ipr_cmd->job_step = ipr_ioa_bringdown_done;
8458        } else if (ioa_cfg->sdt_state == GET_DUMP) {
8459                ipr_cmd->job_step = ipr_dump_mailbox_wait;
8460                ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8461        } else {
8462                ipr_cmd->job_step = ipr_reset_enable_ioa;
8463        }
8464
8465        LEAVE;
8466        return IPR_RC_JOB_CONTINUE;
8467}
8468
8469/**
8470 * ipr_reset_bist_done - BIST has completed on the adapter.
8471 * @ipr_cmd:    ipr command struct
8472 *
8473 * Description: Unblock config space and resume the reset process.
8474 *
8475 * Return value:
8476 *      IPR_RC_JOB_CONTINUE
8477 **/
8478static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8479{
8480        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8481
8482        ENTER;
8483        if (ioa_cfg->cfg_locked)
8484                pci_cfg_access_unlock(ioa_cfg->pdev);
8485        ioa_cfg->cfg_locked = 0;
8486        ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8487        LEAVE;
8488        return IPR_RC_JOB_CONTINUE;
8489}
8490
8491/**
8492 * ipr_reset_start_bist - Run BIST on the adapter.
8493 * @ipr_cmd:    ipr command struct
8494 *
8495 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8496 *
8497 * Return value:
8498 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8499 **/
8500static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8501{
8502        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8503        int rc = PCIBIOS_SUCCESSFUL;
8504
8505        ENTER;
8506        if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8507                writel(IPR_UPROCI_SIS64_START_BIST,
8508                       ioa_cfg->regs.set_uproc_interrupt_reg32);
8509        else
8510                rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8511
8512        if (rc == PCIBIOS_SUCCESSFUL) {
8513                ipr_cmd->job_step = ipr_reset_bist_done;
8514                ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8515                rc = IPR_RC_JOB_RETURN;
8516        } else {
8517                if (ioa_cfg->cfg_locked)
8518                        pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8519                ioa_cfg->cfg_locked = 0;
8520                ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8521                rc = IPR_RC_JOB_CONTINUE;
8522        }
8523
8524        LEAVE;
8525        return rc;
8526}
8527
8528/**
8529 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8530 * @ipr_cmd:    ipr command struct
8531 *
8532 * Description: This clears PCI reset to the adapter and delays two seconds.
8533 *
8534 * Return value:
8535 *      IPR_RC_JOB_RETURN
8536 **/
8537static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8538{
8539        ENTER;
8540        ipr_cmd->job_step = ipr_reset_bist_done;
8541        ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8542        LEAVE;
8543        return IPR_RC_JOB_RETURN;
8544}
8545
8546/**
8547 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8548 * @work:       work struct
8549 *
8550 * Description: This pulses warm reset to a slot.
8551 *
8552 **/
8553static void ipr_reset_reset_work(struct work_struct *work)
8554{
8555        struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8556        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8557        struct pci_dev *pdev = ioa_cfg->pdev;
8558        unsigned long lock_flags = 0;
8559
8560        ENTER;
8561        pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8562        msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8563        pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8564
8565        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8566        if (ioa_cfg->reset_cmd == ipr_cmd)
8567                ipr_reset_ioa_job(ipr_cmd);
8568        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8569        LEAVE;
8570}
8571
8572/**
8573 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8574 * @ipr_cmd:    ipr command struct
8575 *
8576 * Description: This asserts PCI reset to the adapter.
8577 *
8578 * Return value:
8579 *      IPR_RC_JOB_RETURN
8580 **/
8581static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8582{
8583        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8584
8585        ENTER;
8586        INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8587        queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8588        ipr_cmd->job_step = ipr_reset_slot_reset_done;
8589        LEAVE;
8590        return IPR_RC_JOB_RETURN;
8591}
8592
8593/**
8594 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8595 * @ipr_cmd:    ipr command struct
8596 *
8597 * Description: This attempts to block config access to the IOA.
8598 *
8599 * Return value:
8600 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8601 **/
8602static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8603{
8604        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8605        int rc = IPR_RC_JOB_CONTINUE;
8606
8607        if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8608                ioa_cfg->cfg_locked = 1;
8609                ipr_cmd->job_step = ioa_cfg->reset;
8610        } else {
8611                if (ipr_cmd->u.time_left) {
8612                        rc = IPR_RC_JOB_RETURN;
8613                        ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8614                        ipr_reset_start_timer(ipr_cmd,
8615                                              IPR_CHECK_FOR_RESET_TIMEOUT);
8616                } else {
8617                        ipr_cmd->job_step = ioa_cfg->reset;
8618                        dev_err(&ioa_cfg->pdev->dev,
8619                                "Timed out waiting to lock config access. Resetting anyway.\n");
8620                }
8621        }
8622
8623        return rc;
8624}
8625
8626/**
8627 * ipr_reset_block_config_access - Block config access to the IOA
8628 * @ipr_cmd:    ipr command struct
8629 *
8630 * Description: This attempts to block config access to the IOA
8631 *
8632 * Return value:
8633 *      IPR_RC_JOB_CONTINUE
8634 **/
8635static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8636{
8637        ipr_cmd->ioa_cfg->cfg_locked = 0;
8638        ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8639        ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8640        return IPR_RC_JOB_CONTINUE;
8641}
8642
8643/**
8644 * ipr_reset_allowed - Query whether or not IOA can be reset
8645 * @ioa_cfg:    ioa config struct
8646 *
8647 * Return value:
8648 *      0 if reset not allowed / non-zero if reset is allowed
8649 **/
8650static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8651{
8652        volatile u32 temp_reg;
8653
8654        temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8655        return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8656}
8657
8658/**
8659 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8660 * @ipr_cmd:    ipr command struct
8661 *
8662 * Description: This function waits for adapter permission to run BIST,
8663 * then runs BIST. If the adapter does not give permission after a
8664 * reasonable time, we will reset the adapter anyway. The impact of
8665 * resetting the adapter without warning the adapter is the risk of
8666 * losing the persistent error log on the adapter. If the adapter is
8667 * reset while it is writing to the flash on the adapter, the flash
8668 * segment will have bad ECC and be zeroed.
8669 *
8670 * Return value:
8671 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8672 **/
8673static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8674{
8675        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8676        int rc = IPR_RC_JOB_RETURN;
8677
8678        if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8679                ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8680                ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8681        } else {
8682                ipr_cmd->job_step = ipr_reset_block_config_access;
8683                rc = IPR_RC_JOB_CONTINUE;
8684        }
8685
8686        return rc;
8687}
8688
8689/**
8690 * ipr_reset_alert - Alert the adapter of a pending reset
8691 * @ipr_cmd:    ipr command struct
8692 *
8693 * Description: This function alerts the adapter that it will be reset.
8694 * If memory space is not currently enabled, proceed directly
8695 * to running BIST on the adapter. The timer must always be started
8696 * so we guarantee we do not run BIST from ipr_isr.
8697 *
8698 * Return value:
8699 *      IPR_RC_JOB_RETURN
8700 **/
8701static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8702{
8703        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8704        u16 cmd_reg;
8705        int rc;
8706
8707        ENTER;
8708        rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8709
8710        if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8711                ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8712                writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8713                ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8714        } else {
8715                ipr_cmd->job_step = ipr_reset_block_config_access;
8716        }
8717
8718        ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8719        ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8720
8721        LEAVE;
8722        return IPR_RC_JOB_RETURN;
8723}
8724
8725/**
8726 * ipr_reset_quiesce_done - Complete IOA disconnect
8727 * @ipr_cmd:    ipr command struct
8728 *
8729 * Description: Freeze the adapter to complete quiesce processing
8730 *
8731 * Return value:
8732 *      IPR_RC_JOB_CONTINUE
8733 **/
8734static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8735{
8736        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8737
8738        ENTER;
8739        ipr_cmd->job_step = ipr_ioa_bringdown_done;
8740        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8741        LEAVE;
8742        return IPR_RC_JOB_CONTINUE;
8743}
8744
8745/**
8746 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8747 * @ipr_cmd:    ipr command struct
8748 *
8749 * Description: Ensure nothing is outstanding to the IOA and
8750 *                      proceed with IOA disconnect. Otherwise reset the IOA.
8751 *
8752 * Return value:
8753 *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8754 **/
8755static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8756{
8757        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8758        struct ipr_cmnd *loop_cmd;
8759        struct ipr_hrr_queue *hrrq;
8760        int rc = IPR_RC_JOB_CONTINUE;
8761        int count = 0;
8762
8763        ENTER;
8764        ipr_cmd->job_step = ipr_reset_quiesce_done;
8765
8766        for_each_hrrq(hrrq, ioa_cfg) {
8767                spin_lock(&hrrq->_lock);
8768                list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8769                        count++;
8770                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8771                        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8772                        rc = IPR_RC_JOB_RETURN;
8773                        break;
8774                }
8775                spin_unlock(&hrrq->_lock);
8776
8777                if (count)
8778                        break;
8779        }
8780
8781        LEAVE;
8782        return rc;
8783}
8784
8785/**
8786 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8787 * @ipr_cmd:    ipr command struct
8788 *
8789 * Description: Cancel any oustanding HCAMs to the IOA.
8790 *
8791 * Return value:
8792 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8793 **/
8794static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8795{
8796        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8797        int rc = IPR_RC_JOB_CONTINUE;
8798        struct ipr_cmd_pkt *cmd_pkt;
8799        struct ipr_cmnd *hcam_cmd;
8800        struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8801
8802        ENTER;
8803        ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8804
8805        if (!hrrq->ioa_is_dead) {
8806                if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8807                        list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8808                                if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8809                                        continue;
8810
8811                                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8812                                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8813                                cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8814                                cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8815                                cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8816                                cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
8817                                cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8818                                cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8819                                cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8820                                cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8821                                cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8822                                cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8823                                cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8824                                cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8825
8826                                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8827                                           IPR_CANCEL_TIMEOUT);
8828
8829                                rc = IPR_RC_JOB_RETURN;
8830                                ipr_cmd->job_step = ipr_reset_cancel_hcam;
8831                                break;
8832                        }
8833                }
8834        } else
8835                ipr_cmd->job_step = ipr_reset_alert;
8836
8837        LEAVE;
8838        return rc;
8839}
8840
8841/**
8842 * ipr_reset_ucode_download_done - Microcode download completion
8843 * @ipr_cmd:    ipr command struct
8844 *
8845 * Description: This function unmaps the microcode download buffer.
8846 *
8847 * Return value:
8848 *      IPR_RC_JOB_CONTINUE
8849 **/
8850static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8851{
8852        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8853        struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8854
8855        dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8856                     sglist->num_sg, DMA_TO_DEVICE);
8857
8858        ipr_cmd->job_step = ipr_reset_alert;
8859        return IPR_RC_JOB_CONTINUE;
8860}
8861
8862/**
8863 * ipr_reset_ucode_download - Download microcode to the adapter
8864 * @ipr_cmd:    ipr command struct
8865 *
8866 * Description: This function checks to see if it there is microcode
8867 * to download to the adapter. If there is, a download is performed.
8868 *
8869 * Return value:
8870 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8871 **/
8872static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8873{
8874        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8875        struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8876
8877        ENTER;
8878        ipr_cmd->job_step = ipr_reset_alert;
8879
8880        if (!sglist)
8881                return IPR_RC_JOB_CONTINUE;
8882
8883        ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8884        ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8885        ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8886        ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8887        ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8888        ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8889        ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8890
8891        if (ioa_cfg->sis64)
8892                ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8893        else
8894                ipr_build_ucode_ioadl(ipr_cmd, sglist);
8895        ipr_cmd->job_step = ipr_reset_ucode_download_done;
8896
8897        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8898                   IPR_WRITE_BUFFER_TIMEOUT);
8899
8900        LEAVE;
8901        return IPR_RC_JOB_RETURN;
8902}
8903
8904/**
8905 * ipr_reset_shutdown_ioa - Shutdown the adapter
8906 * @ipr_cmd:    ipr command struct
8907 *
8908 * Description: This function issues an adapter shutdown of the
8909 * specified type to the specified adapter as part of the
8910 * adapter reset job.
8911 *
8912 * Return value:
8913 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8914 **/
8915static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8916{
8917        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8918        enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8919        unsigned long timeout;
8920        int rc = IPR_RC_JOB_CONTINUE;
8921
8922        ENTER;
8923        if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8924                ipr_cmd->job_step = ipr_reset_cancel_hcam;
8925        else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8926                        !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8927                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8928                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8929                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8930                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8931
8932                if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8933                        timeout = IPR_SHUTDOWN_TIMEOUT;
8934                else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8935                        timeout = IPR_INTERNAL_TIMEOUT;
8936                else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8937                        timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8938                else
8939                        timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8940
8941                ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8942
8943                rc = IPR_RC_JOB_RETURN;
8944                ipr_cmd->job_step = ipr_reset_ucode_download;
8945        } else
8946                ipr_cmd->job_step = ipr_reset_alert;
8947
8948        LEAVE;
8949        return rc;
8950}
8951
8952/**
8953 * ipr_reset_ioa_job - Adapter reset job
8954 * @ipr_cmd:    ipr command struct
8955 *
8956 * Description: This function is the job router for the adapter reset job.
8957 *
8958 * Return value:
8959 *      none
8960 **/
8961static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8962{
8963        u32 rc, ioasc;
8964        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8965
8966        do {
8967                ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8968
8969                if (ioa_cfg->reset_cmd != ipr_cmd) {
8970                        /*
8971                         * We are doing nested adapter resets and this is
8972                         * not the current reset job.
8973                         */
8974                        list_add_tail(&ipr_cmd->queue,
8975                                        &ipr_cmd->hrrq->hrrq_free_q);
8976                        return;
8977                }
8978
8979                if (IPR_IOASC_SENSE_KEY(ioasc)) {
8980                        rc = ipr_cmd->job_step_failed(ipr_cmd);
8981                        if (rc == IPR_RC_JOB_RETURN)
8982                                return;
8983                }
8984
8985                ipr_reinit_ipr_cmnd(ipr_cmd);
8986                ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8987                rc = ipr_cmd->job_step(ipr_cmd);
8988        } while (rc == IPR_RC_JOB_CONTINUE);
8989}
8990
8991/**
8992 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8993 * @ioa_cfg:            ioa config struct
8994 * @job_step:           first job step of reset job
8995 * @shutdown_type:      shutdown type
8996 *
8997 * Description: This function will initiate the reset of the given adapter
8998 * starting at the selected job step.
8999 * If the caller needs to wait on the completion of the reset,
9000 * the caller must sleep on the reset_wait_q.
9001 *
9002 * Return value:
9003 *      none
9004 **/
9005static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9006                                    int (*job_step) (struct ipr_cmnd *),
9007                                    enum ipr_shutdown_type shutdown_type)
9008{
9009        struct ipr_cmnd *ipr_cmd;
9010        int i;
9011
9012        ioa_cfg->in_reset_reload = 1;
9013        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9014                spin_lock(&ioa_cfg->hrrq[i]._lock);
9015                ioa_cfg->hrrq[i].allow_cmds = 0;
9016                spin_unlock(&ioa_cfg->hrrq[i]._lock);
9017        }
9018        wmb();
9019        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
9020                scsi_block_requests(ioa_cfg->host);
9021
9022        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9023        ioa_cfg->reset_cmd = ipr_cmd;
9024        ipr_cmd->job_step = job_step;
9025        ipr_cmd->u.shutdown_type = shutdown_type;
9026
9027        ipr_reset_ioa_job(ipr_cmd);
9028}
9029
9030/**
9031 * ipr_initiate_ioa_reset - Initiate an adapter reset
9032 * @ioa_cfg:            ioa config struct
9033 * @shutdown_type:      shutdown type
9034 *
9035 * Description: This function will initiate the reset of the given adapter.
9036 * If the caller needs to wait on the completion of the reset,
9037 * the caller must sleep on the reset_wait_q.
9038 *
9039 * Return value:
9040 *      none
9041 **/
9042static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9043                                   enum ipr_shutdown_type shutdown_type)
9044{
9045        int i;
9046
9047        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9048                return;
9049
9050        if (ioa_cfg->in_reset_reload) {
9051                if (ioa_cfg->sdt_state == GET_DUMP)
9052                        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9053                else if (ioa_cfg->sdt_state == READ_DUMP)
9054                        ioa_cfg->sdt_state = ABORT_DUMP;
9055        }
9056
9057        if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9058                dev_err(&ioa_cfg->pdev->dev,
9059                        "IOA taken offline - error recovery failed\n");
9060
9061                ioa_cfg->reset_retries = 0;
9062                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9063                        spin_lock(&ioa_cfg->hrrq[i]._lock);
9064                        ioa_cfg->hrrq[i].ioa_is_dead = 1;
9065                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
9066                }
9067                wmb();
9068
9069                if (ioa_cfg->in_ioa_bringdown) {
9070                        ioa_cfg->reset_cmd = NULL;
9071                        ioa_cfg->in_reset_reload = 0;
9072                        ipr_fail_all_ops(ioa_cfg);
9073                        wake_up_all(&ioa_cfg->reset_wait_q);
9074
9075                        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9076                                spin_unlock_irq(ioa_cfg->host->host_lock);
9077                                scsi_unblock_requests(ioa_cfg->host);
9078                                spin_lock_irq(ioa_cfg->host->host_lock);
9079                        }
9080                        return;
9081                } else {
9082                        ioa_cfg->in_ioa_bringdown = 1;
9083                        shutdown_type = IPR_SHUTDOWN_NONE;
9084                }
9085        }
9086
9087        _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9088                                shutdown_type);
9089}
9090
9091/**
9092 * ipr_reset_freeze - Hold off all I/O activity
9093 * @ipr_cmd:    ipr command struct
9094 *
9095 * Description: If the PCI slot is frozen, hold off all I/O
9096 * activity; then, as soon as the slot is available again,
9097 * initiate an adapter reset.
9098 */
9099static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9100{
9101        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9102        int i;
9103
9104        /* Disallow new interrupts, avoid loop */
9105        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9106                spin_lock(&ioa_cfg->hrrq[i]._lock);
9107                ioa_cfg->hrrq[i].allow_interrupts = 0;
9108                spin_unlock(&ioa_cfg->hrrq[i]._lock);
9109        }
9110        wmb();
9111        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9112        ipr_cmd->done = ipr_reset_ioa_job;
9113        return IPR_RC_JOB_RETURN;
9114}
9115
9116/**
9117 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9118 * @pdev:       PCI device struct
9119 *
9120 * Description: This routine is called to tell us that the MMIO
9121 * access to the IOA has been restored
9122 */
9123static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9124{
9125        unsigned long flags = 0;
9126        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9127
9128        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9129        if (!ioa_cfg->probe_done)
9130                pci_save_state(pdev);
9131        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9132        return PCI_ERS_RESULT_NEED_RESET;
9133}
9134
9135/**
9136 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9137 * @pdev:       PCI device struct
9138 *
9139 * Description: This routine is called to tell us that the PCI bus
9140 * is down. Can't do anything here, except put the device driver
9141 * into a holding pattern, waiting for the PCI bus to come back.
9142 */
9143static void ipr_pci_frozen(struct pci_dev *pdev)
9144{
9145        unsigned long flags = 0;
9146        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9147
9148        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9149        if (ioa_cfg->probe_done)
9150                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9151        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9152}
9153
9154/**
9155 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9156 * @pdev:       PCI device struct
9157 *
9158 * Description: This routine is called by the pci error recovery
9159 * code after the PCI slot has been reset, just before we
9160 * should resume normal operations.
9161 */
9162static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9163{
9164        unsigned long flags = 0;
9165        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9166
9167        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9168        if (ioa_cfg->probe_done) {
9169                if (ioa_cfg->needs_warm_reset)
9170                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9171                else
9172                        _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9173                                                IPR_SHUTDOWN_NONE);
9174        } else
9175                wake_up_all(&ioa_cfg->eeh_wait_q);
9176        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9177        return PCI_ERS_RESULT_RECOVERED;
9178}
9179
9180/**
9181 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9182 * @pdev:       PCI device struct
9183 *
9184 * Description: This routine is called when the PCI bus has
9185 * permanently failed.
9186 */
9187static void ipr_pci_perm_failure(struct pci_dev *pdev)
9188{
9189        unsigned long flags = 0;
9190        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9191        int i;
9192
9193        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9194        if (ioa_cfg->probe_done) {
9195                if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9196                        ioa_cfg->sdt_state = ABORT_DUMP;
9197                ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9198                ioa_cfg->in_ioa_bringdown = 1;
9199                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9200                        spin_lock(&ioa_cfg->hrrq[i]._lock);
9201                        ioa_cfg->hrrq[i].allow_cmds = 0;
9202                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
9203                }
9204                wmb();
9205                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9206        } else
9207                wake_up_all(&ioa_cfg->eeh_wait_q);
9208        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9209}
9210
9211/**
9212 * ipr_pci_error_detected - Called when a PCI error is detected.
9213 * @pdev:       PCI device struct
9214 * @state:      PCI channel state
9215 *
9216 * Description: Called when a PCI error is detected.
9217 *
9218 * Return value:
9219 *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9220 */
9221static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9222                                               pci_channel_state_t state)
9223{
9224        switch (state) {
9225        case pci_channel_io_frozen:
9226                ipr_pci_frozen(pdev);
9227                return PCI_ERS_RESULT_CAN_RECOVER;
9228        case pci_channel_io_perm_failure:
9229                ipr_pci_perm_failure(pdev);
9230                return PCI_ERS_RESULT_DISCONNECT;
9231                break;
9232        default:
9233                break;
9234        }
9235        return PCI_ERS_RESULT_NEED_RESET;
9236}
9237
9238/**
9239 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9240 * @ioa_cfg:    ioa cfg struct
9241 *
9242 * Description: This is the second phase of adapter intialization
9243 * This function takes care of initilizing the adapter to the point
9244 * where it can accept new commands.
9245
9246 * Return value:
9247 *      0 on success / -EIO on failure
9248 **/
9249static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9250{
9251        int rc = 0;
9252        unsigned long host_lock_flags = 0;
9253
9254        ENTER;
9255        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9256        dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9257        ioa_cfg->probe_done = 1;
9258        if (ioa_cfg->needs_hard_reset) {
9259                ioa_cfg->needs_hard_reset = 0;
9260                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9261        } else
9262                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9263                                        IPR_SHUTDOWN_NONE);
9264        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9265
9266        LEAVE;
9267        return rc;
9268}
9269
9270/**
9271 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9272 * @ioa_cfg:    ioa config struct
9273 *
9274 * Return value:
9275 *      none
9276 **/
9277static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9278{
9279        int i;
9280
9281        if (ioa_cfg->ipr_cmnd_list) {
9282                for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9283                        if (ioa_cfg->ipr_cmnd_list[i])
9284                                dma_pool_free(ioa_cfg->ipr_cmd_pool,
9285                                              ioa_cfg->ipr_cmnd_list[i],
9286                                              ioa_cfg->ipr_cmnd_list_dma[i]);
9287
9288                        ioa_cfg->ipr_cmnd_list[i] = NULL;
9289                }
9290        }
9291
9292        if (ioa_cfg->ipr_cmd_pool)
9293                dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9294
9295        kfree(ioa_cfg->ipr_cmnd_list);
9296        kfree(ioa_cfg->ipr_cmnd_list_dma);
9297        ioa_cfg->ipr_cmnd_list = NULL;
9298        ioa_cfg->ipr_cmnd_list_dma = NULL;
9299        ioa_cfg->ipr_cmd_pool = NULL;
9300}
9301
9302/**
9303 * ipr_free_mem - Frees memory allocated for an adapter
9304 * @ioa_cfg:    ioa cfg struct
9305 *
9306 * Return value:
9307 *      nothing
9308 **/
9309static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9310{
9311        int i;
9312
9313        kfree(ioa_cfg->res_entries);
9314        dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9315                          ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9316        ipr_free_cmd_blks(ioa_cfg);
9317
9318        for (i = 0; i < ioa_cfg->hrrq_num; i++)
9319                dma_free_coherent(&ioa_cfg->pdev->dev,
9320                                  sizeof(u32) * ioa_cfg->hrrq[i].size,
9321                                  ioa_cfg->hrrq[i].host_rrq,
9322                                  ioa_cfg->hrrq[i].host_rrq_dma);
9323
9324        dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9325                          ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9326
9327        for (i = 0; i < IPR_NUM_HCAMS; i++) {
9328                dma_free_coherent(&ioa_cfg->pdev->dev,
9329                                  sizeof(struct ipr_hostrcb),
9330                                  ioa_cfg->hostrcb[i],
9331                                  ioa_cfg->hostrcb_dma[i]);
9332        }
9333
9334        ipr_free_dump(ioa_cfg);
9335        kfree(ioa_cfg->trace);
9336}
9337
9338/**
9339 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9340 * @ioa_cfg:    ipr cfg struct
9341 *
9342 * This function frees all allocated IRQs for the
9343 * specified adapter.
9344 *
9345 * Return value:
9346 *      none
9347 **/
9348static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9349{
9350        struct pci_dev *pdev = ioa_cfg->pdev;
9351
9352        if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9353            ioa_cfg->intr_flag == IPR_USE_MSIX) {
9354                int i;
9355                for (i = 0; i < ioa_cfg->nvectors; i++)
9356                        free_irq(ioa_cfg->vectors_info[i].vec,
9357                                 &ioa_cfg->hrrq[i]);
9358        } else
9359                free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9360
9361        if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9362                pci_disable_msi(pdev);
9363                ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9364        } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9365                pci_disable_msix(pdev);
9366                ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9367        }
9368}
9369
9370/**
9371 * ipr_free_all_resources - Free all allocated resources for an adapter.
9372 * @ipr_cmd:    ipr command struct
9373 *
9374 * This function frees all allocated resources for the
9375 * specified adapter.
9376 *
9377 * Return value:
9378 *      none
9379 **/
9380static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9381{
9382        struct pci_dev *pdev = ioa_cfg->pdev;
9383
9384        ENTER;
9385        ipr_free_irqs(ioa_cfg);
9386        if (ioa_cfg->reset_work_q)
9387                destroy_workqueue(ioa_cfg->reset_work_q);
9388        iounmap(ioa_cfg->hdw_dma_regs);
9389        pci_release_regions(pdev);
9390        ipr_free_mem(ioa_cfg);
9391        scsi_host_put(ioa_cfg->host);
9392        pci_disable_device(pdev);
9393        LEAVE;
9394}
9395
9396/**
9397 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9398 * @ioa_cfg:    ioa config struct
9399 *
9400 * Return value:
9401 *      0 on success / -ENOMEM on allocation failure
9402 **/
9403static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9404{
9405        struct ipr_cmnd *ipr_cmd;
9406        struct ipr_ioarcb *ioarcb;
9407        dma_addr_t dma_addr;
9408        int i, entries_each_hrrq, hrrq_id = 0;
9409
9410        ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9411                                                sizeof(struct ipr_cmnd), 512, 0);
9412
9413        if (!ioa_cfg->ipr_cmd_pool)
9414                return -ENOMEM;
9415
9416        ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9417        ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9418
9419        if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9420                ipr_free_cmd_blks(ioa_cfg);
9421                return -ENOMEM;
9422        }
9423
9424        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9425                if (ioa_cfg->hrrq_num > 1) {
9426                        if (i == 0) {
9427                                entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9428                                ioa_cfg->hrrq[i].min_cmd_id = 0;
9429                                        ioa_cfg->hrrq[i].max_cmd_id =
9430                                                (entries_each_hrrq - 1);
9431                        } else {
9432                                entries_each_hrrq =
9433                                        IPR_NUM_BASE_CMD_BLKS/
9434                                        (ioa_cfg->hrrq_num - 1);
9435                                ioa_cfg->hrrq[i].min_cmd_id =
9436                                        IPR_NUM_INTERNAL_CMD_BLKS +
9437                                        (i - 1) * entries_each_hrrq;
9438                                ioa_cfg->hrrq[i].max_cmd_id =
9439                                        (IPR_NUM_INTERNAL_CMD_BLKS +
9440                                        i * entries_each_hrrq - 1);
9441                        }
9442                } else {
9443                        entries_each_hrrq = IPR_NUM_CMD_BLKS;
9444                        ioa_cfg->hrrq[i].min_cmd_id = 0;
9445                        ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9446                }
9447                ioa_cfg->hrrq[i].size = entries_each_hrrq;
9448        }
9449
9450        BUG_ON(ioa_cfg->hrrq_num == 0);
9451
9452        i = IPR_NUM_CMD_BLKS -
9453                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9454        if (i > 0) {
9455                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9456                ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9457        }
9458
9459        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9460                ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9461
9462                if (!ipr_cmd) {
9463                        ipr_free_cmd_blks(ioa_cfg);
9464                        return -ENOMEM;
9465                }
9466
9467                memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9468                ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9469                ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9470
9471                ioarcb = &ipr_cmd->ioarcb;
9472                ipr_cmd->dma_addr = dma_addr;
9473                if (ioa_cfg->sis64)
9474                        ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9475                else
9476                        ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9477
9478                ioarcb->host_response_handle = cpu_to_be32(i << 2);
9479                if (ioa_cfg->sis64) {
9480                        ioarcb->u.sis64_addr_data.data_ioadl_addr =
9481                                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9482                        ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9483                                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9484                } else {
9485                        ioarcb->write_ioadl_addr =
9486                                cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9487                        ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9488                        ioarcb->ioasa_host_pci_addr =
9489                                cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9490                }
9491                ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9492                ipr_cmd->cmd_index = i;
9493                ipr_cmd->ioa_cfg = ioa_cfg;
9494                ipr_cmd->sense_buffer_dma = dma_addr +
9495                        offsetof(struct ipr_cmnd, sense_buffer);
9496
9497                ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9498                ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9499                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9500                if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9501                        hrrq_id++;
9502        }
9503
9504        return 0;
9505}
9506
9507/**
9508 * ipr_alloc_mem - Allocate memory for an adapter
9509 * @ioa_cfg:    ioa config struct
9510 *
9511 * Return value:
9512 *      0 on success / non-zero for error
9513 **/
9514static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9515{
9516        struct pci_dev *pdev = ioa_cfg->pdev;
9517        int i, rc = -ENOMEM;
9518
9519        ENTER;
9520        ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9521                                       ioa_cfg->max_devs_supported, GFP_KERNEL);
9522
9523        if (!ioa_cfg->res_entries)
9524                goto out;
9525
9526        for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9527                list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9528                ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9529        }
9530
9531        ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9532                                              sizeof(struct ipr_misc_cbs),
9533                                              &ioa_cfg->vpd_cbs_dma,
9534                                              GFP_KERNEL);
9535
9536        if (!ioa_cfg->vpd_cbs)
9537                goto out_free_res_entries;
9538
9539        if (ipr_alloc_cmd_blks(ioa_cfg))
9540                goto out_free_vpd_cbs;
9541
9542        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9543                ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9544                                        sizeof(u32) * ioa_cfg->hrrq[i].size,
9545                                        &ioa_cfg->hrrq[i].host_rrq_dma,
9546                                        GFP_KERNEL);
9547
9548                if (!ioa_cfg->hrrq[i].host_rrq)  {
9549                        while (--i > 0)
9550                                dma_free_coherent(&pdev->dev,
9551                                        sizeof(u32) * ioa_cfg->hrrq[i].size,
9552                                        ioa_cfg->hrrq[i].host_rrq,
9553                                        ioa_cfg->hrrq[i].host_rrq_dma);
9554                        goto out_ipr_free_cmd_blocks;
9555                }
9556                ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9557        }
9558
9559        ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9560                                                  ioa_cfg->cfg_table_size,
9561                                                  &ioa_cfg->cfg_table_dma,
9562                                                  GFP_KERNEL);
9563
9564        if (!ioa_cfg->u.cfg_table)
9565                goto out_free_host_rrq;
9566
9567        for (i = 0; i < IPR_NUM_HCAMS; i++) {
9568                ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9569                                                         sizeof(struct ipr_hostrcb),
9570                                                         &ioa_cfg->hostrcb_dma[i],
9571                                                         GFP_KERNEL);
9572
9573                if (!ioa_cfg->hostrcb[i])
9574                        goto out_free_hostrcb_dma;
9575
9576                ioa_cfg->hostrcb[i]->hostrcb_dma =
9577                        ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9578                ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9579                list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9580        }
9581
9582        ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9583                                 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9584
9585        if (!ioa_cfg->trace)
9586                goto out_free_hostrcb_dma;
9587
9588        rc = 0;
9589out:
9590        LEAVE;
9591        return rc;
9592
9593out_free_hostrcb_dma:
9594        while (i-- > 0) {
9595                dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9596                                  ioa_cfg->hostrcb[i],
9597                                  ioa_cfg->hostrcb_dma[i]);
9598        }
9599        dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9600                          ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9601out_free_host_rrq:
9602        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9603                dma_free_coherent(&pdev->dev,
9604                                  sizeof(u32) * ioa_cfg->hrrq[i].size,
9605                                  ioa_cfg->hrrq[i].host_rrq,
9606                                  ioa_cfg->hrrq[i].host_rrq_dma);
9607        }
9608out_ipr_free_cmd_blocks:
9609        ipr_free_cmd_blks(ioa_cfg);
9610out_free_vpd_cbs:
9611        dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9612                          ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9613out_free_res_entries:
9614        kfree(ioa_cfg->res_entries);
9615        goto out;
9616}
9617
9618/**
9619 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9620 * @ioa_cfg:    ioa config struct
9621 *
9622 * Return value:
9623 *      none
9624 **/
9625static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9626{
9627        int i;
9628
9629        for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9630                ioa_cfg->bus_attr[i].bus = i;
9631                ioa_cfg->bus_attr[i].qas_enabled = 0;
9632                ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9633                if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9634                        ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9635                else
9636                        ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9637        }
9638}
9639
9640/**
9641 * ipr_init_regs - Initialize IOA registers
9642 * @ioa_cfg:    ioa config struct
9643 *
9644 * Return value:
9645 *      none
9646 **/
9647static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9648{
9649        const struct ipr_interrupt_offsets *p;
9650        struct ipr_interrupts *t;
9651        void __iomem *base;
9652
9653        p = &ioa_cfg->chip_cfg->regs;
9654        t = &ioa_cfg->regs;
9655        base = ioa_cfg->hdw_dma_regs;
9656
9657        t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9658        t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9659        t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9660        t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9661        t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9662        t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9663        t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9664        t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9665        t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9666        t->ioarrin_reg = base + p->ioarrin_reg;
9667        t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9668        t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9669        t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9670        t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9671        t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9672        t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9673
9674        if (ioa_cfg->sis64) {
9675                t->init_feedback_reg = base + p->init_feedback_reg;
9676                t->dump_addr_reg = base + p->dump_addr_reg;
9677                t->dump_data_reg = base + p->dump_data_reg;
9678                t->endian_swap_reg = base + p->endian_swap_reg;
9679        }
9680}
9681
9682/**
9683 * ipr_init_ioa_cfg - Initialize IOA config struct
9684 * @ioa_cfg:    ioa config struct
9685 * @host:               scsi host struct
9686 * @pdev:               PCI dev struct
9687 *
9688 * Return value:
9689 *      none
9690 **/
9691static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9692                             struct Scsi_Host *host, struct pci_dev *pdev)
9693{
9694        int i;
9695
9696        ioa_cfg->host = host;
9697        ioa_cfg->pdev = pdev;
9698        ioa_cfg->log_level = ipr_log_level;
9699        ioa_cfg->doorbell = IPR_DOORBELL;
9700        sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9701        sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9702        sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9703        sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9704        sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9705        sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9706
9707        INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9708        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9709        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9710        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9711        INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9712        init_waitqueue_head(&ioa_cfg->reset_wait_q);
9713        init_waitqueue_head(&ioa_cfg->msi_wait_q);
9714        init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9715        ioa_cfg->sdt_state = INACTIVE;
9716
9717        ipr_initialize_bus_attr(ioa_cfg);
9718        ioa_cfg->max_devs_supported = ipr_max_devs;
9719
9720        if (ioa_cfg->sis64) {
9721                host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9722                host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9723                if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9724                        ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9725                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9726                                           + ((sizeof(struct ipr_config_table_entry64)
9727                                               * ioa_cfg->max_devs_supported)));
9728        } else {
9729                host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9730                host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9731                if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9732                        ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9733                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9734                                           + ((sizeof(struct ipr_config_table_entry)
9735                                               * ioa_cfg->max_devs_supported)));
9736        }
9737
9738        host->max_channel = IPR_VSET_BUS;
9739        host->unique_id = host->host_no;
9740        host->max_cmd_len = IPR_MAX_CDB_LEN;
9741        host->can_queue = ioa_cfg->max_cmds;
9742        pci_set_drvdata(pdev, ioa_cfg);
9743
9744        for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9745                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9746                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9747                spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9748                if (i == 0)
9749                        ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9750                else
9751                        ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9752        }
9753}
9754
9755/**
9756 * ipr_get_chip_info - Find adapter chip information
9757 * @dev_id:             PCI device id struct
9758 *
9759 * Return value:
9760 *      ptr to chip information on success / NULL on failure
9761 **/
9762static const struct ipr_chip_t *
9763ipr_get_chip_info(const struct pci_device_id *dev_id)
9764{
9765        int i;
9766
9767        for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9768                if (ipr_chip[i].vendor == dev_id->vendor &&
9769                    ipr_chip[i].device == dev_id->device)
9770                        return &ipr_chip[i];
9771        return NULL;
9772}
9773
9774/**
9775 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9776 *                                              during probe time
9777 * @ioa_cfg:    ioa config struct
9778 *
9779 * Return value:
9780 *      None
9781 **/
9782static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9783{
9784        struct pci_dev *pdev = ioa_cfg->pdev;
9785
9786        if (pci_channel_offline(pdev)) {
9787                wait_event_timeout(ioa_cfg->eeh_wait_q,
9788                                   !pci_channel_offline(pdev),
9789                                   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9790                pci_restore_state(pdev);
9791        }
9792}
9793
9794static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9795{
9796        struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9797        int i, vectors;
9798
9799        for (i = 0; i < ARRAY_SIZE(entries); ++i)
9800                entries[i].entry = i;
9801
9802        vectors = pci_enable_msix_range(ioa_cfg->pdev,
9803                                        entries, 1, ipr_number_of_msix);
9804        if (vectors < 0) {
9805                ipr_wait_for_pci_err_recovery(ioa_cfg);
9806                return vectors;
9807        }
9808
9809        for (i = 0; i < vectors; i++)
9810                ioa_cfg->vectors_info[i].vec = entries[i].vector;
9811        ioa_cfg->nvectors = vectors;
9812
9813        return 0;
9814}
9815
9816static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9817{
9818        int i, vectors;
9819
9820        vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9821        if (vectors < 0) {
9822                ipr_wait_for_pci_err_recovery(ioa_cfg);
9823                return vectors;
9824        }
9825
9826        for (i = 0; i < vectors; i++)
9827                ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9828        ioa_cfg->nvectors = vectors;
9829
9830        return 0;
9831}
9832
9833static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9834{
9835        int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9836
9837        for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9838                snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9839                         "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9840                ioa_cfg->vectors_info[vec_idx].
9841                        desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9842        }
9843}
9844
9845static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9846{
9847        int i, rc;
9848
9849        for (i = 1; i < ioa_cfg->nvectors; i++) {
9850                rc = request_irq(ioa_cfg->vectors_info[i].vec,
9851                        ipr_isr_mhrrq,
9852                        0,
9853                        ioa_cfg->vectors_info[i].desc,
9854                        &ioa_cfg->hrrq[i]);
9855                if (rc) {
9856                        while (--i >= 0)
9857                                free_irq(ioa_cfg->vectors_info[i].vec,
9858                                        &ioa_cfg->hrrq[i]);
9859                        return rc;
9860                }
9861        }
9862        return 0;
9863}
9864
9865/**
9866 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9867 * @pdev:               PCI device struct
9868 *
9869 * Description: Simply set the msi_received flag to 1 indicating that
9870 * Message Signaled Interrupts are supported.
9871 *
9872 * Return value:
9873 *      0 on success / non-zero on failure
9874 **/
9875static irqreturn_t ipr_test_intr(int irq, void *devp)
9876{
9877        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9878        unsigned long lock_flags = 0;
9879        irqreturn_t rc = IRQ_HANDLED;
9880
9881        dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9882        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9883
9884        ioa_cfg->msi_received = 1;
9885        wake_up(&ioa_cfg->msi_wait_q);
9886
9887        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9888        return rc;
9889}
9890
9891/**
9892 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9893 * @pdev:               PCI device struct
9894 *
9895 * Description: The return value from pci_enable_msi_range() can not always be
9896 * trusted.  This routine sets up and initiates a test interrupt to determine
9897 * if the interrupt is received via the ipr_test_intr() service routine.
9898 * If the tests fails, the driver will fall back to LSI.
9899 *
9900 * Return value:
9901 *      0 on success / non-zero on failure
9902 **/
9903static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9904{
9905        int rc;
9906        volatile u32 int_reg;
9907        unsigned long lock_flags = 0;
9908
9909        ENTER;
9910
9911        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9912        init_waitqueue_head(&ioa_cfg->msi_wait_q);
9913        ioa_cfg->msi_received = 0;
9914        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9915        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9916        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9917        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9918
9919        if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9920                rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9921        else
9922                rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9923        if (rc) {
9924                dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9925                return rc;
9926        } else if (ipr_debug)
9927                dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9928
9929        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9930        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9931        wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9932        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9933        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9934
9935        if (!ioa_cfg->msi_received) {
9936                /* MSI test failed */
9937                dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9938                rc = -EOPNOTSUPP;
9939        } else if (ipr_debug)
9940                dev_info(&pdev->dev, "MSI test succeeded.\n");
9941
9942        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9943
9944        if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9945                free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9946        else
9947                free_irq(pdev->irq, ioa_cfg);
9948
9949        LEAVE;
9950
9951        return rc;
9952}
9953
9954 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9955 * @pdev:               PCI device struct
9956 * @dev_id:             PCI device id struct
9957 *
9958 * Return value:
9959 *      0 on success / non-zero on failure
9960 **/
9961static int ipr_probe_ioa(struct pci_dev *pdev,
9962                         const struct pci_device_id *dev_id)
9963{
9964        struct ipr_ioa_cfg *ioa_cfg;
9965        struct Scsi_Host *host;
9966        unsigned long ipr_regs_pci;
9967        void __iomem *ipr_regs;
9968        int rc = PCIBIOS_SUCCESSFUL;
9969        volatile u32 mask, uproc, interrupts;
9970        unsigned long lock_flags, driver_lock_flags;
9971
9972        ENTER;
9973
9974        dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9975        host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9976
9977        if (!host) {
9978                dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9979                rc = -ENOMEM;
9980                goto out;
9981        }
9982
9983        ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9984        memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9985        ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9986
9987        ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9988
9989        if (!ioa_cfg->ipr_chip) {
9990                dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9991                        dev_id->vendor, dev_id->device);
9992                goto out_scsi_host_put;
9993        }
9994
9995        /* set SIS 32 or SIS 64 */
9996        ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9997        ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9998        ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9999        ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10000
10001        if (ipr_transop_timeout)
10002                ioa_cfg->transop_timeout = ipr_transop_timeout;
10003        else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10004                ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10005        else
10006                ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10007
10008        ioa_cfg->revid = pdev->revision;
10009
10010        ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10011
10012        ipr_regs_pci = pci_resource_start(pdev, 0);
10013
10014        rc = pci_request_regions(pdev, IPR_NAME);
10015        if (rc < 0) {
10016                dev_err(&pdev->dev,
10017                        "Couldn't register memory range of registers\n");
10018                goto out_scsi_host_put;
10019        }
10020
10021        rc = pci_enable_device(pdev);
10022
10023        if (rc || pci_channel_offline(pdev)) {
10024                if (pci_channel_offline(pdev)) {
10025                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10026                        rc = pci_enable_device(pdev);
10027                }
10028
10029                if (rc) {
10030                        dev_err(&pdev->dev, "Cannot enable adapter\n");
10031                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10032                        goto out_release_regions;
10033                }
10034        }
10035
10036        ipr_regs = pci_ioremap_bar(pdev, 0);
10037
10038        if (!ipr_regs) {
10039                dev_err(&pdev->dev,
10040                        "Couldn't map memory range of registers\n");
10041                rc = -ENOMEM;
10042                goto out_disable;
10043        }
10044
10045        ioa_cfg->hdw_dma_regs = ipr_regs;
10046        ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10047        ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10048
10049        ipr_init_regs(ioa_cfg);
10050
10051        if (ioa_cfg->sis64) {
10052                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10053                if (rc < 0) {
10054                        dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10055                        rc = dma_set_mask_and_coherent(&pdev->dev,
10056                                                       DMA_BIT_MASK(32));
10057                }
10058        } else
10059                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10060
10061        if (rc < 0) {
10062                dev_err(&pdev->dev, "Failed to set DMA mask\n");
10063                goto cleanup_nomem;
10064        }
10065
10066        rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10067                                   ioa_cfg->chip_cfg->cache_line_size);
10068
10069        if (rc != PCIBIOS_SUCCESSFUL) {
10070                dev_err(&pdev->dev, "Write of cache line size failed\n");
10071                ipr_wait_for_pci_err_recovery(ioa_cfg);
10072                rc = -EIO;
10073                goto cleanup_nomem;
10074        }
10075
10076        /* Issue MMIO read to ensure card is not in EEH */
10077        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10078        ipr_wait_for_pci_err_recovery(ioa_cfg);
10079
10080        if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10081                dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10082                        IPR_MAX_MSIX_VECTORS);
10083                ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10084        }
10085
10086        if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
10087                        ipr_enable_msix(ioa_cfg) == 0)
10088                ioa_cfg->intr_flag = IPR_USE_MSIX;
10089        else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
10090                        ipr_enable_msi(ioa_cfg) == 0)
10091                ioa_cfg->intr_flag = IPR_USE_MSI;
10092        else {
10093                ioa_cfg->intr_flag = IPR_USE_LSI;
10094                ioa_cfg->nvectors = 1;
10095                dev_info(&pdev->dev, "Cannot enable MSI.\n");
10096        }
10097
10098        pci_set_master(pdev);
10099
10100        if (pci_channel_offline(pdev)) {
10101                ipr_wait_for_pci_err_recovery(ioa_cfg);
10102                pci_set_master(pdev);
10103                if (pci_channel_offline(pdev)) {
10104                        rc = -EIO;
10105                        goto out_msi_disable;
10106                }
10107        }
10108
10109        if (ioa_cfg->intr_flag == IPR_USE_MSI ||
10110            ioa_cfg->intr_flag == IPR_USE_MSIX) {
10111                rc = ipr_test_msi(ioa_cfg, pdev);
10112                if (rc == -EOPNOTSUPP) {
10113                        ipr_wait_for_pci_err_recovery(ioa_cfg);
10114                        if (ioa_cfg->intr_flag == IPR_USE_MSI) {
10115                                ioa_cfg->intr_flag &= ~IPR_USE_MSI;
10116                                pci_disable_msi(pdev);
10117                         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
10118                                ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
10119                                pci_disable_msix(pdev);
10120                        }
10121
10122                        ioa_cfg->intr_flag = IPR_USE_LSI;
10123                        ioa_cfg->nvectors = 1;
10124                }
10125                else if (rc)
10126                        goto out_msi_disable;
10127                else {
10128                        if (ioa_cfg->intr_flag == IPR_USE_MSI)
10129                                dev_info(&pdev->dev,
10130                                        "Request for %d MSIs succeeded with starting IRQ: %d\n",
10131                                        ioa_cfg->nvectors, pdev->irq);
10132                        else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10133                                dev_info(&pdev->dev,
10134                                        "Request for %d MSIXs succeeded.",
10135                                        ioa_cfg->nvectors);
10136                }
10137        }
10138
10139        ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10140                                (unsigned int)num_online_cpus(),
10141                                (unsigned int)IPR_MAX_HRRQ_NUM);
10142
10143        if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10144                goto out_msi_disable;
10145
10146        if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10147                goto out_msi_disable;
10148
10149        rc = ipr_alloc_mem(ioa_cfg);
10150        if (rc < 0) {
10151                dev_err(&pdev->dev,
10152                        "Couldn't allocate enough memory for device driver!\n");
10153                goto out_msi_disable;
10154        }
10155
10156        /* Save away PCI config space for use following IOA reset */
10157        rc = pci_save_state(pdev);
10158
10159        if (rc != PCIBIOS_SUCCESSFUL) {
10160                dev_err(&pdev->dev, "Failed to save PCI config space\n");
10161                rc = -EIO;
10162                goto cleanup_nolog;
10163        }
10164
10165        /*
10166         * If HRRQ updated interrupt is not masked, or reset alert is set,
10167         * the card is in an unknown state and needs a hard reset
10168         */
10169        mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10170        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10171        uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10172        if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10173                ioa_cfg->needs_hard_reset = 1;
10174        if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10175                ioa_cfg->needs_hard_reset = 1;
10176        if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10177                ioa_cfg->ioa_unit_checked = 1;
10178
10179        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10180        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10181        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10182
10183        if (ioa_cfg->intr_flag == IPR_USE_MSI
10184                        || ioa_cfg->intr_flag == IPR_USE_MSIX) {
10185                name_msi_vectors(ioa_cfg);
10186                rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
10187                        0,
10188                        ioa_cfg->vectors_info[0].desc,
10189                        &ioa_cfg->hrrq[0]);
10190                if (!rc)
10191                        rc = ipr_request_other_msi_irqs(ioa_cfg);
10192        } else {
10193                rc = request_irq(pdev->irq, ipr_isr,
10194                         IRQF_SHARED,
10195                         IPR_NAME, &ioa_cfg->hrrq[0]);
10196        }
10197        if (rc) {
10198                dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10199                        pdev->irq, rc);
10200                goto cleanup_nolog;
10201        }
10202
10203        if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10204            (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10205                ioa_cfg->needs_warm_reset = 1;
10206                ioa_cfg->reset = ipr_reset_slot_reset;
10207
10208                ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10209                                                                WQ_MEM_RECLAIM, host->host_no);
10210
10211                if (!ioa_cfg->reset_work_q) {
10212                        dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10213                        goto out_free_irq;
10214                }
10215        } else
10216                ioa_cfg->reset = ipr_reset_start_bist;
10217
10218        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10219        list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10220        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10221
10222        LEAVE;
10223out:
10224        return rc;
10225
10226out_free_irq:
10227        ipr_free_irqs(ioa_cfg);
10228cleanup_nolog:
10229        ipr_free_mem(ioa_cfg);
10230out_msi_disable:
10231        ipr_wait_for_pci_err_recovery(ioa_cfg);
10232        if (ioa_cfg->intr_flag == IPR_USE_MSI)
10233                pci_disable_msi(pdev);
10234        else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10235                pci_disable_msix(pdev);
10236cleanup_nomem:
10237        iounmap(ipr_regs);
10238out_disable:
10239        pci_disable_device(pdev);
10240out_release_regions:
10241        pci_release_regions(pdev);
10242out_scsi_host_put:
10243        scsi_host_put(host);
10244        goto out;
10245}
10246
10247/**
10248 * ipr_initiate_ioa_bringdown - Bring down an adapter
10249 * @ioa_cfg:            ioa config struct
10250 * @shutdown_type:      shutdown type
10251 *
10252 * Description: This function will initiate bringing down the adapter.
10253 * This consists of issuing an IOA shutdown to the adapter
10254 * to flush the cache, and running BIST.
10255 * If the caller needs to wait on the completion of the reset,
10256 * the caller must sleep on the reset_wait_q.
10257 *
10258 * Return value:
10259 *      none
10260 **/
10261static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10262                                       enum ipr_shutdown_type shutdown_type)
10263{
10264        ENTER;
10265        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10266                ioa_cfg->sdt_state = ABORT_DUMP;
10267        ioa_cfg->reset_retries = 0;
10268        ioa_cfg->in_ioa_bringdown = 1;
10269        ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10270        LEAVE;
10271}
10272
10273/**
10274 * __ipr_remove - Remove a single adapter
10275 * @pdev:       pci device struct
10276 *
10277 * Adapter hot plug remove entry point.
10278 *
10279 * Return value:
10280 *      none
10281 **/
10282static void __ipr_remove(struct pci_dev *pdev)
10283{
10284        unsigned long host_lock_flags = 0;
10285        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10286        int i;
10287        unsigned long driver_lock_flags;
10288        ENTER;
10289
10290        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10291        while (ioa_cfg->in_reset_reload) {
10292                spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10293                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10294                spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10295        }
10296
10297        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10298                spin_lock(&ioa_cfg->hrrq[i]._lock);
10299                ioa_cfg->hrrq[i].removing_ioa = 1;
10300                spin_unlock(&ioa_cfg->hrrq[i]._lock);
10301        }
10302        wmb();
10303        ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10304
10305        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10306        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10307        flush_work(&ioa_cfg->work_q);
10308        if (ioa_cfg->reset_work_q)
10309                flush_workqueue(ioa_cfg->reset_work_q);
10310        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10311        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10312
10313        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10314        list_del(&ioa_cfg->queue);
10315        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10316
10317        if (ioa_cfg->sdt_state == ABORT_DUMP)
10318                ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10319        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10320
10321        ipr_free_all_resources(ioa_cfg);
10322
10323        LEAVE;
10324}
10325
10326/**
10327 * ipr_remove - IOA hot plug remove entry point
10328 * @pdev:       pci device struct
10329 *
10330 * Adapter hot plug remove entry point.
10331 *
10332 * Return value:
10333 *      none
10334 **/
10335static void ipr_remove(struct pci_dev *pdev)
10336{
10337        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10338
10339        ENTER;
10340
10341        ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10342                              &ipr_trace_attr);
10343        ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10344                             &ipr_dump_attr);
10345        scsi_remove_host(ioa_cfg->host);
10346
10347        __ipr_remove(pdev);
10348
10349        LEAVE;
10350}
10351
10352/**
10353 * ipr_probe - Adapter hot plug add entry point
10354 *
10355 * Return value:
10356 *      0 on success / non-zero on failure
10357 **/
10358static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10359{
10360        struct ipr_ioa_cfg *ioa_cfg;
10361        int rc, i;
10362
10363        rc = ipr_probe_ioa(pdev, dev_id);
10364
10365        if (rc)
10366                return rc;
10367
10368        ioa_cfg = pci_get_drvdata(pdev);
10369        rc = ipr_probe_ioa_part2(ioa_cfg);
10370
10371        if (rc) {
10372                __ipr_remove(pdev);
10373                return rc;
10374        }
10375
10376        rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10377
10378        if (rc) {
10379                __ipr_remove(pdev);
10380                return rc;
10381        }
10382
10383        rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10384                                   &ipr_trace_attr);
10385
10386        if (rc) {
10387                scsi_remove_host(ioa_cfg->host);
10388                __ipr_remove(pdev);
10389                return rc;
10390        }
10391
10392        rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10393                                   &ipr_dump_attr);
10394
10395        if (rc) {
10396                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10397                                      &ipr_trace_attr);
10398                scsi_remove_host(ioa_cfg->host);
10399                __ipr_remove(pdev);
10400                return rc;
10401        }
10402
10403        scsi_scan_host(ioa_cfg->host);
10404        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10405
10406        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10407                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10408                        blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10409                                        ioa_cfg->iopoll_weight, ipr_iopoll);
10410                        blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10411                }
10412        }
10413
10414        schedule_work(&ioa_cfg->work_q);
10415        return 0;
10416}
10417
10418/**
10419 * ipr_shutdown - Shutdown handler.
10420 * @pdev:       pci device struct
10421 *
10422 * This function is invoked upon system shutdown/reboot. It will issue
10423 * an adapter shutdown to the adapter to flush the write cache.
10424 *
10425 * Return value:
10426 *      none
10427 **/
10428static void ipr_shutdown(struct pci_dev *pdev)
10429{
10430        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10431        unsigned long lock_flags = 0;
10432        enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10433        int i;
10434
10435        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10436        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10437                ioa_cfg->iopoll_weight = 0;
10438                for (i = 1; i < ioa_cfg->hrrq_num; i++)
10439                        blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10440        }
10441
10442        while (ioa_cfg->in_reset_reload) {
10443                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10444                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10445                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10446        }
10447
10448        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10449                shutdown_type = IPR_SHUTDOWN_QUIESCE;
10450
10451        ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10452        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10453        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10454        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10455                ipr_free_irqs(ioa_cfg);
10456                pci_disable_device(ioa_cfg->pdev);
10457        }
10458}
10459
10460static struct pci_device_id ipr_pci_table[] = {
10461        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10462                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10463        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10464                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10465        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10466                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10467        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10468                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10469        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10470                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10471        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10472                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10473        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10474                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10475        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10476                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10477                IPR_USE_LONG_TRANSOP_TIMEOUT },
10478        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10479              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10480        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10481              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10482              IPR_USE_LONG_TRANSOP_TIMEOUT },
10483        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10484              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10485              IPR_USE_LONG_TRANSOP_TIMEOUT },
10486        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10487              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10488        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10489              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10490              IPR_USE_LONG_TRANSOP_TIMEOUT},
10491        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10492              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10493              IPR_USE_LONG_TRANSOP_TIMEOUT },
10494        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10495              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10496              IPR_USE_LONG_TRANSOP_TIMEOUT },
10497        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10498              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10499        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10500              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10501        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10502              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10503              IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10504        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10505                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10506        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10507                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10508        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10509                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10510                IPR_USE_LONG_TRANSOP_TIMEOUT },
10511        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10512                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10513                IPR_USE_LONG_TRANSOP_TIMEOUT },
10514        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10515                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10516        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10517                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10518        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10519                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10520        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10521                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10522        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10523                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10524        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10525                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10526        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10527                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10528        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10529                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10530        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10531                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10532        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10533                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10534        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10535                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10536        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10537                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10538        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10539                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10540        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10541                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10542        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10543                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10544        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10545                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10546        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10547                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10548        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10549                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10550        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10551                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10552        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10553                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10554        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10555                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10556        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10557                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10558        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10559                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10560        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10561                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10562        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10563                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10564        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10565                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10566        { }
10567};
10568MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10569
10570static const struct pci_error_handlers ipr_err_handler = {
10571        .error_detected = ipr_pci_error_detected,
10572        .mmio_enabled = ipr_pci_mmio_enabled,
10573        .slot_reset = ipr_pci_slot_reset,
10574};
10575
10576static struct pci_driver ipr_driver = {
10577        .name = IPR_NAME,
10578        .id_table = ipr_pci_table,
10579        .probe = ipr_probe,
10580        .remove = ipr_remove,
10581        .shutdown = ipr_shutdown,
10582        .err_handler = &ipr_err_handler,
10583};
10584
10585/**
10586 * ipr_halt_done - Shutdown prepare completion
10587 *
10588 * Return value:
10589 *      none
10590 **/
10591static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10592{
10593        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10594}
10595
10596/**
10597 * ipr_halt - Issue shutdown prepare to all adapters
10598 *
10599 * Return value:
10600 *      NOTIFY_OK on success / NOTIFY_DONE on failure
10601 **/
10602static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10603{
10604        struct ipr_cmnd *ipr_cmd;
10605        struct ipr_ioa_cfg *ioa_cfg;
10606        unsigned long flags = 0, driver_lock_flags;
10607
10608        if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10609                return NOTIFY_DONE;
10610
10611        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10612
10613        list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10614                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10615                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10616                    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10617                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10618                        continue;
10619                }
10620
10621                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10622                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10623                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10624                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10625                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10626
10627                ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10628                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10629        }
10630        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10631
10632        return NOTIFY_OK;
10633}
10634
10635static struct notifier_block ipr_notifier = {
10636        ipr_halt, NULL, 0
10637};
10638
10639/**
10640 * ipr_init - Module entry point
10641 *
10642 * Return value:
10643 *      0 on success / negative value on failure
10644 **/
10645static int __init ipr_init(void)
10646{
10647        ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10648                 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10649
10650        register_reboot_notifier(&ipr_notifier);
10651        return pci_register_driver(&ipr_driver);
10652}
10653
10654/**
10655 * ipr_exit - Module unload
10656 *
10657 * Module unload entry point.
10658 *
10659 * Return value:
10660 *      none
10661 **/
10662static void __exit ipr_exit(void)
10663{
10664        unregister_reboot_notifier(&ipr_notifier);
10665        pci_unregister_driver(&ipr_driver);
10666}
10667
10668module_init(ipr_init);
10669module_exit(ipr_exit);
10670