linux/drivers/scsi/aacraid/aachba.c
<<
>>
Prefs
   1/*
   2 *      Adaptec AAC series RAID controller driver
   3 *      (c) Copyright 2001 Red Hat Inc.
   4 *
   5 * based on the old aacraid driver that is..
   6 * Adaptec aacraid device driver for Linux.
   7 *
   8 * Copyright (c) 2000-2010 Adaptec, Inc.
   9 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  10 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2, or (at your option)
  15 * any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; see the file COPYING.  If not, write to
  24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  25 *
  26 * Module Name:
  27 *  aachba.c
  28 *
  29 * Abstract: Contains Interfaces to manage IOs.
  30 *
  31 */
  32
  33#include <linux/kernel.h>
  34#include <linux/init.h>
  35#include <linux/types.h>
  36#include <linux/pci.h>
  37#include <linux/spinlock.h>
  38#include <linux/slab.h>
  39#include <linux/completion.h>
  40#include <linux/blkdev.h>
  41#include <linux/uaccess.h>
  42#include <linux/highmem.h> /* For flush_kernel_dcache_page */
  43#include <linux/module.h>
  44
  45#include <asm/unaligned.h>
  46
  47#include <scsi/scsi.h>
  48#include <scsi/scsi_cmnd.h>
  49#include <scsi/scsi_device.h>
  50#include <scsi/scsi_host.h>
  51
  52#include "aacraid.h"
  53
  54/* values for inqd_pdt: Peripheral device type in plain English */
  55#define INQD_PDT_DA     0x00    /* Direct-access (DISK) device */
  56#define INQD_PDT_PROC   0x03    /* Processor device */
  57#define INQD_PDT_CHNGR  0x08    /* Changer (jukebox, scsi2) */
  58#define INQD_PDT_COMM   0x09    /* Communication device (scsi2) */
  59#define INQD_PDT_NOLUN2 0x1f    /* Unknown Device (scsi2) */
  60#define INQD_PDT_NOLUN  0x7f    /* Logical Unit Not Present */
  61
  62#define INQD_PDT_DMASK  0x1F    /* Peripheral Device Type Mask */
  63#define INQD_PDT_QMASK  0xE0    /* Peripheral Device Qualifer Mask */
  64
  65/*
  66 *      Sense codes
  67 */
  68
  69#define SENCODE_NO_SENSE                        0x00
  70#define SENCODE_END_OF_DATA                     0x00
  71#define SENCODE_BECOMING_READY                  0x04
  72#define SENCODE_INIT_CMD_REQUIRED               0x04
  73#define SENCODE_UNRECOVERED_READ_ERROR          0x11
  74#define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
  75#define SENCODE_INVALID_COMMAND                 0x20
  76#define SENCODE_LBA_OUT_OF_RANGE                0x21
  77#define SENCODE_INVALID_CDB_FIELD               0x24
  78#define SENCODE_LUN_NOT_SUPPORTED               0x25
  79#define SENCODE_INVALID_PARAM_FIELD             0x26
  80#define SENCODE_PARAM_NOT_SUPPORTED             0x26
  81#define SENCODE_PARAM_VALUE_INVALID             0x26
  82#define SENCODE_RESET_OCCURRED                  0x29
  83#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
  84#define SENCODE_INQUIRY_DATA_CHANGED            0x3F
  85#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
  86#define SENCODE_DIAGNOSTIC_FAILURE              0x40
  87#define SENCODE_INTERNAL_TARGET_FAILURE         0x44
  88#define SENCODE_INVALID_MESSAGE_ERROR           0x49
  89#define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
  90#define SENCODE_OVERLAPPED_COMMAND              0x4E
  91
  92/*
  93 *      Additional sense codes
  94 */
  95
  96#define ASENCODE_NO_SENSE                       0x00
  97#define ASENCODE_END_OF_DATA                    0x05
  98#define ASENCODE_BECOMING_READY                 0x01
  99#define ASENCODE_INIT_CMD_REQUIRED              0x02
 100#define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
 101#define ASENCODE_INVALID_COMMAND                0x00
 102#define ASENCODE_LBA_OUT_OF_RANGE               0x00
 103#define ASENCODE_INVALID_CDB_FIELD              0x00
 104#define ASENCODE_LUN_NOT_SUPPORTED              0x00
 105#define ASENCODE_INVALID_PARAM_FIELD            0x00
 106#define ASENCODE_PARAM_NOT_SUPPORTED            0x01
 107#define ASENCODE_PARAM_VALUE_INVALID            0x02
 108#define ASENCODE_RESET_OCCURRED                 0x00
 109#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
 110#define ASENCODE_INQUIRY_DATA_CHANGED           0x03
 111#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
 112#define ASENCODE_DIAGNOSTIC_FAILURE             0x80
 113#define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
 114#define ASENCODE_INVALID_MESSAGE_ERROR          0x00
 115#define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
 116#define ASENCODE_OVERLAPPED_COMMAND             0x00
 117
 118#define BYTE0(x) (unsigned char)(x)
 119#define BYTE1(x) (unsigned char)((x) >> 8)
 120#define BYTE2(x) (unsigned char)((x) >> 16)
 121#define BYTE3(x) (unsigned char)((x) >> 24)
 122
 123/* MODE_SENSE data format */
 124typedef struct {
 125        struct {
 126                u8      data_length;
 127                u8      med_type;
 128                u8      dev_par;
 129                u8      bd_length;
 130        } __attribute__((packed)) hd;
 131        struct {
 132                u8      dens_code;
 133                u8      block_count[3];
 134                u8      reserved;
 135                u8      block_length[3];
 136        } __attribute__((packed)) bd;
 137                u8      mpc_buf[3];
 138} __attribute__((packed)) aac_modep_data;
 139
 140/* MODE_SENSE_10 data format */
 141typedef struct {
 142        struct {
 143                u8      data_length[2];
 144                u8      med_type;
 145                u8      dev_par;
 146                u8      rsrvd[2];
 147                u8      bd_length[2];
 148        } __attribute__((packed)) hd;
 149        struct {
 150                u8      dens_code;
 151                u8      block_count[3];
 152                u8      reserved;
 153                u8      block_length[3];
 154        } __attribute__((packed)) bd;
 155                u8      mpc_buf[3];
 156} __attribute__((packed)) aac_modep10_data;
 157
 158/*------------------------------------------------------------------------------
 159 *              S T R U C T S / T Y P E D E F S
 160 *----------------------------------------------------------------------------*/
 161/* SCSI inquiry data */
 162struct inquiry_data {
 163        u8 inqd_pdt;    /* Peripheral qualifier | Peripheral Device Type */
 164        u8 inqd_dtq;    /* RMB | Device Type Qualifier */
 165        u8 inqd_ver;    /* ISO version | ECMA version | ANSI-approved version */
 166        u8 inqd_rdf;    /* AENC | TrmIOP | Response data format */
 167        u8 inqd_len;    /* Additional length (n-4) */
 168        u8 inqd_pad1[2];/* Reserved - must be zero */
 169        u8 inqd_pad2;   /* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
 170        u8 inqd_vid[8]; /* Vendor ID */
 171        u8 inqd_pid[16];/* Product ID */
 172        u8 inqd_prl[4]; /* Product Revision Level */
 173};
 174
 175/* Added for VPD 0x83 */
 176struct  tvpd_id_descriptor_type_1 {
 177        u8 codeset:4;           /* VPD_CODE_SET */
 178        u8 reserved:4;
 179        u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
 180        u8 reserved2:4;
 181        u8 reserved3;
 182        u8 identifierlength;
 183        u8 venid[8];
 184        u8 productid[16];
 185        u8 serialnumber[8];     /* SN in ASCII */
 186
 187};
 188
 189struct tvpd_id_descriptor_type_2 {
 190        u8 codeset:4;           /* VPD_CODE_SET */
 191        u8 reserved:4;
 192        u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
 193        u8 reserved2:4;
 194        u8 reserved3;
 195        u8 identifierlength;
 196        struct teu64id {
 197                u32 Serial;
 198                 /* The serial number supposed to be 40 bits,
 199                  * bit we only support 32, so make the last byte zero. */
 200                u8 reserved;
 201                u8 venid[3];
 202        } eu64id;
 203
 204};
 205
 206struct tvpd_id_descriptor_type_3 {
 207        u8 codeset : 4;          /* VPD_CODE_SET */
 208        u8 reserved : 4;
 209        u8 identifiertype : 4;   /* VPD_IDENTIFIER_TYPE */
 210        u8 reserved2 : 4;
 211        u8 reserved3;
 212        u8 identifierlength;
 213        u8 Identifier[16];
 214};
 215
 216struct tvpd_page83 {
 217        u8 DeviceType:5;
 218        u8 DeviceTypeQualifier:3;
 219        u8 PageCode;
 220        u8 reserved;
 221        u8 PageLength;
 222        struct tvpd_id_descriptor_type_1 type1;
 223        struct tvpd_id_descriptor_type_2 type2;
 224        struct tvpd_id_descriptor_type_3 type3;
 225};
 226
 227/*
 228 *              M O D U L E   G L O B A L S
 229 */
 230
 231static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
 232static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
 233static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
 234static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
 235                                struct aac_raw_io2 *rio2, int sg_max);
 236static long aac_build_sghba(struct scsi_cmnd *scsicmd,
 237                                struct aac_hba_cmd_req *hbacmd,
 238                                int sg_max, u64 sg_address);
 239static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
 240                                int pages, int nseg, int nseg_new);
 241static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
 242static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
 243#ifdef AAC_DETAILED_STATUS_INFO
 244static char *aac_get_status_string(u32 status);
 245#endif
 246
 247/*
 248 *      Non dasd selection is handled entirely in aachba now
 249 */
 250
 251static int nondasd = -1;
 252static int aac_cache = 2;       /* WCE=0 to avoid performance problems */
 253static int dacmode = -1;
 254int aac_msi;
 255int aac_commit = -1;
 256int startup_timeout = 180;
 257int aif_timeout = 120;
 258int aac_sync_mode;  /* Only Sync. transfer - disabled */
 259int aac_convert_sgl = 1;        /* convert non-conformable s/g list - enabled */
 260
 261module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
 262MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
 263        " 0=off, 1=on");
 264module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
 265MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
 266        " 0=off, 1=on");
 267module_param(nondasd, int, S_IRUGO|S_IWUSR);
 268MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
 269        " 0=off, 1=on");
 270module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
 271MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
 272        "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
 273        "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
 274        "\tbit 2 - Disable only if Battery is protecting Cache");
 275module_param(dacmode, int, S_IRUGO|S_IWUSR);
 276MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
 277        " 0=off, 1=on");
 278module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
 279MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
 280        " adapter for foreign arrays.\n"
 281        "This is typically needed in systems that do not have a BIOS."
 282        " 0=off, 1=on");
 283module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
 284MODULE_PARM_DESC(msi, "IRQ handling."
 285        " 0=PIC(default), 1=MSI, 2=MSI-X)");
 286module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
 287MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
 288        " adapter to have it's kernel up and\n"
 289        "running. This is typically adjusted for large systems that do not"
 290        " have a BIOS.");
 291module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
 292MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
 293        " applications to pick up AIFs before\n"
 294        "deregistering them. This is typically adjusted for heavily burdened"
 295        " systems.");
 296
 297int aac_fib_dump;
 298module_param(aac_fib_dump, int, 0644);
 299MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");
 300
 301int numacb = -1;
 302module_param(numacb, int, S_IRUGO|S_IWUSR);
 303MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
 304        " blocks (FIB) allocated. Valid values are 512 and down. Default is"
 305        " to use suggestion from Firmware.");
 306
 307int acbsize = -1;
 308module_param(acbsize, int, S_IRUGO|S_IWUSR);
 309MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
 310        " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
 311        " suggestion from Firmware.");
 312
 313int update_interval = 30 * 60;
 314module_param(update_interval, int, S_IRUGO|S_IWUSR);
 315MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
 316        " updates issued to adapter.");
 317
 318int check_interval = 60;
 319module_param(check_interval, int, S_IRUGO|S_IWUSR);
 320MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
 321        " checks.");
 322
 323int aac_check_reset = 1;
 324module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
 325MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
 326        " adapter. a value of -1 forces the reset to adapters programmed to"
 327        " ignore it.");
 328
 329int expose_physicals = -1;
 330module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
 331MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
 332        " -1=protect 0=off, 1=on");
 333
 334int aac_reset_devices;
 335module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
 336MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
 337
 338int aac_wwn = 1;
 339module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
 340MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
 341        "\t0 - Disable\n"
 342        "\t1 - Array Meta Data Signature (default)\n"
 343        "\t2 - Adapter Serial Number");
 344
 345
 346static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
 347                struct fib *fibptr) {
 348        struct scsi_device *device;
 349
 350        if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
 351                dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
 352                aac_fib_complete(fibptr);
 353                return 0;
 354        }
 355        scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
 356        device = scsicmd->device;
 357        if (unlikely(!device)) {
 358                dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
 359                aac_fib_complete(fibptr);
 360                return 0;
 361        }
 362        return 1;
 363}
 364
 365/**
 366 *      aac_get_config_status   -       check the adapter configuration
 367 *      @common: adapter to query
 368 *
 369 *      Query config status, and commit the configuration if needed.
 370 */
 371int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 372{
 373        int status = 0;
 374        struct fib * fibptr;
 375
 376        if (!(fibptr = aac_fib_alloc(dev)))
 377                return -ENOMEM;
 378
 379        aac_fib_init(fibptr);
 380        {
 381                struct aac_get_config_status *dinfo;
 382                dinfo = (struct aac_get_config_status *) fib_data(fibptr);
 383
 384                dinfo->command = cpu_to_le32(VM_ContainerConfig);
 385                dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
 386                dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
 387        }
 388
 389        status = aac_fib_send(ContainerCommand,
 390                            fibptr,
 391                            sizeof (struct aac_get_config_status),
 392                            FsaNormal,
 393                            1, 1,
 394                            NULL, NULL);
 395        if (status < 0) {
 396                printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
 397        } else {
 398                struct aac_get_config_status_resp *reply
 399                  = (struct aac_get_config_status_resp *) fib_data(fibptr);
 400                dprintk((KERN_WARNING
 401                  "aac_get_config_status: response=%d status=%d action=%d\n",
 402                  le32_to_cpu(reply->response),
 403                  le32_to_cpu(reply->status),
 404                  le32_to_cpu(reply->data.action)));
 405                if ((le32_to_cpu(reply->response) != ST_OK) ||
 406                     (le32_to_cpu(reply->status) != CT_OK) ||
 407                     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
 408                        printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
 409                        status = -EINVAL;
 410                }
 411        }
 412        /* Do not set XferState to zero unless receives a response from F/W */
 413        if (status >= 0)
 414                aac_fib_complete(fibptr);
 415
 416        /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
 417        if (status >= 0) {
 418                if ((aac_commit == 1) || commit_flag) {
 419                        struct aac_commit_config * dinfo;
 420                        aac_fib_init(fibptr);
 421                        dinfo = (struct aac_commit_config *) fib_data(fibptr);
 422
 423                        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 424                        dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
 425
 426                        status = aac_fib_send(ContainerCommand,
 427                                    fibptr,
 428                                    sizeof (struct aac_commit_config),
 429                                    FsaNormal,
 430                                    1, 1,
 431                                    NULL, NULL);
 432                        /* Do not set XferState to zero unless
 433                         * receives a response from F/W */
 434                        if (status >= 0)
 435                                aac_fib_complete(fibptr);
 436                } else if (aac_commit == 0) {
 437                        printk(KERN_WARNING
 438                          "aac_get_config_status: Foreign device configurations are being ignored\n");
 439                }
 440        }
 441        /* FIB should be freed only after getting the response from the F/W */
 442        if (status != -ERESTARTSYS)
 443                aac_fib_free(fibptr);
 444        return status;
 445}
 446
 447static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
 448{
 449        char inq_data;
 450        scsi_sg_copy_to_buffer(scsicmd,  &inq_data, sizeof(inq_data));
 451        if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
 452                inq_data &= 0xdf;
 453                scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
 454        }
 455}
 456
 457/**
 458 *      aac_get_containers      -       list containers
 459 *      @common: adapter to probe
 460 *
 461 *      Make a list of all containers on this controller
 462 */
 463int aac_get_containers(struct aac_dev *dev)
 464{
 465        struct fsa_dev_info *fsa_dev_ptr;
 466        u32 index;
 467        int status = 0;
 468        struct fib * fibptr;
 469        struct aac_get_container_count *dinfo;
 470        struct aac_get_container_count_resp *dresp;
 471        int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 472
 473        if (!(fibptr = aac_fib_alloc(dev)))
 474                return -ENOMEM;
 475
 476        aac_fib_init(fibptr);
 477        dinfo = (struct aac_get_container_count *) fib_data(fibptr);
 478        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 479        dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
 480
 481        status = aac_fib_send(ContainerCommand,
 482                    fibptr,
 483                    sizeof (struct aac_get_container_count),
 484                    FsaNormal,
 485                    1, 1,
 486                    NULL, NULL);
 487        if (status >= 0) {
 488                dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
 489                maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
 490                if (fibptr->dev->supplement_adapter_info.supported_options2 &
 491                    AAC_OPTION_SUPPORTED_240_VOLUMES) {
 492                        maximum_num_containers =
 493                                le32_to_cpu(dresp->MaxSimpleVolumes);
 494                }
 495                aac_fib_complete(fibptr);
 496        }
 497        /* FIB should be freed only after getting the response from the F/W */
 498        if (status != -ERESTARTSYS)
 499                aac_fib_free(fibptr);
 500
 501        if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
 502                maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 503        if (dev->fsa_dev == NULL ||
 504                dev->maximum_num_containers != maximum_num_containers) {
 505
 506                fsa_dev_ptr = dev->fsa_dev;
 507
 508                dev->fsa_dev = kcalloc(maximum_num_containers,
 509                                        sizeof(*fsa_dev_ptr), GFP_KERNEL);
 510
 511                kfree(fsa_dev_ptr);
 512                fsa_dev_ptr = NULL;
 513
 514
 515                if (!dev->fsa_dev)
 516                        return -ENOMEM;
 517
 518                dev->maximum_num_containers = maximum_num_containers;
 519        }
 520        for (index = 0; index < dev->maximum_num_containers; index++) {
 521                dev->fsa_dev[index].devname[0] = '\0';
 522                dev->fsa_dev[index].valid = 0;
 523
 524                status = aac_probe_container(dev, index);
 525
 526                if (status < 0) {
 527                        printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
 528                        break;
 529                }
 530        }
 531        return status;
 532}
 533
 534static void get_container_name_callback(void *context, struct fib * fibptr)
 535{
 536        struct aac_get_name_resp * get_name_reply;
 537        struct scsi_cmnd * scsicmd;
 538
 539        scsicmd = (struct scsi_cmnd *) context;
 540
 541        if (!aac_valid_context(scsicmd, fibptr))
 542                return;
 543
 544        dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
 545        BUG_ON(fibptr == NULL);
 546
 547        get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
 548        /* Failure is irrelevant, using default value instead */
 549        if ((le32_to_cpu(get_name_reply->status) == CT_OK)
 550         && (get_name_reply->data[0] != '\0')) {
 551                char *sp = get_name_reply->data;
 552                int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
 553
 554                sp[data_size - 1] = '\0';
 555                while (*sp == ' ')
 556                        ++sp;
 557                if (*sp) {
 558                        struct inquiry_data inq;
 559                        char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
 560                        int count = sizeof(d);
 561                        char *dp = d;
 562                        do {
 563                                *dp++ = (*sp) ? *sp++ : ' ';
 564                        } while (--count > 0);
 565
 566                        scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
 567                        memcpy(inq.inqd_pid, d, sizeof(d));
 568                        scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
 569                }
 570        }
 571
 572        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 573
 574        aac_fib_complete(fibptr);
 575        scsicmd->scsi_done(scsicmd);
 576}
 577
 578/**
 579 *      aac_get_container_name  -       get container name, none blocking.
 580 */
 581static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 582{
 583        int status;
 584        int data_size;
 585        struct aac_get_name *dinfo;
 586        struct fib * cmd_fibcontext;
 587        struct aac_dev * dev;
 588
 589        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 590
 591        data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
 592
 593        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
 594
 595        aac_fib_init(cmd_fibcontext);
 596        dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
 597        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 598
 599        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 600        dinfo->type = cpu_to_le32(CT_READ_NAME);
 601        dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
 602        dinfo->count = cpu_to_le32(data_size - 1);
 603
 604        status = aac_fib_send(ContainerCommand,
 605                  cmd_fibcontext,
 606                  sizeof(struct aac_get_name_resp),
 607                  FsaNormal,
 608                  0, 1,
 609                  (fib_callback)get_container_name_callback,
 610                  (void *) scsicmd);
 611
 612        /*
 613         *      Check that the command queued to the controller
 614         */
 615        if (status == -EINPROGRESS)
 616                return 0;
 617
 618        printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 619        aac_fib_complete(cmd_fibcontext);
 620        return -1;
 621}
 622
 623static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
 624{
 625        struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
 626
 627        if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
 628                return aac_scsi_cmd(scsicmd);
 629
 630        scsicmd->result = DID_NO_CONNECT << 16;
 631        scsicmd->scsi_done(scsicmd);
 632        return 0;
 633}
 634
 635static void _aac_probe_container2(void * context, struct fib * fibptr)
 636{
 637        struct fsa_dev_info *fsa_dev_ptr;
 638        int (*callback)(struct scsi_cmnd *);
 639        struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
 640        int i;
 641
 642
 643        if (!aac_valid_context(scsicmd, fibptr))
 644                return;
 645
 646        scsicmd->SCp.Status = 0;
 647        fsa_dev_ptr = fibptr->dev->fsa_dev;
 648        if (fsa_dev_ptr) {
 649                struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
 650                __le32 sup_options2;
 651
 652                fsa_dev_ptr += scmd_id(scsicmd);
 653                sup_options2 =
 654                        fibptr->dev->supplement_adapter_info.supported_options2;
 655
 656                if ((le32_to_cpu(dresp->status) == ST_OK) &&
 657                    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
 658                    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
 659                        if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
 660                                dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
 661                                fsa_dev_ptr->block_size = 0x200;
 662                        } else {
 663                                fsa_dev_ptr->block_size =
 664                                        le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
 665                        }
 666                        for (i = 0; i < 16; i++)
 667                                fsa_dev_ptr->identifier[i] =
 668                                        dresp->mnt[0].fileinfo.bdevinfo
 669                                                                .identifier[i];
 670                        fsa_dev_ptr->valid = 1;
 671                        /* sense_key holds the current state of the spin-up */
 672                        if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
 673                                fsa_dev_ptr->sense_data.sense_key = NOT_READY;
 674                        else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
 675                                fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
 676                        fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
 677                        fsa_dev_ptr->size
 678                          = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
 679                            (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
 680                        fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
 681                }
 682                if ((fsa_dev_ptr->valid & 1) == 0)
 683                        fsa_dev_ptr->valid = 0;
 684                scsicmd->SCp.Status = le32_to_cpu(dresp->count);
 685        }
 686        aac_fib_complete(fibptr);
 687        aac_fib_free(fibptr);
 688        callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
 689        scsicmd->SCp.ptr = NULL;
 690        (*callback)(scsicmd);
 691        return;
 692}
 693
 694static void _aac_probe_container1(void * context, struct fib * fibptr)
 695{
 696        struct scsi_cmnd * scsicmd;
 697        struct aac_mount * dresp;
 698        struct aac_query_mount *dinfo;
 699        int status;
 700
 701        dresp = (struct aac_mount *) fib_data(fibptr);
 702        if (!aac_supports_2T(fibptr->dev)) {
 703                dresp->mnt[0].capacityhigh = 0;
 704                if ((le32_to_cpu(dresp->status) == ST_OK) &&
 705                        (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
 706                        _aac_probe_container2(context, fibptr);
 707                        return;
 708                }
 709        }
 710        scsicmd = (struct scsi_cmnd *) context;
 711
 712        if (!aac_valid_context(scsicmd, fibptr))
 713                return;
 714
 715        aac_fib_init(fibptr);
 716
 717        dinfo = (struct aac_query_mount *)fib_data(fibptr);
 718
 719        if (fibptr->dev->supplement_adapter_info.supported_options2 &
 720            AAC_OPTION_VARIABLE_BLOCK_SIZE)
 721                dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
 722        else
 723                dinfo->command = cpu_to_le32(VM_NameServe64);
 724
 725        dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 726        dinfo->type = cpu_to_le32(FT_FILESYS);
 727        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 728
 729        status = aac_fib_send(ContainerCommand,
 730                          fibptr,
 731                          sizeof(struct aac_query_mount),
 732                          FsaNormal,
 733                          0, 1,
 734                          _aac_probe_container2,
 735                          (void *) scsicmd);
 736        /*
 737         *      Check that the command queued to the controller
 738         */
 739        if (status < 0 && status != -EINPROGRESS) {
 740                /* Inherit results from VM_NameServe, if any */
 741                dresp->status = cpu_to_le32(ST_OK);
 742                _aac_probe_container2(context, fibptr);
 743        }
 744}
 745
 746static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
 747{
 748        struct fib * fibptr;
 749        int status = -ENOMEM;
 750
 751        if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
 752                struct aac_query_mount *dinfo;
 753
 754                aac_fib_init(fibptr);
 755
 756                dinfo = (struct aac_query_mount *)fib_data(fibptr);
 757
 758                if (fibptr->dev->supplement_adapter_info.supported_options2 &
 759                    AAC_OPTION_VARIABLE_BLOCK_SIZE)
 760                        dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
 761                else
 762                        dinfo->command = cpu_to_le32(VM_NameServe);
 763
 764                dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 765                dinfo->type = cpu_to_le32(FT_FILESYS);
 766                scsicmd->SCp.ptr = (char *)callback;
 767                scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 768
 769                status = aac_fib_send(ContainerCommand,
 770                          fibptr,
 771                          sizeof(struct aac_query_mount),
 772                          FsaNormal,
 773                          0, 1,
 774                          _aac_probe_container1,
 775                          (void *) scsicmd);
 776                /*
 777                 *      Check that the command queued to the controller
 778                 */
 779                if (status == -EINPROGRESS)
 780                        return 0;
 781
 782                if (status < 0) {
 783                        scsicmd->SCp.ptr = NULL;
 784                        aac_fib_complete(fibptr);
 785                        aac_fib_free(fibptr);
 786                }
 787        }
 788        if (status < 0) {
 789                struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
 790                if (fsa_dev_ptr) {
 791                        fsa_dev_ptr += scmd_id(scsicmd);
 792                        if ((fsa_dev_ptr->valid & 1) == 0) {
 793                                fsa_dev_ptr->valid = 0;
 794                                return (*callback)(scsicmd);
 795                        }
 796                }
 797        }
 798        return status;
 799}
 800
 801/**
 802 *      aac_probe_container             -       query a logical volume
 803 *      @dev: device to query
 804 *      @cid: container identifier
 805 *
 806 *      Queries the controller about the given volume. The volume information
 807 *      is updated in the struct fsa_dev_info structure rather than returned.
 808 */
 809static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
 810{
 811        scsicmd->device = NULL;
 812        return 0;
 813}
 814
 815int aac_probe_container(struct aac_dev *dev, int cid)
 816{
 817        struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
 818        struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
 819        int status;
 820
 821        if (!scsicmd || !scsidev) {
 822                kfree(scsicmd);
 823                kfree(scsidev);
 824                return -ENOMEM;
 825        }
 826        scsicmd->list.next = NULL;
 827        scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
 828
 829        scsicmd->device = scsidev;
 830        scsidev->sdev_state = 0;
 831        scsidev->id = cid;
 832        scsidev->host = dev->scsi_host_ptr;
 833
 834        if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
 835                while (scsicmd->device == scsidev)
 836                        schedule();
 837        kfree(scsidev);
 838        status = scsicmd->SCp.Status;
 839        kfree(scsicmd);
 840        return status;
 841}
 842
 843/* Local Structure to set SCSI inquiry data strings */
 844struct scsi_inq {
 845        char vid[8];         /* Vendor ID */
 846        char pid[16];        /* Product ID */
 847        char prl[4];         /* Product Revision Level */
 848};
 849
 850/**
 851 *      InqStrCopy      -       string merge
 852 *      @a:     string to copy from
 853 *      @b:     string to copy to
 854 *
 855 *      Copy a String from one location to another
 856 *      without copying \0
 857 */
 858
 859static void inqstrcpy(char *a, char *b)
 860{
 861
 862        while (*a != (char)0)
 863                *b++ = *a++;
 864}
 865
 866static char *container_types[] = {
 867        "None",
 868        "Volume",
 869        "Mirror",
 870        "Stripe",
 871        "RAID5",
 872        "SSRW",
 873        "SSRO",
 874        "Morph",
 875        "Legacy",
 876        "RAID4",
 877        "RAID10",
 878        "RAID00",
 879        "V-MIRRORS",
 880        "PSEUDO R4",
 881        "RAID50",
 882        "RAID5D",
 883        "RAID5D0",
 884        "RAID1E",
 885        "RAID6",
 886        "RAID60",
 887        "Unknown"
 888};
 889
 890char * get_container_type(unsigned tindex)
 891{
 892        if (tindex >= ARRAY_SIZE(container_types))
 893                tindex = ARRAY_SIZE(container_types) - 1;
 894        return container_types[tindex];
 895}
 896
 897/* Function: setinqstr
 898 *
 899 * Arguments: [1] pointer to void [1] int
 900 *
 901 * Purpose: Sets SCSI inquiry data strings for vendor, product
 902 * and revision level. Allows strings to be set in platform dependent
 903 * files instead of in OS dependent driver source.
 904 */
 905
 906static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 907{
 908        struct scsi_inq *str;
 909        struct aac_supplement_adapter_info *sup_adap_info;
 910
 911        sup_adap_info = &dev->supplement_adapter_info;
 912        str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
 913        memset(str, ' ', sizeof(*str));
 914
 915        if (sup_adap_info->adapter_type_text[0]) {
 916                int c;
 917                char *cp;
 918                char *cname = kmemdup(sup_adap_info->adapter_type_text,
 919                                sizeof(sup_adap_info->adapter_type_text),
 920                                                                GFP_ATOMIC);
 921                if (!cname)
 922                        return;
 923
 924                cp = cname;
 925                if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
 926                        inqstrcpy("SMC", str->vid);
 927                else {
 928                        c = sizeof(str->vid);
 929                        while (*cp && *cp != ' ' && --c)
 930                                ++cp;
 931                        c = *cp;
 932                        *cp = '\0';
 933                        inqstrcpy(cname, str->vid);
 934                        *cp = c;
 935                        while (*cp && *cp != ' ')
 936                                ++cp;
 937                }
 938                while (*cp == ' ')
 939                        ++cp;
 940                /* last six chars reserved for vol type */
 941                if (strlen(cp) > sizeof(str->pid))
 942                        cp[sizeof(str->pid)] = '\0';
 943                inqstrcpy (cp, str->pid);
 944
 945                kfree(cname);
 946        } else {
 947                struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
 948
 949                inqstrcpy (mp->vname, str->vid);
 950                /* last six chars reserved for vol type */
 951                inqstrcpy (mp->model, str->pid);
 952        }
 953
 954        if (tindex < ARRAY_SIZE(container_types)){
 955                char *findit = str->pid;
 956
 957                for ( ; *findit != ' '; findit++); /* walk till we find a space */
 958                /* RAID is superfluous in the context of a RAID device */
 959                if (memcmp(findit-4, "RAID", 4) == 0)
 960                        *(findit -= 4) = ' ';
 961                if (((findit - str->pid) + strlen(container_types[tindex]))
 962                 < (sizeof(str->pid) + sizeof(str->prl)))
 963                        inqstrcpy (container_types[tindex], findit + 1);
 964        }
 965        inqstrcpy ("V1.0", str->prl);
 966}
 967
 968static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
 969                struct aac_dev *dev, struct scsi_cmnd *scsicmd)
 970{
 971        int container;
 972
 973        vpdpage83data->type3.codeset = 1;
 974        vpdpage83data->type3.identifiertype = 3;
 975        vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
 976                        - 4;
 977
 978        for (container = 0; container < dev->maximum_num_containers;
 979                        container++) {
 980
 981                if (scmd_id(scsicmd) == container) {
 982                        memcpy(vpdpage83data->type3.Identifier,
 983                                        dev->fsa_dev[container].identifier,
 984                                        16);
 985                        break;
 986                }
 987        }
 988}
 989
 990static void get_container_serial_callback(void *context, struct fib * fibptr)
 991{
 992        struct aac_get_serial_resp * get_serial_reply;
 993        struct scsi_cmnd * scsicmd;
 994
 995        BUG_ON(fibptr == NULL);
 996
 997        scsicmd = (struct scsi_cmnd *) context;
 998        if (!aac_valid_context(scsicmd, fibptr))
 999                return;
1000
1001        get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
1002        /* Failure is irrelevant, using default value instead */
1003        if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
1004                /*Check to see if it's for VPD 0x83 or 0x80 */
1005                if (scsicmd->cmnd[2] == 0x83) {
1006                        /* vpd page 0x83 - Device Identification Page */
1007                        struct aac_dev *dev;
1008                        int i;
1009                        struct tvpd_page83 vpdpage83data;
1010
1011                        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1012
1013                        memset(((u8 *)&vpdpage83data), 0,
1014                               sizeof(vpdpage83data));
1015
1016                        /* DIRECT_ACCESS_DEVIC */
1017                        vpdpage83data.DeviceType = 0;
1018                        /* DEVICE_CONNECTED */
1019                        vpdpage83data.DeviceTypeQualifier = 0;
1020                        /* VPD_DEVICE_IDENTIFIERS */
1021                        vpdpage83data.PageCode = 0x83;
1022                        vpdpage83data.reserved = 0;
1023                        vpdpage83data.PageLength =
1024                                sizeof(vpdpage83data.type1) +
1025                                sizeof(vpdpage83data.type2);
1026
1027                        /* VPD 83 Type 3 is not supported for ARC */
1028                        if (dev->sa_firmware)
1029                                vpdpage83data.PageLength +=
1030                                sizeof(vpdpage83data.type3);
1031
1032                        /* T10 Vendor Identifier Field Format */
1033                        /* VpdcodesetAscii */
1034                        vpdpage83data.type1.codeset = 2;
1035                        /* VpdIdentifierTypeVendorId */
1036                        vpdpage83data.type1.identifiertype = 1;
1037                        vpdpage83data.type1.identifierlength =
1038                                sizeof(vpdpage83data.type1) - 4;
1039
1040                        /* "ADAPTEC " for adaptec */
1041                        memcpy(vpdpage83data.type1.venid,
1042                                "ADAPTEC ",
1043                                sizeof(vpdpage83data.type1.venid));
1044                        memcpy(vpdpage83data.type1.productid,
1045                                "ARRAY           ",
1046                                sizeof(
1047                                vpdpage83data.type1.productid));
1048
1049                        /* Convert to ascii based serial number.
1050                         * The LSB is the the end.
1051                         */
1052                        for (i = 0; i < 8; i++) {
1053                                u8 temp =
1054                                        (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
1055                                if (temp  > 0x9) {
1056                                        vpdpage83data.type1.serialnumber[i] =
1057                                                        'A' + (temp - 0xA);
1058                                } else {
1059                                        vpdpage83data.type1.serialnumber[i] =
1060                                                        '0' + temp;
1061                                }
1062                        }
1063
1064                        /* VpdCodeSetBinary */
1065                        vpdpage83data.type2.codeset = 1;
1066                        /* VpdidentifiertypeEUI64 */
1067                        vpdpage83data.type2.identifiertype = 2;
1068                        vpdpage83data.type2.identifierlength =
1069                                sizeof(vpdpage83data.type2) - 4;
1070
1071                        vpdpage83data.type2.eu64id.venid[0] = 0xD0;
1072                        vpdpage83data.type2.eu64id.venid[1] = 0;
1073                        vpdpage83data.type2.eu64id.venid[2] = 0;
1074
1075                        vpdpage83data.type2.eu64id.Serial =
1076                                                        get_serial_reply->uid;
1077                        vpdpage83data.type2.eu64id.reserved = 0;
1078
1079                        /*
1080                         * VpdIdentifierTypeFCPHName
1081                         * VPD 0x83 Type 3 not supported for ARC
1082                         */
1083                        if (dev->sa_firmware) {
1084                                build_vpd83_type3(&vpdpage83data,
1085                                                dev, scsicmd);
1086                        }
1087
1088                        /* Move the inquiry data to the response buffer. */
1089                        scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
1090                                                 sizeof(vpdpage83data));
1091                } else {
1092                        /* It must be for VPD 0x80 */
1093                        char sp[13];
1094                        /* EVPD bit set */
1095                        sp[0] = INQD_PDT_DA;
1096                        sp[1] = scsicmd->cmnd[2];
1097                        sp[2] = 0;
1098                        sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
1099                                le32_to_cpu(get_serial_reply->uid));
1100                        scsi_sg_copy_from_buffer(scsicmd, sp,
1101                                                 sizeof(sp));
1102                }
1103        }
1104
1105        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1106
1107        aac_fib_complete(fibptr);
1108        scsicmd->scsi_done(scsicmd);
1109}
1110
1111/**
1112 *      aac_get_container_serial - get container serial, none blocking.
1113 */
1114static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
1115{
1116        int status;
1117        struct aac_get_serial *dinfo;
1118        struct fib * cmd_fibcontext;
1119        struct aac_dev * dev;
1120
1121        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1122
1123        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
1124
1125        aac_fib_init(cmd_fibcontext);
1126        dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
1127
1128        dinfo->command = cpu_to_le32(VM_ContainerConfig);
1129        dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
1130        dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
1131        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1132
1133        status = aac_fib_send(ContainerCommand,
1134                  cmd_fibcontext,
1135                  sizeof(struct aac_get_serial_resp),
1136                  FsaNormal,
1137                  0, 1,
1138                  (fib_callback) get_container_serial_callback,
1139                  (void *) scsicmd);
1140
1141        /*
1142         *      Check that the command queued to the controller
1143         */
1144        if (status == -EINPROGRESS)
1145                return 0;
1146
1147        printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
1148        aac_fib_complete(cmd_fibcontext);
1149        return -1;
1150}
1151
1152/* Function: setinqserial
1153 *
1154 * Arguments: [1] pointer to void [1] int
1155 *
1156 * Purpose: Sets SCSI Unit Serial number.
1157 *          This is a fake. We should read a proper
1158 *          serial number from the container. <SuSE>But
1159 *          without docs it's quite hard to do it :-)
1160 *          So this will have to do in the meantime.</SuSE>
1161 */
1162
1163static int setinqserial(struct aac_dev *dev, void *data, int cid)
1164{
1165        /*
1166         *      This breaks array migration.
1167         */
1168        return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
1169                        le32_to_cpu(dev->adapter_info.serial[0]), cid);
1170}
1171
1172static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
1173        u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
1174{
1175        u8 *sense_buf = (u8 *)sense_data;
1176        /* Sense data valid, err code 70h */
1177        sense_buf[0] = 0x70; /* No info field */
1178        sense_buf[1] = 0;       /* Segment number, always zero */
1179
1180        sense_buf[2] = sense_key;       /* Sense key */
1181
1182        sense_buf[12] = sense_code;     /* Additional sense code */
1183        sense_buf[13] = a_sense_code;   /* Additional sense code qualifier */
1184
1185        if (sense_key == ILLEGAL_REQUEST) {
1186                sense_buf[7] = 10;      /* Additional sense length */
1187
1188                sense_buf[15] = bit_pointer;
1189                /* Illegal parameter is in the parameter block */
1190                if (sense_code == SENCODE_INVALID_CDB_FIELD)
1191                        sense_buf[15] |= 0xc0;/* Std sense key specific field */
1192                /* Illegal parameter is in the CDB block */
1193                sense_buf[16] = field_pointer >> 8;     /* MSB */
1194                sense_buf[17] = field_pointer;          /* LSB */
1195        } else
1196                sense_buf[7] = 6;       /* Additional sense length */
1197}
1198
1199static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
1200{
1201        if (lba & 0xffffffff00000000LL) {
1202                int cid = scmd_id(cmd);
1203                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
1204                cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1205                        SAM_STAT_CHECK_CONDITION;
1206                set_sense(&dev->fsa_dev[cid].sense_data,
1207                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1208                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1209                memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1210                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1211                             SCSI_SENSE_BUFFERSIZE));
1212                cmd->scsi_done(cmd);
1213                return 1;
1214        }
1215        return 0;
1216}
1217
1218static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
1219{
1220        return 0;
1221}
1222
1223static void io_callback(void *context, struct fib * fibptr);
1224
1225static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1226{
1227        struct aac_dev *dev = fib->dev;
1228        u16 fibsize, command;
1229        long ret;
1230
1231        aac_fib_init(fib);
1232        if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
1233                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
1234                !dev->sync_mode) {
1235                struct aac_raw_io2 *readcmd2;
1236                readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
1237                memset(readcmd2, 0, sizeof(struct aac_raw_io2));
1238                readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
1239                readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1240                readcmd2->byteCount = cpu_to_le32(count *
1241                        dev->fsa_dev[scmd_id(cmd)].block_size);
1242                readcmd2->cid = cpu_to_le16(scmd_id(cmd));
1243                readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
1244                ret = aac_build_sgraw2(cmd, readcmd2,
1245                                dev->scsi_host_ptr->sg_tablesize);
1246                if (ret < 0)
1247                        return ret;
1248                command = ContainerRawIo2;
1249                fibsize = sizeof(struct aac_raw_io2) +
1250                        ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
1251        } else {
1252                struct aac_raw_io *readcmd;
1253                readcmd = (struct aac_raw_io *) fib_data(fib);
1254                readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1255                readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1256                readcmd->count = cpu_to_le32(count *
1257                        dev->fsa_dev[scmd_id(cmd)].block_size);
1258                readcmd->cid = cpu_to_le16(scmd_id(cmd));
1259                readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
1260                readcmd->bpTotal = 0;
1261                readcmd->bpComplete = 0;
1262                ret = aac_build_sgraw(cmd, &readcmd->sg);
1263                if (ret < 0)
1264                        return ret;
1265                command = ContainerRawIo;
1266                fibsize = sizeof(struct aac_raw_io) +
1267                        ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
1268        }
1269
1270        BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
1271        /*
1272         *      Now send the Fib to the adapter
1273         */
1274        return aac_fib_send(command,
1275                          fib,
1276                          fibsize,
1277                          FsaNormal,
1278                          0, 1,
1279                          (fib_callback) io_callback,
1280                          (void *) cmd);
1281}
1282
1283static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1284{
1285        u16 fibsize;
1286        struct aac_read64 *readcmd;
1287        long ret;
1288
1289        aac_fib_init(fib);
1290        readcmd = (struct aac_read64 *) fib_data(fib);
1291        readcmd->command = cpu_to_le32(VM_CtHostRead64);
1292        readcmd->cid = cpu_to_le16(scmd_id(cmd));
1293        readcmd->sector_count = cpu_to_le16(count);
1294        readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1295        readcmd->pad   = 0;
1296        readcmd->flags = 0;
1297
1298        ret = aac_build_sg64(cmd, &readcmd->sg);
1299        if (ret < 0)
1300                return ret;
1301        fibsize = sizeof(struct aac_read64) +
1302                ((le32_to_cpu(readcmd->sg.count) - 1) *
1303                 sizeof (struct sgentry64));
1304        BUG_ON (fibsize > (fib->dev->max_fib_size -
1305                                sizeof(struct aac_fibhdr)));
1306        /*
1307         *      Now send the Fib to the adapter
1308         */
1309        return aac_fib_send(ContainerCommand64,
1310                          fib,
1311                          fibsize,
1312                          FsaNormal,
1313                          0, 1,
1314                          (fib_callback) io_callback,
1315                          (void *) cmd);
1316}
1317
1318static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1319{
1320        u16 fibsize;
1321        struct aac_read *readcmd;
1322        struct aac_dev *dev = fib->dev;
1323        long ret;
1324
1325        aac_fib_init(fib);
1326        readcmd = (struct aac_read *) fib_data(fib);
1327        readcmd->command = cpu_to_le32(VM_CtBlockRead);
1328        readcmd->cid = cpu_to_le32(scmd_id(cmd));
1329        readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1330        readcmd->count = cpu_to_le32(count *
1331                dev->fsa_dev[scmd_id(cmd)].block_size);
1332
1333        ret = aac_build_sg(cmd, &readcmd->sg);
1334        if (ret < 0)
1335                return ret;
1336        fibsize = sizeof(struct aac_read) +
1337                        ((le32_to_cpu(readcmd->sg.count) - 1) *
1338                         sizeof (struct sgentry));
1339        BUG_ON (fibsize > (fib->dev->max_fib_size -
1340                                sizeof(struct aac_fibhdr)));
1341        /*
1342         *      Now send the Fib to the adapter
1343         */
1344        return aac_fib_send(ContainerCommand,
1345                          fib,
1346                          fibsize,
1347                          FsaNormal,
1348                          0, 1,
1349                          (fib_callback) io_callback,
1350                          (void *) cmd);
1351}
1352
1353static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1354{
1355        struct aac_dev *dev = fib->dev;
1356        u16 fibsize, command;
1357        long ret;
1358
1359        aac_fib_init(fib);
1360        if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
1361                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
1362                !dev->sync_mode) {
1363                struct aac_raw_io2 *writecmd2;
1364                writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
1365                memset(writecmd2, 0, sizeof(struct aac_raw_io2));
1366                writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
1367                writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1368                writecmd2->byteCount = cpu_to_le32(count *
1369                        dev->fsa_dev[scmd_id(cmd)].block_size);
1370                writecmd2->cid = cpu_to_le16(scmd_id(cmd));
1371                writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
1372                                                   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
1373                        cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
1374                        cpu_to_le16(RIO2_IO_TYPE_WRITE);
1375                ret = aac_build_sgraw2(cmd, writecmd2,
1376                                dev->scsi_host_ptr->sg_tablesize);
1377                if (ret < 0)
1378                        return ret;
1379                command = ContainerRawIo2;
1380                fibsize = sizeof(struct aac_raw_io2) +
1381                        ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
1382        } else {
1383                struct aac_raw_io *writecmd;
1384                writecmd = (struct aac_raw_io *) fib_data(fib);
1385                writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1386                writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1387                writecmd->count = cpu_to_le32(count *
1388                        dev->fsa_dev[scmd_id(cmd)].block_size);
1389                writecmd->cid = cpu_to_le16(scmd_id(cmd));
1390                writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
1391                                                   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
1392                        cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
1393                        cpu_to_le16(RIO_TYPE_WRITE);
1394                writecmd->bpTotal = 0;
1395                writecmd->bpComplete = 0;
1396                ret = aac_build_sgraw(cmd, &writecmd->sg);
1397                if (ret < 0)
1398                        return ret;
1399                command = ContainerRawIo;
1400                fibsize = sizeof(struct aac_raw_io) +
1401                        ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
1402        }
1403
1404        BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
1405        /*
1406         *      Now send the Fib to the adapter
1407         */
1408        return aac_fib_send(command,
1409                          fib,
1410                          fibsize,
1411                          FsaNormal,
1412                          0, 1,
1413                          (fib_callback) io_callback,
1414                          (void *) cmd);
1415}
1416
1417static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1418{
1419        u16 fibsize;
1420        struct aac_write64 *writecmd;
1421        long ret;
1422
1423        aac_fib_init(fib);
1424        writecmd = (struct aac_write64 *) fib_data(fib);
1425        writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1426        writecmd->cid = cpu_to_le16(scmd_id(cmd));
1427        writecmd->sector_count = cpu_to_le16(count);
1428        writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1429        writecmd->pad   = 0;
1430        writecmd->flags = 0;
1431
1432        ret = aac_build_sg64(cmd, &writecmd->sg);
1433        if (ret < 0)
1434                return ret;
1435        fibsize = sizeof(struct aac_write64) +
1436                ((le32_to_cpu(writecmd->sg.count) - 1) *
1437                 sizeof (struct sgentry64));
1438        BUG_ON (fibsize > (fib->dev->max_fib_size -
1439                                sizeof(struct aac_fibhdr)));
1440        /*
1441         *      Now send the Fib to the adapter
1442         */
1443        return aac_fib_send(ContainerCommand64,
1444                          fib,
1445                          fibsize,
1446                          FsaNormal,
1447                          0, 1,
1448                          (fib_callback) io_callback,
1449                          (void *) cmd);
1450}
1451
1452static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1453{
1454        u16 fibsize;
1455        struct aac_write *writecmd;
1456        struct aac_dev *dev = fib->dev;
1457        long ret;
1458
1459        aac_fib_init(fib);
1460        writecmd = (struct aac_write *) fib_data(fib);
1461        writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1462        writecmd->cid = cpu_to_le32(scmd_id(cmd));
1463        writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1464        writecmd->count = cpu_to_le32(count *
1465                dev->fsa_dev[scmd_id(cmd)].block_size);
1466        writecmd->sg.count = cpu_to_le32(1);
1467        /* ->stable is not used - it did mean which type of write */
1468
1469        ret = aac_build_sg(cmd, &writecmd->sg);
1470        if (ret < 0)
1471                return ret;
1472        fibsize = sizeof(struct aac_write) +
1473                ((le32_to_cpu(writecmd->sg.count) - 1) *
1474                 sizeof (struct sgentry));
1475        BUG_ON (fibsize > (fib->dev->max_fib_size -
1476                                sizeof(struct aac_fibhdr)));
1477        /*
1478         *      Now send the Fib to the adapter
1479         */
1480        return aac_fib_send(ContainerCommand,
1481                          fib,
1482                          fibsize,
1483                          FsaNormal,
1484                          0, 1,
1485                          (fib_callback) io_callback,
1486                          (void *) cmd);
1487}
1488
1489static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
1490{
1491        struct aac_srb * srbcmd;
1492        u32 flag;
1493        u32 timeout;
1494
1495        aac_fib_init(fib);
1496        switch(cmd->sc_data_direction){
1497        case DMA_TO_DEVICE:
1498                flag = SRB_DataOut;
1499                break;
1500        case DMA_BIDIRECTIONAL:
1501                flag = SRB_DataIn | SRB_DataOut;
1502                break;
1503        case DMA_FROM_DEVICE:
1504                flag = SRB_DataIn;
1505                break;
1506        case DMA_NONE:
1507        default:        /* shuts up some versions of gcc */
1508                flag = SRB_NoDataXfer;
1509                break;
1510        }
1511
1512        srbcmd = (struct aac_srb*) fib_data(fib);
1513        srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1514        srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
1515        srbcmd->id       = cpu_to_le32(scmd_id(cmd));
1516        srbcmd->lun      = cpu_to_le32(cmd->device->lun);
1517        srbcmd->flags    = cpu_to_le32(flag);
1518        timeout = cmd->request->timeout/HZ;
1519        if (timeout == 0)
1520                timeout = 1;
1521        srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
1522        srbcmd->retry_limit = 0; /* Obsolete parameter */
1523        srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
1524        return srbcmd;
1525}
1526
1527static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
1528                                                        struct scsi_cmnd *cmd)
1529{
1530        struct aac_hba_cmd_req *hbacmd;
1531        struct aac_dev *dev;
1532        int bus, target;
1533        u64 address;
1534
1535        dev = (struct aac_dev *)cmd->device->host->hostdata;
1536
1537        hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
1538        memset(hbacmd, 0, 96);  /* sizeof(*hbacmd) is not necessary */
1539        /* iu_type is a parameter of aac_hba_send */
1540        switch (cmd->sc_data_direction) {
1541        case DMA_TO_DEVICE:
1542                hbacmd->byte1 = 2;
1543                break;
1544        case DMA_FROM_DEVICE:
1545        case DMA_BIDIRECTIONAL:
1546                hbacmd->byte1 = 1;
1547                break;
1548        case DMA_NONE:
1549        default:
1550                break;
1551        }
1552        hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
1553
1554        bus = aac_logical_to_phys(scmd_channel(cmd));
1555        target = scmd_id(cmd);
1556        hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
1557
1558        /* we fill in reply_qid later in aac_src_deliver_message */
1559        /* we fill in iu_type, request_id later in aac_hba_send */
1560        /* we fill in emb_data_desc_count later in aac_build_sghba */
1561
1562        memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
1563        hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
1564
1565        address = (u64)fib->hw_error_pa;
1566        hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
1567        hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
1568        hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
1569
1570        return hbacmd;
1571}
1572
1573static void aac_srb_callback(void *context, struct fib * fibptr);
1574
1575static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
1576{
1577        u16 fibsize;
1578        struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1579        long ret;
1580
1581        ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
1582        if (ret < 0)
1583                return ret;
1584        srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1585
1586        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1587        memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
1588        /*
1589         *      Build Scatter/Gather list
1590         */
1591        fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
1592                ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
1593                 sizeof (struct sgentry64));
1594        BUG_ON (fibsize > (fib->dev->max_fib_size -
1595                                sizeof(struct aac_fibhdr)));
1596
1597        /*
1598         *      Now send the Fib to the adapter
1599         */
1600        return aac_fib_send(ScsiPortCommand64, fib,
1601                                fibsize, FsaNormal, 0, 1,
1602                                  (fib_callback) aac_srb_callback,
1603                                  (void *) cmd);
1604}
1605
1606static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
1607{
1608        u16 fibsize;
1609        struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1610        long ret;
1611
1612        ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
1613        if (ret < 0)
1614                return ret;
1615        srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1616
1617        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1618        memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
1619        /*
1620         *      Build Scatter/Gather list
1621         */
1622        fibsize = sizeof (struct aac_srb) +
1623                (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
1624                 sizeof (struct sgentry));
1625        BUG_ON (fibsize > (fib->dev->max_fib_size -
1626                                sizeof(struct aac_fibhdr)));
1627
1628        /*
1629         *      Now send the Fib to the adapter
1630         */
1631        return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
1632                                  (fib_callback) aac_srb_callback, (void *) cmd);
1633}
1634
1635static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
1636{
1637        if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
1638            (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
1639                return FAILED;
1640        return aac_scsi_32(fib, cmd);
1641}
1642
1643static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
1644{
1645        struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
1646        struct aac_dev *dev;
1647        long ret;
1648
1649        dev = (struct aac_dev *)cmd->device->host->hostdata;
1650
1651        ret = aac_build_sghba(cmd, hbacmd,
1652                dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
1653        if (ret < 0)
1654                return ret;
1655
1656        /*
1657         *      Now send the HBA command to the adapter
1658         */
1659        fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
1660                sizeof(struct aac_hba_sgl);
1661
1662        return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
1663                                  (fib_callback) aac_hba_callback,
1664                                  (void *) cmd);
1665}
1666
1667static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
1668        struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
1669{
1670        struct fib      *fibptr;
1671        dma_addr_t      addr;
1672        int             rcode;
1673        int             fibsize;
1674        struct aac_srb  *srb;
1675        struct aac_srb_reply *srb_reply;
1676        struct sgmap64  *sg64;
1677        u32 vbus;
1678        u32 vid;
1679
1680        if (!dev->sa_firmware)
1681                return 0;
1682
1683        /* allocate FIB */
1684        fibptr = aac_fib_alloc(dev);
1685        if (!fibptr)
1686                return -ENOMEM;
1687
1688        aac_fib_init(fibptr);
1689        fibptr->hw_fib_va->header.XferState &=
1690                ~cpu_to_le32(FastResponseCapable);
1691
1692        fibsize  = sizeof(struct aac_srb) - sizeof(struct sgentry) +
1693                                                sizeof(struct sgentry64);
1694
1695        /* allocate DMA buffer for response */
1696        addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
1697                                                        DMA_BIDIRECTIONAL);
1698        if (dma_mapping_error(&dev->pdev->dev, addr)) {
1699                rcode = -ENOMEM;
1700                goto fib_error;
1701        }
1702
1703        srb = fib_data(fibptr);
1704        memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
1705
1706        vbus = (u32)le16_to_cpu(
1707                        dev->supplement_adapter_info.virt_device_bus);
1708        vid  = (u32)le16_to_cpu(
1709                        dev->supplement_adapter_info.virt_device_target);
1710
1711        /* set the common request fields */
1712        srb->channel            = cpu_to_le32(vbus);
1713        srb->id                 = cpu_to_le32(vid);
1714        srb->lun                = 0;
1715        srb->function           = cpu_to_le32(SRBF_ExecuteScsi);
1716        srb->timeout            = 0;
1717        srb->retry_limit        = 0;
1718        srb->cdb_size           = cpu_to_le32(16);
1719        srb->count              = cpu_to_le32(xfer_len);
1720
1721        sg64 = (struct sgmap64 *)&srb->sg;
1722        sg64->count             = cpu_to_le32(1);
1723        sg64->sg[0].addr[1]     = cpu_to_le32(upper_32_bits(addr));
1724        sg64->sg[0].addr[0]     = cpu_to_le32(lower_32_bits(addr));
1725        sg64->sg[0].count       = cpu_to_le32(xfer_len);
1726
1727        /*
1728         * Copy the updated data for other dumping or other usage if needed
1729         */
1730        memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
1731
1732        /* issue request to the controller */
1733        rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
1734                                        1, 1, NULL, NULL);
1735
1736        if (rcode == -ERESTARTSYS)
1737                rcode = -ERESTART;
1738
1739        if (unlikely(rcode < 0))
1740                goto bmic_error;
1741
1742        srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
1743        memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
1744
1745bmic_error:
1746        dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
1747fib_error:
1748        aac_fib_complete(fibptr);
1749        aac_fib_free(fibptr);
1750        return rcode;
1751}
1752
1753static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
1754{
1755
1756        struct aac_ciss_identify_pd *identify_resp;
1757
1758        if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
1759                return;
1760
1761        identify_resp = dev->hba_map[bus][target].safw_identify_resp;
1762        if (identify_resp == NULL) {
1763                dev->hba_map[bus][target].qd_limit = 32;
1764                return;
1765        }
1766
1767        if (identify_resp->current_queue_depth_limit <= 0 ||
1768                identify_resp->current_queue_depth_limit > 255)
1769                dev->hba_map[bus][target].qd_limit = 32;
1770        else
1771                dev->hba_map[bus][target].qd_limit =
1772                        identify_resp->current_queue_depth_limit;
1773}
1774
1775static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
1776        struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
1777{
1778        int rcode = -ENOMEM;
1779        int datasize;
1780        struct aac_srb_unit srbu;
1781        struct aac_srb *srbcmd;
1782        struct aac_ciss_identify_pd *identify_reply;
1783
1784        datasize = sizeof(struct aac_ciss_identify_pd);
1785        identify_reply = kmalloc(datasize, GFP_KERNEL);
1786        if (!identify_reply)
1787                goto out;
1788
1789        memset(&srbu, 0, sizeof(struct aac_srb_unit));
1790
1791        srbcmd = &srbu.srb;
1792        srbcmd->flags   = cpu_to_le32(SRB_DataIn);
1793        srbcmd->cdb[0]  = 0x26;
1794        srbcmd->cdb[2]  = (u8)((AAC_MAX_LUN + target) & 0x00FF);
1795        srbcmd->cdb[6]  = CISS_IDENTIFY_PHYSICAL_DEVICE;
1796
1797        rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
1798        if (unlikely(rcode < 0))
1799                goto mem_free_all;
1800
1801        *identify_resp = identify_reply;
1802
1803out:
1804        return rcode;
1805mem_free_all:
1806        kfree(identify_reply);
1807        goto out;
1808}
1809
1810static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
1811{
1812        kfree(dev->safw_phys_luns);
1813        dev->safw_phys_luns = NULL;
1814}
1815
1816/**
1817 *      aac_get_safw_ciss_luns()        Process topology change
1818 *      @dev:           aac_dev structure
1819 *
1820 *      Execute a CISS REPORT PHYS LUNS and process the results into
1821 *      the current hba_map.
1822 */
1823static int aac_get_safw_ciss_luns(struct aac_dev *dev)
1824{
1825        int rcode = -ENOMEM;
1826        int datasize;
1827        struct aac_srb *srbcmd;
1828        struct aac_srb_unit srbu;
1829        struct aac_ciss_phys_luns_resp *phys_luns;
1830
1831        datasize = sizeof(struct aac_ciss_phys_luns_resp) +
1832                (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
1833        phys_luns = kmalloc(datasize, GFP_KERNEL);
1834        if (phys_luns == NULL)
1835                goto out;
1836
1837        memset(&srbu, 0, sizeof(struct aac_srb_unit));
1838
1839        srbcmd = &srbu.srb;
1840        srbcmd->flags   = cpu_to_le32(SRB_DataIn);
1841        srbcmd->cdb[0]  = CISS_REPORT_PHYSICAL_LUNS;
1842        srbcmd->cdb[1]  = 2; /* extended reporting */
1843        srbcmd->cdb[8]  = (u8)(datasize >> 8);
1844        srbcmd->cdb[9]  = (u8)(datasize);
1845
1846        rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
1847        if (unlikely(rcode < 0))
1848                goto mem_free_all;
1849
1850        if (phys_luns->resp_flag != 2) {
1851                rcode = -ENOMSG;
1852                goto mem_free_all;
1853        }
1854
1855        dev->safw_phys_luns = phys_luns;
1856
1857out:
1858        return rcode;
1859mem_free_all:
1860        kfree(phys_luns);
1861        goto out;
1862}
1863
1864static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
1865{
1866        return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
1867}
1868
1869static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
1870{
1871        return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
1872}
1873
1874static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
1875{
1876        return dev->safw_phys_luns->lun[lun].level2[0];
1877}
1878
1879static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
1880{
1881        return dev->safw_phys_luns->lun[lun].bus >> 6;
1882}
1883
1884static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
1885{
1886        return dev->safw_phys_luns->lun[lun].node_ident[9];
1887}
1888
1889static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
1890{
1891        return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
1892}
1893
1894static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
1895{
1896        return dev->safw_phys_luns->lun[lun].node_ident[8];
1897}
1898
1899static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
1900                                                int bus, int target)
1901{
1902        kfree(dev->hba_map[bus][target].safw_identify_resp);
1903        dev->hba_map[bus][target].safw_identify_resp = NULL;
1904}
1905
1906static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
1907        int lun_count)
1908{
1909        int luns;
1910        int i;
1911        u32 bus;
1912        u32 target;
1913
1914        luns = aac_get_safw_phys_lun_count(dev);
1915
1916        if (luns < lun_count)
1917                lun_count = luns;
1918        else if (lun_count < 0)
1919                lun_count = luns;
1920
1921        for (i = 0; i < lun_count; i++) {
1922                bus = aac_get_safw_phys_bus(dev, i);
1923                target = aac_get_safw_phys_target(dev, i);
1924
1925                aac_free_safw_identify_resp(dev, bus, target);
1926        }
1927}
1928
1929static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
1930{
1931        int i;
1932        int rcode = 0;
1933        u32 lun_count;
1934        u32 bus;
1935        u32 target;
1936        struct aac_ciss_identify_pd *identify_resp = NULL;
1937
1938        lun_count = aac_get_safw_phys_lun_count(dev);
1939
1940        for (i = 0; i < lun_count; ++i) {
1941
1942                bus = aac_get_safw_phys_bus(dev, i);
1943                target = aac_get_safw_phys_target(dev, i);
1944
1945                rcode = aac_issue_safw_bmic_identify(dev,
1946                                                &identify_resp, bus, target);
1947
1948                if (unlikely(rcode < 0))
1949                        goto free_identify_resp;
1950
1951                dev->hba_map[bus][target].safw_identify_resp = identify_resp;
1952        }
1953
1954out:
1955        return rcode;
1956free_identify_resp:
1957        aac_free_safw_all_identify_resp(dev, i);
1958        goto out;
1959}
1960
1961/**
1962 *      aac_set_safw_attr_all_targets-  update current hba map with data from FW
1963 *      @dev:   aac_dev structure
1964 *      @phys_luns: FW information from report phys luns
1965 *      @rescan: Indicates scan type
1966 *
1967 *      Update our hba map with the information gathered from the FW
1968 */
1969static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
1970{
1971        /* ok and extended reporting */
1972        u32 lun_count, nexus;
1973        u32 i, bus, target;
1974        u8 expose_flag, attribs;
1975
1976        lun_count = aac_get_safw_phys_lun_count(dev);
1977
1978        dev->scan_counter++;
1979
1980        for (i = 0; i < lun_count; ++i) {
1981
1982                bus = aac_get_safw_phys_bus(dev, i);
1983                target = aac_get_safw_phys_target(dev, i);
1984                expose_flag = aac_get_safw_phys_expose_flag(dev, i);
1985                attribs = aac_get_safw_phys_attribs(dev, i);
1986                nexus = aac_get_safw_phys_nexus(dev, i);
1987
1988                if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
1989                        continue;
1990
1991                if (expose_flag != 0) {
1992                        dev->hba_map[bus][target].devtype =
1993                                AAC_DEVTYPE_RAID_MEMBER;
1994                        continue;
1995                }
1996
1997                if (nexus != 0 && (attribs & 8)) {
1998                        dev->hba_map[bus][target].devtype =
1999                                AAC_DEVTYPE_NATIVE_RAW;
2000                        dev->hba_map[bus][target].rmw_nexus =
2001                                        nexus;
2002                } else
2003                        dev->hba_map[bus][target].devtype =
2004                                AAC_DEVTYPE_ARC_RAW;
2005
2006                dev->hba_map[bus][target].scan_counter = dev->scan_counter;
2007
2008                aac_set_safw_target_qd(dev, bus, target);
2009        }
2010}
2011
2012static int aac_setup_safw_targets(struct aac_dev *dev)
2013{
2014        int rcode = 0;
2015
2016        rcode = aac_get_containers(dev);
2017        if (unlikely(rcode < 0))
2018                goto out;
2019
2020        rcode = aac_get_safw_ciss_luns(dev);
2021        if (unlikely(rcode < 0))
2022                goto out;
2023
2024        rcode = aac_get_safw_attr_all_targets(dev);
2025        if (unlikely(rcode < 0))
2026                goto free_ciss_luns;
2027
2028        aac_set_safw_attr_all_targets(dev);
2029
2030        aac_free_safw_all_identify_resp(dev, -1);
2031free_ciss_luns:
2032        aac_free_safw_ciss_luns(dev);
2033out:
2034        return rcode;
2035}
2036
2037int aac_setup_safw_adapter(struct aac_dev *dev)
2038{
2039        return aac_setup_safw_targets(dev);
2040}
2041
2042int aac_get_adapter_info(struct aac_dev* dev)
2043{
2044        struct fib* fibptr;
2045        int rcode;
2046        u32 tmp, bus, target;
2047        struct aac_adapter_info *info;
2048        struct aac_bus_info *command;
2049        struct aac_bus_info_response *bus_info;
2050
2051        if (!(fibptr = aac_fib_alloc(dev)))
2052                return -ENOMEM;
2053
2054        aac_fib_init(fibptr);
2055        info = (struct aac_adapter_info *) fib_data(fibptr);
2056        memset(info,0,sizeof(*info));
2057
2058        rcode = aac_fib_send(RequestAdapterInfo,
2059                         fibptr,
2060                         sizeof(*info),
2061                         FsaNormal,
2062                         -1, 1, /* First `interrupt' command uses special wait */
2063                         NULL,
2064                         NULL);
2065
2066        if (rcode < 0) {
2067                /* FIB should be freed only after
2068                 * getting the response from the F/W */
2069                if (rcode != -ERESTARTSYS) {
2070                        aac_fib_complete(fibptr);
2071                        aac_fib_free(fibptr);
2072                }
2073                return rcode;
2074        }
2075        memcpy(&dev->adapter_info, info, sizeof(*info));
2076
2077        dev->supplement_adapter_info.virt_device_bus = 0xffff;
2078        if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
2079                struct aac_supplement_adapter_info * sinfo;
2080
2081                aac_fib_init(fibptr);
2082
2083                sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
2084
2085                memset(sinfo,0,sizeof(*sinfo));
2086
2087                rcode = aac_fib_send(RequestSupplementAdapterInfo,
2088                                 fibptr,
2089                                 sizeof(*sinfo),
2090                                 FsaNormal,
2091                                 1, 1,
2092                                 NULL,
2093                                 NULL);
2094
2095                if (rcode >= 0)
2096                        memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
2097                if (rcode == -ERESTARTSYS) {
2098                        fibptr = aac_fib_alloc(dev);
2099                        if (!fibptr)
2100                                return -ENOMEM;
2101                }
2102
2103        }
2104
2105        /* reset all previous mapped devices (i.e. for init. after IOP_RESET) */
2106        for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
2107                for (target = 0; target < AAC_MAX_TARGETS; target++) {
2108                        dev->hba_map[bus][target].devtype = 0;
2109                        dev->hba_map[bus][target].qd_limit = 0;
2110                }
2111        }
2112
2113        /*
2114         * GetBusInfo
2115         */
2116
2117        aac_fib_init(fibptr);
2118
2119        bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
2120
2121        memset(bus_info, 0, sizeof(*bus_info));
2122
2123        command = (struct aac_bus_info *)bus_info;
2124
2125        command->Command = cpu_to_le32(VM_Ioctl);
2126        command->ObjType = cpu_to_le32(FT_DRIVE);
2127        command->MethodId = cpu_to_le32(1);
2128        command->CtlCmd = cpu_to_le32(GetBusInfo);
2129
2130        rcode = aac_fib_send(ContainerCommand,
2131                         fibptr,
2132                         sizeof (*bus_info),
2133                         FsaNormal,
2134                         1, 1,
2135                         NULL, NULL);
2136
2137        /* reasoned default */
2138        dev->maximum_num_physicals = 16;
2139        if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
2140                dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
2141                dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
2142        }
2143
2144        if (!dev->in_reset) {
2145                char buffer[16];
2146                tmp = le32_to_cpu(dev->adapter_info.kernelrev);
2147                printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
2148                        dev->name,
2149                        dev->id,
2150                        tmp>>24,
2151                        (tmp>>16)&0xff,
2152                        tmp&0xff,
2153                        le32_to_cpu(dev->adapter_info.kernelbuild),
2154                        (int)sizeof(dev->supplement_adapter_info.build_date),
2155                        dev->supplement_adapter_info.build_date);
2156                tmp = le32_to_cpu(dev->adapter_info.monitorrev);
2157                printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
2158                        dev->name, dev->id,
2159                        tmp>>24,(tmp>>16)&0xff,tmp&0xff,
2160                        le32_to_cpu(dev->adapter_info.monitorbuild));
2161                tmp = le32_to_cpu(dev->adapter_info.biosrev);
2162                printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
2163                        dev->name, dev->id,
2164                        tmp>>24,(tmp>>16)&0xff,tmp&0xff,
2165                        le32_to_cpu(dev->adapter_info.biosbuild));
2166                buffer[0] = '\0';
2167                if (aac_get_serial_number(
2168                  shost_to_class(dev->scsi_host_ptr), buffer))
2169                        printk(KERN_INFO "%s%d: serial %s",
2170                          dev->name, dev->id, buffer);
2171                if (dev->supplement_adapter_info.vpd_info.tsid[0]) {
2172                        printk(KERN_INFO "%s%d: TSID %.*s\n",
2173                          dev->name, dev->id,
2174                          (int)sizeof(dev->supplement_adapter_info
2175                                                        .vpd_info.tsid),
2176                                dev->supplement_adapter_info.vpd_info.tsid);
2177                }
2178                if (!aac_check_reset || ((aac_check_reset == 1) &&
2179                  (dev->supplement_adapter_info.supported_options2 &
2180                  AAC_OPTION_IGNORE_RESET))) {
2181                        printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
2182                          dev->name, dev->id);
2183                }
2184        }
2185
2186        dev->cache_protected = 0;
2187        dev->jbod = ((dev->supplement_adapter_info.feature_bits &
2188                AAC_FEATURE_JBOD) != 0);
2189        dev->nondasd_support = 0;
2190        dev->raid_scsi_mode = 0;
2191        if(dev->adapter_info.options & AAC_OPT_NONDASD)
2192                dev->nondasd_support = 1;
2193
2194        /*
2195         * If the firmware supports ROMB RAID/SCSI mode and we are currently
2196         * in RAID/SCSI mode, set the flag. For now if in this mode we will
2197         * force nondasd support on. If we decide to allow the non-dasd flag
2198         * additional changes changes will have to be made to support
2199         * RAID/SCSI.  the function aac_scsi_cmd in this module will have to be
2200         * changed to support the new dev->raid_scsi_mode flag instead of
2201         * leaching off of the dev->nondasd_support flag. Also in linit.c the
2202         * function aac_detect will have to be modified where it sets up the
2203         * max number of channels based on the aac->nondasd_support flag only.
2204         */
2205        if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
2206            (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
2207                dev->nondasd_support = 1;
2208                dev->raid_scsi_mode = 1;
2209        }
2210        if (dev->raid_scsi_mode != 0)
2211                printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
2212                                dev->name, dev->id);
2213
2214        if (nondasd != -1)
2215                dev->nondasd_support = (nondasd!=0);
2216        if (dev->nondasd_support && !dev->in_reset)
2217                printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
2218
2219        if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
2220                dev->needs_dac = 1;
2221        dev->dac_support = 0;
2222        if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
2223            (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
2224                if (!dev->in_reset)
2225                        printk(KERN_INFO "%s%d: 64bit support enabled.\n",
2226                                dev->name, dev->id);
2227                dev->dac_support = 1;
2228        }
2229
2230        if(dacmode != -1) {
2231                dev->dac_support = (dacmode!=0);
2232        }
2233
2234        /* avoid problems with AAC_QUIRK_SCSI_32 controllers */
2235        if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
2236                & AAC_QUIRK_SCSI_32)) {
2237                dev->nondasd_support = 0;
2238                dev->jbod = 0;
2239                expose_physicals = 0;
2240        }
2241
2242        if (dev->dac_support) {
2243                if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
2244                        if (!dev->in_reset)
2245                                dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
2246                } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
2247                        dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
2248                        dev->dac_support = 0;
2249                } else {
2250                        dev_info(&dev->pdev->dev, "No suitable DMA available\n");
2251                        rcode = -ENOMEM;
2252                }
2253        }
2254        /*
2255         * Deal with configuring for the individualized limits of each packet
2256         * interface.
2257         */
2258        dev->a_ops.adapter_scsi = (dev->dac_support)
2259          ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
2260                                ? aac_scsi_32_64
2261                                : aac_scsi_64)
2262                                : aac_scsi_32;
2263        if (dev->raw_io_interface) {
2264                dev->a_ops.adapter_bounds = (dev->raw_io_64)
2265                                        ? aac_bounds_64
2266                                        : aac_bounds_32;
2267                dev->a_ops.adapter_read = aac_read_raw_io;
2268                dev->a_ops.adapter_write = aac_write_raw_io;
2269        } else {
2270                dev->a_ops.adapter_bounds = aac_bounds_32;
2271                dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
2272                        sizeof(struct aac_fibhdr) -
2273                        sizeof(struct aac_write) + sizeof(struct sgentry)) /
2274                                sizeof(struct sgentry);
2275                if (dev->dac_support) {
2276                        dev->a_ops.adapter_read = aac_read_block64;
2277                        dev->a_ops.adapter_write = aac_write_block64;
2278                        /*
2279                         * 38 scatter gather elements
2280                         */
2281                        dev->scsi_host_ptr->sg_tablesize =
2282                                (dev->max_fib_size -
2283                                sizeof(struct aac_fibhdr) -
2284                                sizeof(struct aac_write64) +
2285                                sizeof(struct sgentry64)) /
2286                                        sizeof(struct sgentry64);
2287                } else {
2288                        dev->a_ops.adapter_read = aac_read_block;
2289                        dev->a_ops.adapter_write = aac_write_block;
2290                }
2291                dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
2292                if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
2293                        /*
2294                         * Worst case size that could cause sg overflow when
2295                         * we break up SG elements that are larger than 64KB.
2296                         * Would be nice if we could tell the SCSI layer what
2297                         * the maximum SG element size can be. Worst case is
2298                         * (sg_tablesize-1) 4KB elements with one 64KB
2299                         * element.
2300                         *      32bit -> 468 or 238KB   64bit -> 424 or 212KB
2301                         */
2302                        dev->scsi_host_ptr->max_sectors =
2303                          (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
2304                }
2305        }
2306        if (!dev->sync_mode && dev->sa_firmware &&
2307                dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
2308                dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
2309                        HBA_MAX_SG_SEPARATE;
2310
2311        /* FIB should be freed only after getting the response from the F/W */
2312        if (rcode != -ERESTARTSYS) {
2313                aac_fib_complete(fibptr);
2314                aac_fib_free(fibptr);
2315        }
2316
2317        return rcode;
2318}
2319
2320
2321static void io_callback(void *context, struct fib * fibptr)
2322{
2323        struct aac_dev *dev;
2324        struct aac_read_reply *readreply;
2325        struct scsi_cmnd *scsicmd;
2326        u32 cid;
2327
2328        scsicmd = (struct scsi_cmnd *) context;
2329
2330        if (!aac_valid_context(scsicmd, fibptr))
2331                return;
2332
2333        dev = fibptr->dev;
2334        cid = scmd_id(scsicmd);
2335
2336        if (nblank(dprintk(x))) {
2337                u64 lba;
2338                switch (scsicmd->cmnd[0]) {
2339                case WRITE_6:
2340                case READ_6:
2341                        lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2342                            (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2343                        break;
2344                case WRITE_16:
2345                case READ_16:
2346                        lba = ((u64)scsicmd->cmnd[2] << 56) |
2347                              ((u64)scsicmd->cmnd[3] << 48) |
2348                              ((u64)scsicmd->cmnd[4] << 40) |
2349                              ((u64)scsicmd->cmnd[5] << 32) |
2350                              ((u64)scsicmd->cmnd[6] << 24) |
2351                              (scsicmd->cmnd[7] << 16) |
2352                              (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2353                        break;
2354                case WRITE_12:
2355                case READ_12:
2356                        lba = ((u64)scsicmd->cmnd[2] << 24) |
2357                              (scsicmd->cmnd[3] << 16) |
2358                              (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2359                        break;
2360                default:
2361                        lba = ((u64)scsicmd->cmnd[2] << 24) |
2362                               (scsicmd->cmnd[3] << 16) |
2363                               (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2364                        break;
2365                }
2366                printk(KERN_DEBUG
2367                  "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
2368                  smp_processor_id(), (unsigned long long)lba, jiffies);
2369        }
2370
2371        BUG_ON(fibptr == NULL);
2372
2373        scsi_dma_unmap(scsicmd);
2374
2375        readreply = (struct aac_read_reply *)fib_data(fibptr);
2376        switch (le32_to_cpu(readreply->status)) {
2377        case ST_OK:
2378                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2379                        SAM_STAT_GOOD;
2380                dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
2381                break;
2382        case ST_NOT_READY:
2383                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2384                        SAM_STAT_CHECK_CONDITION;
2385                set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
2386                  SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
2387                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2388                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2389                             SCSI_SENSE_BUFFERSIZE));
2390                break;
2391        case ST_MEDERR:
2392                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2393                        SAM_STAT_CHECK_CONDITION;
2394                set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
2395                  SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
2396                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2397                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2398                             SCSI_SENSE_BUFFERSIZE));
2399                break;
2400        default:
2401#ifdef AAC_DETAILED_STATUS_INFO
2402                printk(KERN_WARNING "io_callback: io failed, status = %d\n",
2403                  le32_to_cpu(readreply->status));
2404#endif
2405                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2406                        SAM_STAT_CHECK_CONDITION;
2407                set_sense(&dev->fsa_dev[cid].sense_data,
2408                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2409                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2410                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2411                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2412                             SCSI_SENSE_BUFFERSIZE));
2413                break;
2414        }
2415        aac_fib_complete(fibptr);
2416
2417        scsicmd->scsi_done(scsicmd);
2418}
2419
2420static int aac_read(struct scsi_cmnd * scsicmd)
2421{
2422        u64 lba;
2423        u32 count;
2424        int status;
2425        struct aac_dev *dev;
2426        struct fib * cmd_fibcontext;
2427        int cid;
2428
2429        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2430        /*
2431         *      Get block address and transfer length
2432         */
2433        switch (scsicmd->cmnd[0]) {
2434        case READ_6:
2435                dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
2436
2437                lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2438                        (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2439                count = scsicmd->cmnd[4];
2440
2441                if (count == 0)
2442                        count = 256;
2443                break;
2444        case READ_16:
2445                dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
2446
2447                lba =   ((u64)scsicmd->cmnd[2] << 56) |
2448                        ((u64)scsicmd->cmnd[3] << 48) |
2449                        ((u64)scsicmd->cmnd[4] << 40) |
2450                        ((u64)scsicmd->cmnd[5] << 32) |
2451                        ((u64)scsicmd->cmnd[6] << 24) |
2452                        (scsicmd->cmnd[7] << 16) |
2453                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2454                count = (scsicmd->cmnd[10] << 24) |
2455                        (scsicmd->cmnd[11] << 16) |
2456                        (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2457                break;
2458        case READ_12:
2459                dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
2460
2461                lba = ((u64)scsicmd->cmnd[2] << 24) |
2462                        (scsicmd->cmnd[3] << 16) |
2463                        (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2464                count = (scsicmd->cmnd[6] << 24) |
2465                        (scsicmd->cmnd[7] << 16) |
2466                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2467                break;
2468        default:
2469                dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
2470
2471                lba = ((u64)scsicmd->cmnd[2] << 24) |
2472                        (scsicmd->cmnd[3] << 16) |
2473                        (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2474                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2475                break;
2476        }
2477
2478        if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2479                cid = scmd_id(scsicmd);
2480                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2481                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2482                        SAM_STAT_CHECK_CONDITION;
2483                set_sense(&dev->fsa_dev[cid].sense_data,
2484                          HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2485                          ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2486                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2487                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2488                             SCSI_SENSE_BUFFERSIZE));
2489                scsicmd->scsi_done(scsicmd);
2490                return 1;
2491        }
2492
2493        dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
2494          smp_processor_id(), (unsigned long long)lba, jiffies));
2495        if (aac_adapter_bounds(dev,scsicmd,lba))
2496                return 0;
2497        /*
2498         *      Alocate and initialize a Fib
2499         */
2500        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2501        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2502        status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
2503
2504        /*
2505         *      Check that the command queued to the controller
2506         */
2507        if (status == -EINPROGRESS)
2508                return 0;
2509
2510        printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
2511        /*
2512         *      For some reason, the Fib didn't queue, return QUEUE_FULL
2513         */
2514        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2515        scsicmd->scsi_done(scsicmd);
2516        aac_fib_complete(cmd_fibcontext);
2517        aac_fib_free(cmd_fibcontext);
2518        return 0;
2519}
2520
2521static int aac_write(struct scsi_cmnd * scsicmd)
2522{
2523        u64 lba;
2524        u32 count;
2525        int fua;
2526        int status;
2527        struct aac_dev *dev;
2528        struct fib * cmd_fibcontext;
2529        int cid;
2530
2531        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2532        /*
2533         *      Get block address and transfer length
2534         */
2535        if (scsicmd->cmnd[0] == WRITE_6)        /* 6 byte command */
2536        {
2537                lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2538                count = scsicmd->cmnd[4];
2539                if (count == 0)
2540                        count = 256;
2541                fua = 0;
2542        } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
2543                dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
2544
2545                lba =   ((u64)scsicmd->cmnd[2] << 56) |
2546                        ((u64)scsicmd->cmnd[3] << 48) |
2547                        ((u64)scsicmd->cmnd[4] << 40) |
2548                        ((u64)scsicmd->cmnd[5] << 32) |
2549                        ((u64)scsicmd->cmnd[6] << 24) |
2550                        (scsicmd->cmnd[7] << 16) |
2551                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2552                count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
2553                        (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2554                fua = scsicmd->cmnd[1] & 0x8;
2555        } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
2556                dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
2557
2558                lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
2559                    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2560                count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
2561                      | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2562                fua = scsicmd->cmnd[1] & 0x8;
2563        } else {
2564                dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
2565                lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2566                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2567                fua = scsicmd->cmnd[1] & 0x8;
2568        }
2569
2570        if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2571                cid = scmd_id(scsicmd);
2572                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2573                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2574                        SAM_STAT_CHECK_CONDITION;
2575                set_sense(&dev->fsa_dev[cid].sense_data,
2576                          HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2577                          ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2578                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2579                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2580                             SCSI_SENSE_BUFFERSIZE));
2581                scsicmd->scsi_done(scsicmd);
2582                return 1;
2583        }
2584
2585        dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
2586          smp_processor_id(), (unsigned long long)lba, jiffies));
2587        if (aac_adapter_bounds(dev,scsicmd,lba))
2588                return 0;
2589        /*
2590         *      Allocate and initialize a Fib then setup a BlockWrite command
2591         */
2592        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2593        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2594        status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
2595
2596        /*
2597         *      Check that the command queued to the controller
2598         */
2599        if (status == -EINPROGRESS)
2600                return 0;
2601
2602        printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
2603        /*
2604         *      For some reason, the Fib didn't queue, return QUEUE_FULL
2605         */
2606        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2607        scsicmd->scsi_done(scsicmd);
2608
2609        aac_fib_complete(cmd_fibcontext);
2610        aac_fib_free(cmd_fibcontext);
2611        return 0;
2612}
2613
2614static void synchronize_callback(void *context, struct fib *fibptr)
2615{
2616        struct aac_synchronize_reply *synchronizereply;
2617        struct scsi_cmnd *cmd;
2618
2619        cmd = context;
2620
2621        if (!aac_valid_context(cmd, fibptr))
2622                return;
2623
2624        dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
2625                                smp_processor_id(), jiffies));
2626        BUG_ON(fibptr == NULL);
2627
2628
2629        synchronizereply = fib_data(fibptr);
2630        if (le32_to_cpu(synchronizereply->status) == CT_OK)
2631                cmd->result = DID_OK << 16 |
2632                        COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2633        else {
2634                struct scsi_device *sdev = cmd->device;
2635                struct aac_dev *dev = fibptr->dev;
2636                u32 cid = sdev_id(sdev);
2637                printk(KERN_WARNING
2638                     "synchronize_callback: synchronize failed, status = %d\n",
2639                     le32_to_cpu(synchronizereply->status));
2640                cmd->result = DID_OK << 16 |
2641                        COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2642                set_sense(&dev->fsa_dev[cid].sense_data,
2643                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2644                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2645                memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2646                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2647                             SCSI_SENSE_BUFFERSIZE));
2648        }
2649
2650        aac_fib_complete(fibptr);
2651        aac_fib_free(fibptr);
2652        cmd->scsi_done(cmd);
2653}
2654
2655static int aac_synchronize(struct scsi_cmnd *scsicmd)
2656{
2657        int status;
2658        struct fib *cmd_fibcontext;
2659        struct aac_synchronize *synchronizecmd;
2660        struct scsi_cmnd *cmd;
2661        struct scsi_device *sdev = scsicmd->device;
2662        int active = 0;
2663        struct aac_dev *aac;
2664        u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
2665                (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2666        u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2667        unsigned long flags;
2668
2669        /*
2670         * Wait for all outstanding queued commands to complete to this
2671         * specific target (block).
2672         */
2673        spin_lock_irqsave(&sdev->list_lock, flags);
2674        list_for_each_entry(cmd, &sdev->cmd_list, list)
2675                if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
2676                        u64 cmnd_lba;
2677                        u32 cmnd_count;
2678
2679                        if (cmd->cmnd[0] == WRITE_6) {
2680                                cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
2681                                        (cmd->cmnd[2] << 8) |
2682                                        cmd->cmnd[3];
2683                                cmnd_count = cmd->cmnd[4];
2684                                if (cmnd_count == 0)
2685                                        cmnd_count = 256;
2686                        } else if (cmd->cmnd[0] == WRITE_16) {
2687                                cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
2688                                        ((u64)cmd->cmnd[3] << 48) |
2689                                        ((u64)cmd->cmnd[4] << 40) |
2690                                        ((u64)cmd->cmnd[5] << 32) |
2691                                        ((u64)cmd->cmnd[6] << 24) |
2692                                        (cmd->cmnd[7] << 16) |
2693                                        (cmd->cmnd[8] << 8) |
2694                                        cmd->cmnd[9];
2695                                cmnd_count = (cmd->cmnd[10] << 24) |
2696                                        (cmd->cmnd[11] << 16) |
2697                                        (cmd->cmnd[12] << 8) |
2698                                        cmd->cmnd[13];
2699                        } else if (cmd->cmnd[0] == WRITE_12) {
2700                                cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
2701                                        (cmd->cmnd[3] << 16) |
2702                                        (cmd->cmnd[4] << 8) |
2703                                        cmd->cmnd[5];
2704                                cmnd_count = (cmd->cmnd[6] << 24) |
2705                                        (cmd->cmnd[7] << 16) |
2706                                        (cmd->cmnd[8] << 8) |
2707                                        cmd->cmnd[9];
2708                        } else if (cmd->cmnd[0] == WRITE_10) {
2709                                cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
2710                                        (cmd->cmnd[3] << 16) |
2711                                        (cmd->cmnd[4] << 8) |
2712                                        cmd->cmnd[5];
2713                                cmnd_count = (cmd->cmnd[7] << 8) |
2714                                        cmd->cmnd[8];
2715                        } else
2716                                continue;
2717                        if (((cmnd_lba + cmnd_count) < lba) ||
2718                          (count && ((lba + count) < cmnd_lba)))
2719                                continue;
2720                        ++active;
2721                        break;
2722                }
2723
2724        spin_unlock_irqrestore(&sdev->list_lock, flags);
2725
2726        /*
2727         *      Yield the processor (requeue for later)
2728         */
2729        if (active)
2730                return SCSI_MLQUEUE_DEVICE_BUSY;
2731
2732        aac = (struct aac_dev *)sdev->host->hostdata;
2733        if (aac->in_reset)
2734                return SCSI_MLQUEUE_HOST_BUSY;
2735
2736        /*
2737         *      Allocate and initialize a Fib
2738         */
2739        if (!(cmd_fibcontext = aac_fib_alloc(aac)))
2740                return SCSI_MLQUEUE_HOST_BUSY;
2741
2742        aac_fib_init(cmd_fibcontext);
2743
2744        synchronizecmd = fib_data(cmd_fibcontext);
2745        synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
2746        synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
2747        synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
2748        synchronizecmd->count =
2749             cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
2750        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2751
2752        /*
2753         *      Now send the Fib to the adapter
2754         */
2755        status = aac_fib_send(ContainerCommand,
2756                  cmd_fibcontext,
2757                  sizeof(struct aac_synchronize),
2758                  FsaNormal,
2759                  0, 1,
2760                  (fib_callback)synchronize_callback,
2761                  (void *)scsicmd);
2762
2763        /*
2764         *      Check that the command queued to the controller
2765         */
2766        if (status == -EINPROGRESS)
2767                return 0;
2768
2769        printk(KERN_WARNING
2770                "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
2771        aac_fib_complete(cmd_fibcontext);
2772        aac_fib_free(cmd_fibcontext);
2773        return SCSI_MLQUEUE_HOST_BUSY;
2774}
2775
2776static void aac_start_stop_callback(void *context, struct fib *fibptr)
2777{
2778        struct scsi_cmnd *scsicmd = context;
2779
2780        if (!aac_valid_context(scsicmd, fibptr))
2781                return;
2782
2783        BUG_ON(fibptr == NULL);
2784
2785        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2786
2787        aac_fib_complete(fibptr);
2788        aac_fib_free(fibptr);
2789        scsicmd->scsi_done(scsicmd);
2790}
2791
2792static int aac_start_stop(struct scsi_cmnd *scsicmd)
2793{
2794        int status;
2795        struct fib *cmd_fibcontext;
2796        struct aac_power_management *pmcmd;
2797        struct scsi_device *sdev = scsicmd->device;
2798        struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
2799
2800        if (!(aac->supplement_adapter_info.supported_options2 &
2801              AAC_OPTION_POWER_MANAGEMENT)) {
2802                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2803                                  SAM_STAT_GOOD;
2804                scsicmd->scsi_done(scsicmd);
2805                return 0;
2806        }
2807
2808        if (aac->in_reset)
2809                return SCSI_MLQUEUE_HOST_BUSY;
2810
2811        /*
2812         *      Allocate and initialize a Fib
2813         */
2814        cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
2815
2816        aac_fib_init(cmd_fibcontext);
2817
2818        pmcmd = fib_data(cmd_fibcontext);
2819        pmcmd->command = cpu_to_le32(VM_ContainerConfig);
2820        pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
2821        /* Eject bit ignored, not relevant */
2822        pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
2823                cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
2824        pmcmd->cid = cpu_to_le32(sdev_id(sdev));
2825        pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
2826                cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
2827        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2828
2829        /*
2830         *      Now send the Fib to the adapter
2831         */
2832        status = aac_fib_send(ContainerCommand,
2833                  cmd_fibcontext,
2834                  sizeof(struct aac_power_management),
2835                  FsaNormal,
2836                  0, 1,
2837                  (fib_callback)aac_start_stop_callback,
2838                  (void *)scsicmd);
2839
2840        /*
2841         *      Check that the command queued to the controller
2842         */
2843        if (status == -EINPROGRESS)
2844                return 0;
2845
2846        aac_fib_complete(cmd_fibcontext);
2847        aac_fib_free(cmd_fibcontext);
2848        return SCSI_MLQUEUE_HOST_BUSY;
2849}
2850
2851/**
2852 *      aac_scsi_cmd()          -       Process SCSI command
2853 *      @scsicmd:               SCSI command block
2854 *
2855 *      Emulate a SCSI command and queue the required request for the
2856 *      aacraid firmware.
2857 */
2858
2859int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2860{
2861        u32 cid, bus;
2862        struct Scsi_Host *host = scsicmd->device->host;
2863        struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2864        struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
2865
2866        if (fsa_dev_ptr == NULL)
2867                return -1;
2868        /*
2869         *      If the bus, id or lun is out of range, return fail
2870         *      Test does not apply to ID 16, the pseudo id for the controller
2871         *      itself.
2872         */
2873        cid = scmd_id(scsicmd);
2874        if (cid != host->this_id) {
2875                if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
2876                        if((cid >= dev->maximum_num_containers) ||
2877                                        (scsicmd->device->lun != 0)) {
2878                                scsicmd->result = DID_NO_CONNECT << 16;
2879                                goto scsi_done_ret;
2880                        }
2881
2882                        /*
2883                         *      If the target container doesn't exist, it may have
2884                         *      been newly created
2885                         */
2886                        if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
2887                          (fsa_dev_ptr[cid].sense_data.sense_key ==
2888                           NOT_READY)) {
2889                                switch (scsicmd->cmnd[0]) {
2890                                case SERVICE_ACTION_IN_16:
2891                                        if (!(dev->raw_io_interface) ||
2892                                            !(dev->raw_io_64) ||
2893                                            ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
2894                                                break;
2895                                case INQUIRY:
2896                                case READ_CAPACITY:
2897                                case TEST_UNIT_READY:
2898                                        if (dev->in_reset)
2899                                                return -1;
2900                                        return _aac_probe_container(scsicmd,
2901                                                        aac_probe_container_callback2);
2902                                default:
2903                                        break;
2904                                }
2905                        }
2906                } else {  /* check for physical non-dasd devices */
2907                        bus = aac_logical_to_phys(scmd_channel(scsicmd));
2908
2909                        if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
2910                                dev->hba_map[bus][cid].devtype
2911                                        == AAC_DEVTYPE_NATIVE_RAW) {
2912                                if (dev->in_reset)
2913                                        return -1;
2914                                return aac_send_hba_fib(scsicmd);
2915                        } else if (dev->nondasd_support || expose_physicals ||
2916                                dev->jbod) {
2917                                if (dev->in_reset)
2918                                        return -1;
2919                                return aac_send_srb_fib(scsicmd);
2920                        } else {
2921                                scsicmd->result = DID_NO_CONNECT << 16;
2922                                goto scsi_done_ret;
2923                        }
2924                }
2925        }
2926        /*
2927         * else Command for the controller itself
2928         */
2929        else if ((scsicmd->cmnd[0] != INQUIRY) &&       /* only INQUIRY & TUR cmnd supported for controller */
2930                (scsicmd->cmnd[0] != TEST_UNIT_READY))
2931        {
2932                dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
2933                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2934                set_sense(&dev->fsa_dev[cid].sense_data,
2935                  ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
2936                  ASENCODE_INVALID_COMMAND, 0, 0);
2937                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2938                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2939                             SCSI_SENSE_BUFFERSIZE));
2940                goto scsi_done_ret;
2941        }
2942
2943        switch (scsicmd->cmnd[0]) {
2944        case READ_6:
2945        case READ_10:
2946        case READ_12:
2947        case READ_16:
2948                if (dev->in_reset)
2949                        return -1;
2950                return aac_read(scsicmd);
2951
2952        case WRITE_6:
2953        case WRITE_10:
2954        case WRITE_12:
2955        case WRITE_16:
2956                if (dev->in_reset)
2957                        return -1;
2958                return aac_write(scsicmd);
2959
2960        case SYNCHRONIZE_CACHE:
2961                if (((aac_cache & 6) == 6) && dev->cache_protected) {
2962                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2963                                          SAM_STAT_GOOD;
2964                        break;
2965                }
2966                /* Issue FIB to tell Firmware to flush it's cache */
2967                if ((aac_cache & 6) != 2)
2968                        return aac_synchronize(scsicmd);
2969        case INQUIRY:
2970        {
2971                struct inquiry_data inq_data;
2972
2973                dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
2974                memset(&inq_data, 0, sizeof (struct inquiry_data));
2975
2976                if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
2977                        char *arr = (char *)&inq_data;
2978
2979                        /* EVPD bit set */
2980                        arr[0] = (scmd_id(scsicmd) == host->this_id) ?
2981                          INQD_PDT_PROC : INQD_PDT_DA;
2982                        if (scsicmd->cmnd[2] == 0) {
2983                                /* supported vital product data pages */
2984                                arr[3] = 3;
2985                                arr[4] = 0x0;
2986                                arr[5] = 0x80;
2987                                arr[6] = 0x83;
2988                                arr[1] = scsicmd->cmnd[2];
2989                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2990                                                         sizeof(inq_data));
2991                                scsicmd->result = DID_OK << 16 |
2992                                                  COMMAND_COMPLETE << 8 |
2993                                                  SAM_STAT_GOOD;
2994                        } else if (scsicmd->cmnd[2] == 0x80) {
2995                                /* unit serial number page */
2996                                arr[3] = setinqserial(dev, &arr[4],
2997                                  scmd_id(scsicmd));
2998                                arr[1] = scsicmd->cmnd[2];
2999                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
3000                                                         sizeof(inq_data));
3001                                if (aac_wwn != 2)
3002                                        return aac_get_container_serial(
3003                                                scsicmd);
3004                                scsicmd->result = DID_OK << 16 |
3005                                                  COMMAND_COMPLETE << 8 |
3006                                                  SAM_STAT_GOOD;
3007                        } else if (scsicmd->cmnd[2] == 0x83) {
3008                                /* vpd page 0x83 - Device Identification Page */
3009                                char *sno = (char *)&inq_data;
3010                                sno[3] = setinqserial(dev, &sno[4],
3011                                                      scmd_id(scsicmd));
3012                                if (aac_wwn != 2)
3013                                        return aac_get_container_serial(
3014                                                scsicmd);
3015                                scsicmd->result = DID_OK << 16 |
3016                                                  COMMAND_COMPLETE << 8 |
3017                                                  SAM_STAT_GOOD;
3018                        } else {
3019                                /* vpd page not implemented */
3020                                scsicmd->result = DID_OK << 16 |
3021                                  COMMAND_COMPLETE << 8 |
3022                                  SAM_STAT_CHECK_CONDITION;
3023                                set_sense(&dev->fsa_dev[cid].sense_data,
3024                                  ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
3025                                  ASENCODE_NO_SENSE, 7, 2);
3026                                memcpy(scsicmd->sense_buffer,
3027                                  &dev->fsa_dev[cid].sense_data,
3028                                  min_t(size_t,
3029                                        sizeof(dev->fsa_dev[cid].sense_data),
3030                                        SCSI_SENSE_BUFFERSIZE));
3031                        }
3032                        break;
3033                }
3034                inq_data.inqd_ver = 2;  /* claim compliance to SCSI-2 */
3035                inq_data.inqd_rdf = 2;  /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
3036                inq_data.inqd_len = 31;
3037                /*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
3038                inq_data.inqd_pad2= 0x32 ;       /*WBus16|Sync|CmdQue */
3039                /*
3040                 *      Set the Vendor, Product, and Revision Level
3041                 *      see: <vendor>.c i.e. aac.c
3042                 */
3043                if (cid == host->this_id) {
3044                        setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
3045                        inq_data.inqd_pdt = INQD_PDT_PROC;      /* Processor device */
3046                        scsi_sg_copy_from_buffer(scsicmd, &inq_data,
3047                                                 sizeof(inq_data));
3048                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3049                                          SAM_STAT_GOOD;
3050                        break;
3051                }
3052                if (dev->in_reset)
3053                        return -1;
3054                setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
3055                inq_data.inqd_pdt = INQD_PDT_DA;        /* Direct/random access device */
3056                scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
3057                return aac_get_container_name(scsicmd);
3058        }
3059        case SERVICE_ACTION_IN_16:
3060                if (!(dev->raw_io_interface) ||
3061                    !(dev->raw_io_64) ||
3062                    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
3063                        break;
3064        {
3065                u64 capacity;
3066                char cp[13];
3067                unsigned int alloc_len;
3068
3069                dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
3070                capacity = fsa_dev_ptr[cid].size - 1;
3071                cp[0] = (capacity >> 56) & 0xff;
3072                cp[1] = (capacity >> 48) & 0xff;
3073                cp[2] = (capacity >> 40) & 0xff;
3074                cp[3] = (capacity >> 32) & 0xff;
3075                cp[4] = (capacity >> 24) & 0xff;
3076                cp[5] = (capacity >> 16) & 0xff;
3077                cp[6] = (capacity >> 8) & 0xff;
3078                cp[7] = (capacity >> 0) & 0xff;
3079                cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
3080                cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3081                cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3082                cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
3083                cp[12] = 0;
3084
3085                alloc_len = ((scsicmd->cmnd[10] << 24)
3086                             + (scsicmd->cmnd[11] << 16)
3087                             + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
3088
3089                alloc_len = min_t(size_t, alloc_len, sizeof(cp));
3090                scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
3091                if (alloc_len < scsi_bufflen(scsicmd))
3092                        scsi_set_resid(scsicmd,
3093                                       scsi_bufflen(scsicmd) - alloc_len);
3094
3095                /* Do not cache partition table for arrays */
3096                scsicmd->device->removable = 1;
3097
3098                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3099                                  SAM_STAT_GOOD;
3100                break;
3101        }
3102
3103        case READ_CAPACITY:
3104        {
3105                u32 capacity;
3106                char cp[8];
3107
3108                dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
3109                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3110                        capacity = fsa_dev_ptr[cid].size - 1;
3111                else
3112                        capacity = (u32)-1;
3113
3114                cp[0] = (capacity >> 24) & 0xff;
3115                cp[1] = (capacity >> 16) & 0xff;
3116                cp[2] = (capacity >> 8) & 0xff;
3117                cp[3] = (capacity >> 0) & 0xff;
3118                cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
3119                cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3120                cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3121                cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
3122                scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
3123                /* Do not cache partition table for arrays */
3124                scsicmd->device->removable = 1;
3125                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3126                                  SAM_STAT_GOOD;
3127                break;
3128        }
3129
3130        case MODE_SENSE:
3131        {
3132                int mode_buf_length = 4;
3133                u32 capacity;
3134                aac_modep_data mpd;
3135
3136                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3137                        capacity = fsa_dev_ptr[cid].size - 1;
3138                else
3139                        capacity = (u32)-1;
3140
3141                dprintk((KERN_DEBUG "MODE SENSE command.\n"));
3142                memset((char *)&mpd, 0, sizeof(aac_modep_data));
3143
3144                /* Mode data length */
3145                mpd.hd.data_length = sizeof(mpd.hd) - 1;
3146                /* Medium type - default */
3147                mpd.hd.med_type = 0;
3148                /* Device-specific param,
3149                   bit 8: 0/1 = write enabled/protected
3150                   bit 4: 0/1 = FUA enabled */
3151                mpd.hd.dev_par = 0;
3152
3153                if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3154                        mpd.hd.dev_par = 0x10;
3155                if (scsicmd->cmnd[1] & 0x8)
3156                        mpd.hd.bd_length = 0;   /* Block descriptor length */
3157                else {
3158                        mpd.hd.bd_length = sizeof(mpd.bd);
3159                        mpd.hd.data_length += mpd.hd.bd_length;
3160                        mpd.bd.block_length[0] =
3161                                (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3162                        mpd.bd.block_length[1] =
3163                                (fsa_dev_ptr[cid].block_size >> 8) &  0xff;
3164                        mpd.bd.block_length[2] =
3165                                fsa_dev_ptr[cid].block_size  & 0xff;
3166
3167                        mpd.mpc_buf[0] = scsicmd->cmnd[2];
3168                        if (scsicmd->cmnd[2] == 0x1C) {
3169                                /* page length */
3170                                mpd.mpc_buf[1] = 0xa;
3171                                /* Mode data length */
3172                                mpd.hd.data_length = 23;
3173                        } else {
3174                                /* Mode data length */
3175                                mpd.hd.data_length = 15;
3176                        }
3177
3178                        if (capacity > 0xffffff) {
3179                                mpd.bd.block_count[0] = 0xff;
3180                                mpd.bd.block_count[1] = 0xff;
3181                                mpd.bd.block_count[2] = 0xff;
3182                        } else {
3183                                mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
3184                                mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
3185                                mpd.bd.block_count[2] = capacity  & 0xff;
3186                        }
3187                }
3188                if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3189                  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3190                        mpd.hd.data_length += 3;
3191                        mpd.mpc_buf[0] = 8;
3192                        mpd.mpc_buf[1] = 1;
3193                        mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
3194                                ? 0 : 0x04; /* WCE */
3195                        mode_buf_length = sizeof(mpd);
3196                }
3197
3198                if (mode_buf_length > scsicmd->cmnd[4])
3199                        mode_buf_length = scsicmd->cmnd[4];
3200                else
3201                        mode_buf_length = sizeof(mpd);
3202                scsi_sg_copy_from_buffer(scsicmd,
3203                                         (char *)&mpd,
3204                                         mode_buf_length);
3205                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3206                                  SAM_STAT_GOOD;
3207                break;
3208        }
3209        case MODE_SENSE_10:
3210        {
3211                u32 capacity;
3212                int mode_buf_length = 8;
3213                aac_modep10_data mpd10;
3214
3215                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3216                        capacity = fsa_dev_ptr[cid].size - 1;
3217                else
3218                        capacity = (u32)-1;
3219
3220                dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
3221                memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
3222                /* Mode data length (MSB) */
3223                mpd10.hd.data_length[0] = 0;
3224                /* Mode data length (LSB) */
3225                mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
3226                /* Medium type - default */
3227                mpd10.hd.med_type = 0;
3228                /* Device-specific param,
3229                   bit 8: 0/1 = write enabled/protected
3230                   bit 4: 0/1 = FUA enabled */
3231                mpd10.hd.dev_par = 0;
3232
3233                if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3234                        mpd10.hd.dev_par = 0x10;
3235                mpd10.hd.rsrvd[0] = 0;  /* reserved */
3236                mpd10.hd.rsrvd[1] = 0;  /* reserved */
3237                if (scsicmd->cmnd[1] & 0x8) {
3238                        /* Block descriptor length (MSB) */
3239                        mpd10.hd.bd_length[0] = 0;
3240                        /* Block descriptor length (LSB) */
3241                        mpd10.hd.bd_length[1] = 0;
3242                } else {
3243                        mpd10.hd.bd_length[0] = 0;
3244                        mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
3245
3246                        mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
3247
3248                        mpd10.bd.block_length[0] =
3249                                (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3250                        mpd10.bd.block_length[1] =
3251                                (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3252                        mpd10.bd.block_length[2] =
3253                                fsa_dev_ptr[cid].block_size  & 0xff;
3254
3255                        if (capacity > 0xffffff) {
3256                                mpd10.bd.block_count[0] = 0xff;
3257                                mpd10.bd.block_count[1] = 0xff;
3258                                mpd10.bd.block_count[2] = 0xff;
3259                        } else {
3260                                mpd10.bd.block_count[0] =
3261                                        (capacity >> 16) & 0xff;
3262                                mpd10.bd.block_count[1] =
3263                                        (capacity >> 8) & 0xff;
3264                                mpd10.bd.block_count[2] =
3265                                        capacity  & 0xff;
3266                        }
3267                }
3268                if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3269                  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3270                        mpd10.hd.data_length[1] += 3;
3271                        mpd10.mpc_buf[0] = 8;
3272                        mpd10.mpc_buf[1] = 1;
3273                        mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
3274                                ? 0 : 0x04; /* WCE */
3275                        mode_buf_length = sizeof(mpd10);
3276                        if (mode_buf_length > scsicmd->cmnd[8])
3277                                mode_buf_length = scsicmd->cmnd[8];
3278                }
3279                scsi_sg_copy_from_buffer(scsicmd,
3280                                         (char *)&mpd10,
3281                                         mode_buf_length);
3282
3283                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3284                                  SAM_STAT_GOOD;
3285                break;
3286        }
3287        case REQUEST_SENSE:
3288                dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
3289                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3290                                sizeof(struct sense_data));
3291                memset(&dev->fsa_dev[cid].sense_data, 0,
3292                                sizeof(struct sense_data));
3293                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3294                                  SAM_STAT_GOOD;
3295                break;
3296
3297        case ALLOW_MEDIUM_REMOVAL:
3298                dprintk((KERN_DEBUG "LOCK command.\n"));
3299                if (scsicmd->cmnd[4])
3300                        fsa_dev_ptr[cid].locked = 1;
3301                else
3302                        fsa_dev_ptr[cid].locked = 0;
3303
3304                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3305                                  SAM_STAT_GOOD;
3306                break;
3307        /*
3308         *      These commands are all No-Ops
3309         */
3310        case TEST_UNIT_READY:
3311                if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
3312                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3313                                SAM_STAT_CHECK_CONDITION;
3314                        set_sense(&dev->fsa_dev[cid].sense_data,
3315                                  NOT_READY, SENCODE_BECOMING_READY,
3316                                  ASENCODE_BECOMING_READY, 0, 0);
3317                        memcpy(scsicmd->sense_buffer,
3318                               &dev->fsa_dev[cid].sense_data,
3319                               min_t(size_t,
3320                                     sizeof(dev->fsa_dev[cid].sense_data),
3321                                     SCSI_SENSE_BUFFERSIZE));
3322                break;
3323                }
3324        case RESERVE:
3325        case RELEASE:
3326        case REZERO_UNIT:
3327        case REASSIGN_BLOCKS:
3328        case SEEK_10:
3329                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3330                                  SAM_STAT_GOOD;
3331                break;
3332
3333        case START_STOP:
3334                return aac_start_stop(scsicmd);
3335
3336        /* FALLTHRU */
3337        default:
3338        /*
3339         *      Unhandled commands
3340         */
3341                dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
3342                                scsicmd->cmnd[0]));
3343                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3344                                SAM_STAT_CHECK_CONDITION;
3345                set_sense(&dev->fsa_dev[cid].sense_data,
3346                          ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
3347                          ASENCODE_INVALID_COMMAND, 0, 0);
3348                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3349                                min_t(size_t,
3350                                      sizeof(dev->fsa_dev[cid].sense_data),
3351                                      SCSI_SENSE_BUFFERSIZE));
3352        }
3353
3354scsi_done_ret:
3355
3356        scsicmd->scsi_done(scsicmd);
3357        return 0;
3358}
3359
3360static int query_disk(struct aac_dev *dev, void __user *arg)
3361{
3362        struct aac_query_disk qd;
3363        struct fsa_dev_info *fsa_dev_ptr;
3364
3365        fsa_dev_ptr = dev->fsa_dev;
3366        if (!fsa_dev_ptr)
3367                return -EBUSY;
3368        if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
3369                return -EFAULT;
3370        if (qd.cnum == -1) {
3371                if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
3372                        return -EINVAL;
3373                qd.cnum = qd.id;
3374        } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
3375                if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
3376                        return -EINVAL;
3377                qd.instance = dev->scsi_host_ptr->host_no;
3378                qd.bus = 0;
3379                qd.id = CONTAINER_TO_ID(qd.cnum);
3380                qd.lun = CONTAINER_TO_LUN(qd.cnum);
3381        }
3382        else return -EINVAL;
3383
3384        qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
3385        qd.locked = fsa_dev_ptr[qd.cnum].locked;
3386        qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
3387
3388        if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
3389                qd.unmapped = 1;
3390        else
3391                qd.unmapped = 0;
3392
3393        strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
3394          min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
3395
3396        if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
3397                return -EFAULT;
3398        return 0;
3399}
3400
3401static int force_delete_disk(struct aac_dev *dev, void __user *arg)
3402{
3403        struct aac_delete_disk dd;
3404        struct fsa_dev_info *fsa_dev_ptr;
3405
3406        fsa_dev_ptr = dev->fsa_dev;
3407        if (!fsa_dev_ptr)
3408                return -EBUSY;
3409
3410        if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3411                return -EFAULT;
3412
3413        if (dd.cnum >= dev->maximum_num_containers)
3414                return -EINVAL;
3415        /*
3416         *      Mark this container as being deleted.
3417         */
3418        fsa_dev_ptr[dd.cnum].deleted = 1;
3419        /*
3420         *      Mark the container as no longer valid
3421         */
3422        fsa_dev_ptr[dd.cnum].valid = 0;
3423        return 0;
3424}
3425
3426static int delete_disk(struct aac_dev *dev, void __user *arg)
3427{
3428        struct aac_delete_disk dd;
3429        struct fsa_dev_info *fsa_dev_ptr;
3430
3431        fsa_dev_ptr = dev->fsa_dev;
3432        if (!fsa_dev_ptr)
3433                return -EBUSY;
3434
3435        if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3436                return -EFAULT;
3437
3438        if (dd.cnum >= dev->maximum_num_containers)
3439                return -EINVAL;
3440        /*
3441         *      If the container is locked, it can not be deleted by the API.
3442         */
3443        if (fsa_dev_ptr[dd.cnum].locked)
3444                return -EBUSY;
3445        else {
3446                /*
3447                 *      Mark the container as no longer being valid.
3448                 */
3449                fsa_dev_ptr[dd.cnum].valid = 0;
3450                fsa_dev_ptr[dd.cnum].devname[0] = '\0';
3451                return 0;
3452        }
3453}
3454
3455int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
3456{
3457        switch (cmd) {
3458        case FSACTL_QUERY_DISK:
3459                return query_disk(dev, arg);
3460        case FSACTL_DELETE_DISK:
3461                return delete_disk(dev, arg);
3462        case FSACTL_FORCE_DELETE_DISK:
3463                return force_delete_disk(dev, arg);
3464        case FSACTL_GET_CONTAINERS:
3465                return aac_get_containers(dev);
3466        default:
3467                return -ENOTTY;
3468        }
3469}
3470
3471/**
3472 *
3473 * aac_srb_callback
3474 * @context: the context set in the fib - here it is scsi cmd
3475 * @fibptr: pointer to the fib
3476 *
3477 * Handles the completion of a scsi command to a non dasd device
3478 *
3479 */
3480
3481static void aac_srb_callback(void *context, struct fib * fibptr)
3482{
3483        struct aac_dev *dev;
3484        struct aac_srb_reply *srbreply;
3485        struct scsi_cmnd *scsicmd;
3486
3487        scsicmd = (struct scsi_cmnd *) context;
3488
3489        if (!aac_valid_context(scsicmd, fibptr))
3490                return;
3491
3492        BUG_ON(fibptr == NULL);
3493
3494        dev = fibptr->dev;
3495
3496        srbreply = (struct aac_srb_reply *) fib_data(fibptr);
3497
3498        scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
3499
3500        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3501                /* fast response */
3502                srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
3503                srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
3504        } else {
3505                /*
3506                 *      Calculate resid for sg
3507                 */
3508                scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
3509                                   - le32_to_cpu(srbreply->data_xfer_length));
3510        }
3511
3512
3513        scsi_dma_unmap(scsicmd);
3514
3515        /* expose physical device if expose_physicald flag is on */
3516        if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
3517          && expose_physicals > 0)
3518                aac_expose_phy_device(scsicmd);
3519
3520        /*
3521         * First check the fib status
3522         */
3523
3524        if (le32_to_cpu(srbreply->status) != ST_OK) {
3525                int len;
3526
3527                pr_warn("aac_srb_callback: srb failed, status = %d\n",
3528                                le32_to_cpu(srbreply->status));
3529                len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3530                            SCSI_SENSE_BUFFERSIZE);
3531                scsicmd->result = DID_ERROR << 16
3532                                | COMMAND_COMPLETE << 8
3533                                | SAM_STAT_CHECK_CONDITION;
3534                memcpy(scsicmd->sense_buffer,
3535                                srbreply->sense_data, len);
3536        }
3537
3538        /*
3539         * Next check the srb status
3540         */
3541        switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
3542        case SRB_STATUS_ERROR_RECOVERY:
3543        case SRB_STATUS_PENDING:
3544        case SRB_STATUS_SUCCESS:
3545                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3546                break;
3547        case SRB_STATUS_DATA_OVERRUN:
3548                switch (scsicmd->cmnd[0]) {
3549                case  READ_6:
3550                case  WRITE_6:
3551                case  READ_10:
3552                case  WRITE_10:
3553                case  READ_12:
3554                case  WRITE_12:
3555                case  READ_16:
3556                case  WRITE_16:
3557                        if (le32_to_cpu(srbreply->data_xfer_length)
3558                                                < scsicmd->underflow)
3559                                pr_warn("aacraid: SCSI CMD underflow\n");
3560                        else
3561                                pr_warn("aacraid: SCSI CMD Data Overrun\n");
3562                        scsicmd->result = DID_ERROR << 16
3563                                        | COMMAND_COMPLETE << 8;
3564                        break;
3565                case INQUIRY:
3566                        scsicmd->result = DID_OK << 16
3567                                        | COMMAND_COMPLETE << 8;
3568                        break;
3569                default:
3570                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3571                        break;
3572                }
3573                break;
3574        case SRB_STATUS_ABORTED:
3575                scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3576                break;
3577        case SRB_STATUS_ABORT_FAILED:
3578                /*
3579                 * Not sure about this one - but assuming the
3580                 * hba was trying to abort for some reason
3581                 */
3582                scsicmd->result = DID_ERROR << 16 | ABORT << 8;
3583                break;
3584        case SRB_STATUS_PARITY_ERROR:
3585                scsicmd->result = DID_PARITY << 16
3586                                | MSG_PARITY_ERROR << 8;
3587                break;
3588        case SRB_STATUS_NO_DEVICE:
3589        case SRB_STATUS_INVALID_PATH_ID:
3590        case SRB_STATUS_INVALID_TARGET_ID:
3591        case SRB_STATUS_INVALID_LUN:
3592        case SRB_STATUS_SELECTION_TIMEOUT:
3593                scsicmd->result = DID_NO_CONNECT << 16
3594                                | COMMAND_COMPLETE << 8;
3595                break;
3596
3597        case SRB_STATUS_COMMAND_TIMEOUT:
3598        case SRB_STATUS_TIMEOUT:
3599                scsicmd->result = DID_TIME_OUT << 16
3600                                | COMMAND_COMPLETE << 8;
3601                break;
3602
3603        case SRB_STATUS_BUSY:
3604                scsicmd->result = DID_BUS_BUSY << 16
3605                                | COMMAND_COMPLETE << 8;
3606                break;
3607
3608        case SRB_STATUS_BUS_RESET:
3609                scsicmd->result = DID_RESET << 16
3610                                | COMMAND_COMPLETE << 8;
3611                break;
3612
3613        case SRB_STATUS_MESSAGE_REJECTED:
3614                scsicmd->result = DID_ERROR << 16
3615                                | MESSAGE_REJECT << 8;
3616                break;
3617        case SRB_STATUS_REQUEST_FLUSHED:
3618        case SRB_STATUS_ERROR:
3619        case SRB_STATUS_INVALID_REQUEST:
3620        case SRB_STATUS_REQUEST_SENSE_FAILED:
3621        case SRB_STATUS_NO_HBA:
3622        case SRB_STATUS_UNEXPECTED_BUS_FREE:
3623        case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
3624        case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
3625        case SRB_STATUS_DELAYED_RETRY:
3626        case SRB_STATUS_BAD_FUNCTION:
3627        case SRB_STATUS_NOT_STARTED:
3628        case SRB_STATUS_NOT_IN_USE:
3629        case SRB_STATUS_FORCE_ABORT:
3630        case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
3631        default:
3632#ifdef AAC_DETAILED_STATUS_INFO
3633                pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
3634                        le32_to_cpu(srbreply->srb_status) & 0x3F,
3635                        aac_get_status_string(
3636                                le32_to_cpu(srbreply->srb_status) & 0x3F),
3637                        scsicmd->cmnd[0],
3638                        le32_to_cpu(srbreply->scsi_status));
3639#endif
3640                /*
3641                 * When the CC bit is SET by the host in ATA pass thru CDB,
3642                 *  driver is supposed to return DID_OK
3643                 *
3644                 * When the CC bit is RESET by the host, driver should
3645                 *  return DID_ERROR
3646                 */
3647                if ((scsicmd->cmnd[0] == ATA_12)
3648                        || (scsicmd->cmnd[0] == ATA_16)) {
3649
3650                        if (scsicmd->cmnd[2] & (0x01 << 5)) {
3651                                scsicmd->result = DID_OK << 16
3652                                        | COMMAND_COMPLETE << 8;
3653                        break;
3654                        } else {
3655                                scsicmd->result = DID_ERROR << 16
3656                                        | COMMAND_COMPLETE << 8;
3657                        break;
3658                        }
3659                } else {
3660                        scsicmd->result = DID_ERROR << 16
3661                                | COMMAND_COMPLETE << 8;
3662                        break;
3663                }
3664        }
3665        if (le32_to_cpu(srbreply->scsi_status)
3666                        == SAM_STAT_CHECK_CONDITION) {
3667                int len;
3668
3669                scsicmd->result |= SAM_STAT_CHECK_CONDITION;
3670                len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3671                            SCSI_SENSE_BUFFERSIZE);
3672#ifdef AAC_DETAILED_STATUS_INFO
3673                pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
3674                                        le32_to_cpu(srbreply->status), len);
3675#endif
3676                memcpy(scsicmd->sense_buffer,
3677                                srbreply->sense_data, len);
3678        }
3679
3680        /*
3681         * OR in the scsi status (already shifted up a bit)
3682         */
3683        scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
3684
3685        aac_fib_complete(fibptr);
3686        scsicmd->scsi_done(scsicmd);
3687}
3688
3689static void hba_resp_task_complete(struct aac_dev *dev,
3690                                        struct scsi_cmnd *scsicmd,
3691                                        struct aac_hba_resp *err) {
3692
3693        scsicmd->result = err->status;
3694        /* set residual count */
3695        scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
3696
3697        switch (err->status) {
3698        case SAM_STAT_GOOD:
3699                scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3700                break;
3701        case SAM_STAT_CHECK_CONDITION:
3702        {
3703                int len;
3704
3705                len = min_t(u8, err->sense_response_data_len,
3706                        SCSI_SENSE_BUFFERSIZE);
3707                if (len)
3708                        memcpy(scsicmd->sense_buffer,
3709                                err->sense_response_buf, len);
3710                scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3711                break;
3712        }
3713        case SAM_STAT_BUSY:
3714                scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
3715                break;
3716        case SAM_STAT_TASK_ABORTED:
3717                scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
3718                break;
3719        case SAM_STAT_RESERVATION_CONFLICT:
3720        case SAM_STAT_TASK_SET_FULL:
3721        default:
3722                scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3723                break;
3724        }
3725}
3726
3727static void hba_resp_task_failure(struct aac_dev *dev,
3728                                        struct scsi_cmnd *scsicmd,
3729                                        struct aac_hba_resp *err)
3730{
3731        switch (err->status) {
3732        case HBA_RESP_STAT_HBAMODE_DISABLED:
3733        {
3734                u32 bus, cid;
3735
3736                bus = aac_logical_to_phys(scmd_channel(scsicmd));
3737                cid = scmd_id(scsicmd);
3738                if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
3739                        dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
3740                        dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
3741                }
3742                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3743                break;
3744        }
3745        case HBA_RESP_STAT_IO_ERROR:
3746        case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
3747                scsicmd->result = DID_OK << 16 |
3748                        COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
3749                break;
3750        case HBA_RESP_STAT_IO_ABORTED:
3751                scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3752                break;
3753        case HBA_RESP_STAT_INVALID_DEVICE:
3754                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3755                break;
3756        case HBA_RESP_STAT_UNDERRUN:
3757                /* UNDERRUN is OK */
3758                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3759                break;
3760        case HBA_RESP_STAT_OVERRUN:
3761        default:
3762                scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3763                break;
3764        }
3765}
3766
3767/**
3768 *
3769 * aac_hba_callback
3770 * @context: the context set in the fib - here it is scsi cmd
3771 * @fibptr: pointer to the fib
3772 *
3773 * Handles the completion of a native HBA scsi command
3774 *
3775 */
3776void aac_hba_callback(void *context, struct fib *fibptr)
3777{
3778        struct aac_dev *dev;
3779        struct scsi_cmnd *scsicmd;
3780
3781        struct aac_hba_resp *err =
3782                        &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
3783
3784        scsicmd = (struct scsi_cmnd *) context;
3785
3786        if (!aac_valid_context(scsicmd, fibptr))
3787                return;
3788
3789        WARN_ON(fibptr == NULL);
3790        dev = fibptr->dev;
3791
3792        if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
3793                scsi_dma_unmap(scsicmd);
3794
3795        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3796                /* fast response */
3797                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3798                goto out;
3799        }
3800
3801        switch (err->service_response) {
3802        case HBA_RESP_SVCRES_TASK_COMPLETE:
3803                hba_resp_task_complete(dev, scsicmd, err);
3804                break;
3805        case HBA_RESP_SVCRES_FAILURE:
3806                hba_resp_task_failure(dev, scsicmd, err);
3807                break;
3808        case HBA_RESP_SVCRES_TMF_REJECTED:
3809                scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
3810                break;
3811        case HBA_RESP_SVCRES_TMF_LUN_INVALID:
3812                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3813                break;
3814        case HBA_RESP_SVCRES_TMF_COMPLETE:
3815        case HBA_RESP_SVCRES_TMF_SUCCEEDED:
3816                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3817                break;
3818        default:
3819                scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3820                break;
3821        }
3822
3823out:
3824        aac_fib_complete(fibptr);
3825
3826        if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
3827                scsicmd->SCp.sent_command = 1;
3828        else
3829                scsicmd->scsi_done(scsicmd);
3830}
3831
3832/**
3833 *
3834 * aac_send_srb_fib
3835 * @scsicmd: the scsi command block
3836 *
3837 * This routine will form a FIB and fill in the aac_srb from the
3838 * scsicmd passed in.
3839 */
3840
3841static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
3842{
3843        struct fib* cmd_fibcontext;
3844        struct aac_dev* dev;
3845        int status;
3846
3847        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3848        if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3849                        scsicmd->device->lun > 7) {
3850                scsicmd->result = DID_NO_CONNECT << 16;
3851                scsicmd->scsi_done(scsicmd);
3852                return 0;
3853        }
3854
3855        /*
3856         *      Allocate and initialize a Fib then setup a BlockWrite command
3857         */
3858        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3859        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3860        status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
3861
3862        /*
3863         *      Check that the command queued to the controller
3864         */
3865        if (status == -EINPROGRESS)
3866                return 0;
3867
3868        printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
3869        aac_fib_complete(cmd_fibcontext);
3870        aac_fib_free(cmd_fibcontext);
3871
3872        return -1;
3873}
3874
3875/**
3876 *
3877 * aac_send_hba_fib
3878 * @scsicmd: the scsi command block
3879 *
3880 * This routine will form a FIB and fill in the aac_hba_cmd_req from the
3881 * scsicmd passed in.
3882 */
3883static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
3884{
3885        struct fib *cmd_fibcontext;
3886        struct aac_dev *dev;
3887        int status;
3888
3889        dev = shost_priv(scsicmd->device->host);
3890        if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3891                        scsicmd->device->lun > AAC_MAX_LUN - 1) {
3892                scsicmd->result = DID_NO_CONNECT << 16;
3893                scsicmd->scsi_done(scsicmd);
3894                return 0;
3895        }
3896
3897        /*
3898         *      Allocate and initialize a Fib then setup a BlockWrite command
3899         */
3900        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3901        if (!cmd_fibcontext)
3902                return -1;
3903
3904        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3905        status = aac_adapter_hba(cmd_fibcontext, scsicmd);
3906
3907        /*
3908         *      Check that the command queued to the controller
3909         */
3910        if (status == -EINPROGRESS)
3911                return 0;
3912
3913        pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
3914                status);
3915        aac_fib_complete(cmd_fibcontext);
3916        aac_fib_free(cmd_fibcontext);
3917
3918        return -1;
3919}
3920
3921
3922static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
3923{
3924        struct aac_dev *dev;
3925        unsigned long byte_count = 0;
3926        int nseg;
3927        struct scatterlist *sg;
3928        int i;
3929
3930        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3931        // Get rid of old data
3932        psg->count = 0;
3933        psg->sg[0].addr = 0;
3934        psg->sg[0].count = 0;
3935
3936        nseg = scsi_dma_map(scsicmd);
3937        if (nseg <= 0)
3938                return nseg;
3939
3940        psg->count = cpu_to_le32(nseg);
3941
3942        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3943                psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
3944                psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
3945                byte_count += sg_dma_len(sg);
3946        }
3947        /* hba wants the size to be exact */
3948        if (byte_count > scsi_bufflen(scsicmd)) {
3949                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3950                        (byte_count - scsi_bufflen(scsicmd));
3951                psg->sg[i-1].count = cpu_to_le32(temp);
3952                byte_count = scsi_bufflen(scsicmd);
3953        }
3954        /* Check for command underflow */
3955        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3956                printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
3957                       byte_count, scsicmd->underflow);
3958        }
3959
3960        return byte_count;
3961}
3962
3963
3964static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
3965{
3966        struct aac_dev *dev;
3967        unsigned long byte_count = 0;
3968        u64 addr;
3969        int nseg;
3970        struct scatterlist *sg;
3971        int i;
3972
3973        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3974        // Get rid of old data
3975        psg->count = 0;
3976        psg->sg[0].addr[0] = 0;
3977        psg->sg[0].addr[1] = 0;
3978        psg->sg[0].count = 0;
3979
3980        nseg = scsi_dma_map(scsicmd);
3981        if (nseg <= 0)
3982                return nseg;
3983
3984        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3985                int count = sg_dma_len(sg);
3986                addr = sg_dma_address(sg);
3987                psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
3988                psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
3989                psg->sg[i].count = cpu_to_le32(count);
3990                byte_count += count;
3991        }
3992        psg->count = cpu_to_le32(nseg);
3993        /* hba wants the size to be exact */
3994        if (byte_count > scsi_bufflen(scsicmd)) {
3995                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3996                        (byte_count - scsi_bufflen(scsicmd));
3997                psg->sg[i-1].count = cpu_to_le32(temp);
3998                byte_count = scsi_bufflen(scsicmd);
3999        }
4000        /* Check for command underflow */
4001        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4002                printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
4003                       byte_count, scsicmd->underflow);
4004        }
4005
4006        return byte_count;
4007}
4008
4009static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
4010{
4011        unsigned long byte_count = 0;
4012        int nseg;
4013        struct scatterlist *sg;
4014        int i;
4015
4016        // Get rid of old data
4017        psg->count = 0;
4018        psg->sg[0].next = 0;
4019        psg->sg[0].prev = 0;
4020        psg->sg[0].addr[0] = 0;
4021        psg->sg[0].addr[1] = 0;
4022        psg->sg[0].count = 0;
4023        psg->sg[0].flags = 0;
4024
4025        nseg = scsi_dma_map(scsicmd);
4026        if (nseg <= 0)
4027                return nseg;
4028
4029        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4030                int count = sg_dma_len(sg);
4031                u64 addr = sg_dma_address(sg);
4032                psg->sg[i].next = 0;
4033                psg->sg[i].prev = 0;
4034                psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
4035                psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
4036                psg->sg[i].count = cpu_to_le32(count);
4037                psg->sg[i].flags = 0;
4038                byte_count += count;
4039        }
4040        psg->count = cpu_to_le32(nseg);
4041        /* hba wants the size to be exact */
4042        if (byte_count > scsi_bufflen(scsicmd)) {
4043                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
4044                        (byte_count - scsi_bufflen(scsicmd));
4045                psg->sg[i-1].count = cpu_to_le32(temp);
4046                byte_count = scsi_bufflen(scsicmd);
4047        }
4048        /* Check for command underflow */
4049        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4050                printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
4051                       byte_count, scsicmd->underflow);
4052        }
4053
4054        return byte_count;
4055}
4056
4057static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
4058                                struct aac_raw_io2 *rio2, int sg_max)
4059{
4060        unsigned long byte_count = 0;
4061        int nseg;
4062        struct scatterlist *sg;
4063        int i, conformable = 0;
4064        u32 min_size = PAGE_SIZE, cur_size;
4065
4066        nseg = scsi_dma_map(scsicmd);
4067        if (nseg <= 0)
4068                return nseg;
4069
4070        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4071                int count = sg_dma_len(sg);
4072                u64 addr = sg_dma_address(sg);
4073
4074                BUG_ON(i >= sg_max);
4075                rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
4076                rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
4077                cur_size = cpu_to_le32(count);
4078                rio2->sge[i].length = cur_size;
4079                rio2->sge[i].flags = 0;
4080                if (i == 0) {
4081                        conformable = 1;
4082                        rio2->sgeFirstSize = cur_size;
4083                } else if (i == 1) {
4084                        rio2->sgeNominalSize = cur_size;
4085                        min_size = cur_size;
4086                } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
4087                        conformable = 0;
4088                        if (cur_size < min_size)
4089                                min_size = cur_size;
4090                }
4091                byte_count += count;
4092        }
4093
4094        /* hba wants the size to be exact */
4095        if (byte_count > scsi_bufflen(scsicmd)) {
4096                u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
4097                        (byte_count - scsi_bufflen(scsicmd));
4098                rio2->sge[i-1].length = cpu_to_le32(temp);
4099                byte_count = scsi_bufflen(scsicmd);
4100        }
4101
4102        rio2->sgeCnt = cpu_to_le32(nseg);
4103        rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
4104        /* not conformable: evaluate required sg elements */
4105        if (!conformable) {
4106                int j, nseg_new = nseg, err_found;
4107                for (i = min_size / PAGE_SIZE; i >= 1; --i) {
4108                        err_found = 0;
4109                        nseg_new = 2;
4110                        for (j = 1; j < nseg - 1; ++j) {
4111                                if (rio2->sge[j].length % (i*PAGE_SIZE)) {
4112                                        err_found = 1;
4113                                        break;
4114                                }
4115                                nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
4116                        }
4117                        if (!err_found)
4118                                break;
4119                }
4120                if (i > 0 && nseg_new <= sg_max) {
4121                        int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new);
4122
4123                        if (ret < 0)
4124                                return ret;
4125                }
4126        } else
4127                rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4128
4129        /* Check for command underflow */
4130        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4131                printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
4132                       byte_count, scsicmd->underflow);
4133        }
4134
4135        return byte_count;
4136}
4137
4138static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
4139{
4140        struct sge_ieee1212 *sge;
4141        int i, j, pos;
4142        u32 addr_low;
4143
4144        if (aac_convert_sgl == 0)
4145                return 0;
4146
4147        sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
4148        if (sge == NULL)
4149                return -ENOMEM;
4150
4151        for (i = 1, pos = 1; i < nseg-1; ++i) {
4152                for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
4153                        addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
4154                        sge[pos].addrLow = addr_low;
4155                        sge[pos].addrHigh = rio2->sge[i].addrHigh;
4156                        if (addr_low < rio2->sge[i].addrLow)
4157                                sge[pos].addrHigh++;
4158                        sge[pos].length = pages * PAGE_SIZE;
4159                        sge[pos].flags = 0;
4160                        pos++;
4161                }
4162        }
4163        sge[pos] = rio2->sge[nseg-1];
4164        memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
4165
4166        kfree(sge);
4167        rio2->sgeCnt = cpu_to_le32(nseg_new);
4168        rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4169        rio2->sgeNominalSize = pages * PAGE_SIZE;
4170        return 0;
4171}
4172
4173static long aac_build_sghba(struct scsi_cmnd *scsicmd,
4174                        struct aac_hba_cmd_req *hbacmd,
4175                        int sg_max,
4176                        u64 sg_address)
4177{
4178        unsigned long byte_count = 0;
4179        int nseg;
4180        struct scatterlist *sg;
4181        int i;
4182        u32 cur_size;
4183        struct aac_hba_sgl *sge;
4184
4185        nseg = scsi_dma_map(scsicmd);
4186        if (nseg <= 0) {
4187                byte_count = nseg;
4188                goto out;
4189        }
4190
4191        if (nseg > HBA_MAX_SG_EMBEDDED)
4192                sge = &hbacmd->sge[2];
4193        else
4194                sge = &hbacmd->sge[0];
4195
4196        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4197                int count = sg_dma_len(sg);
4198                u64 addr = sg_dma_address(sg);
4199
4200                WARN_ON(i >= sg_max);
4201                sge->addr_hi = cpu_to_le32((u32)(addr>>32));
4202                sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
4203                cur_size = cpu_to_le32(count);
4204                sge->len = cur_size;
4205                sge->flags = 0;
4206                byte_count += count;
4207                sge++;
4208        }
4209
4210        sge--;
4211        /* hba wants the size to be exact */
4212        if (byte_count > scsi_bufflen(scsicmd)) {
4213                u32 temp;
4214
4215                temp = le32_to_cpu(sge->len) - byte_count
4216                                                - scsi_bufflen(scsicmd);
4217                sge->len = cpu_to_le32(temp);
4218                byte_count = scsi_bufflen(scsicmd);
4219        }
4220
4221        if (nseg <= HBA_MAX_SG_EMBEDDED) {
4222                hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
4223                sge->flags = cpu_to_le32(0x40000000);
4224        } else {
4225                /* not embedded */
4226                hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
4227                hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1);
4228                hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
4229                hbacmd->sge[0].addr_lo =
4230                        cpu_to_le32((u32)(sg_address & 0xffffffff));
4231        }
4232
4233        /* Check for command underflow */
4234        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4235                pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
4236                                byte_count, scsicmd->underflow);
4237        }
4238out:
4239        return byte_count;
4240}
4241
4242#ifdef AAC_DETAILED_STATUS_INFO
4243
4244struct aac_srb_status_info {
4245        u32     status;
4246        char    *str;
4247};
4248
4249
4250static struct aac_srb_status_info srb_status_info[] = {
4251        { SRB_STATUS_PENDING,           "Pending Status"},
4252        { SRB_STATUS_SUCCESS,           "Success"},
4253        { SRB_STATUS_ABORTED,           "Aborted Command"},
4254        { SRB_STATUS_ABORT_FAILED,      "Abort Failed"},
4255        { SRB_STATUS_ERROR,             "Error Event"},
4256        { SRB_STATUS_BUSY,              "Device Busy"},
4257        { SRB_STATUS_INVALID_REQUEST,   "Invalid Request"},
4258        { SRB_STATUS_INVALID_PATH_ID,   "Invalid Path ID"},
4259        { SRB_STATUS_NO_DEVICE,         "No Device"},
4260        { SRB_STATUS_TIMEOUT,           "Timeout"},
4261        { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
4262        { SRB_STATUS_COMMAND_TIMEOUT,   "Command Timeout"},
4263        { SRB_STATUS_MESSAGE_REJECTED,  "Message Rejected"},
4264        { SRB_STATUS_BUS_RESET,         "Bus Reset"},
4265        { SRB_STATUS_PARITY_ERROR,      "Parity Error"},
4266        { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
4267        { SRB_STATUS_NO_HBA,            "No HBA"},
4268        { SRB_STATUS_DATA_OVERRUN,      "Data Overrun/Data Underrun"},
4269        { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
4270        { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
4271        { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
4272        { SRB_STATUS_REQUEST_FLUSHED,   "Request Flushed"},
4273        { SRB_STATUS_DELAYED_RETRY,     "Delayed Retry"},
4274        { SRB_STATUS_INVALID_LUN,       "Invalid LUN"},
4275        { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
4276        { SRB_STATUS_BAD_FUNCTION,      "Bad Function"},
4277        { SRB_STATUS_ERROR_RECOVERY,    "Error Recovery"},
4278        { SRB_STATUS_NOT_STARTED,       "Not Started"},
4279        { SRB_STATUS_NOT_IN_USE,        "Not In Use"},
4280        { SRB_STATUS_FORCE_ABORT,       "Force Abort"},
4281        { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
4282        { 0xff,                         "Unknown Error"}
4283};
4284
4285char *aac_get_status_string(u32 status)
4286{
4287        int i;
4288
4289        for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
4290                if (srb_status_info[i].status == status)
4291                        return srb_status_info[i].str;
4292
4293        return "Bad Status Code";
4294}
4295
4296#endif
4297