linux/drivers/scsi/aacraid/aachba.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *      Adaptec AAC series RAID controller driver
   4 *      (c) Copyright 2001 Red Hat Inc.
   5 *
   6 * based on the old aacraid driver that is..
   7 * Adaptec aacraid device driver for Linux.
   8 *
   9 * Copyright (c) 2000-2010 Adaptec, Inc.
  10 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  11 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  12 *
  13 * Module Name:
  14 *  aachba.c
  15 *
   16 * Abstract: Contains interfaces to manage I/Os.
  17 */
  18
  19#include <linux/kernel.h>
  20#include <linux/init.h>
  21#include <linux/types.h>
  22#include <linux/pci.h>
  23#include <linux/spinlock.h>
  24#include <linux/slab.h>
  25#include <linux/completion.h>
  26#include <linux/blkdev.h>
  27#include <linux/uaccess.h>
  28#include <linux/highmem.h> /* For flush_kernel_dcache_page */
  29#include <linux/module.h>
  30
  31#include <asm/unaligned.h>
  32
  33#include <scsi/scsi.h>
  34#include <scsi/scsi_cmnd.h>
  35#include <scsi/scsi_device.h>
  36#include <scsi/scsi_host.h>
  37
  38#include "aacraid.h"
  39
  40/* values for inqd_pdt: Peripheral device type in plain English */
  41#define INQD_PDT_DA     0x00    /* Direct-access (DISK) device */
  42#define INQD_PDT_PROC   0x03    /* Processor device */
  43#define INQD_PDT_CHNGR  0x08    /* Changer (jukebox, scsi2) */
  44#define INQD_PDT_COMM   0x09    /* Communication device (scsi2) */
  45#define INQD_PDT_NOLUN2 0x1f    /* Unknown Device (scsi2) */
  46#define INQD_PDT_NOLUN  0x7f    /* Logical Unit Not Present */
  47
  48#define INQD_PDT_DMASK  0x1F    /* Peripheral Device Type Mask */
   49#define INQD_PDT_QMASK  0xE0    /* Peripheral Device Qualifier Mask */
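
/*
 * A minimal sketch (illustration only, not code the driver itself uses) of
 * how the two masks above split byte 0 of standard INQUIRY data:
 *
 *	u8 pdt   = byte0 & INQD_PDT_DMASK;	    e.g. INQD_PDT_DA for a disk
 *	u8 pqual = (byte0 & INQD_PDT_QMASK) >> 5;   0 means device connected
 *
 * so byte0 == 0x20 decodes to qualifier 1 (not currently connected) and
 * device type 0 (direct-access).
 */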
  50
  51/*
  52 *      Sense codes
  53 */
  54
  55#define SENCODE_NO_SENSE                        0x00
  56#define SENCODE_END_OF_DATA                     0x00
  57#define SENCODE_BECOMING_READY                  0x04
  58#define SENCODE_INIT_CMD_REQUIRED               0x04
  59#define SENCODE_UNRECOVERED_READ_ERROR          0x11
  60#define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
  61#define SENCODE_INVALID_COMMAND                 0x20
  62#define SENCODE_LBA_OUT_OF_RANGE                0x21
  63#define SENCODE_INVALID_CDB_FIELD               0x24
  64#define SENCODE_LUN_NOT_SUPPORTED               0x25
  65#define SENCODE_INVALID_PARAM_FIELD             0x26
  66#define SENCODE_PARAM_NOT_SUPPORTED             0x26
  67#define SENCODE_PARAM_VALUE_INVALID             0x26
  68#define SENCODE_RESET_OCCURRED                  0x29
  69#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
  70#define SENCODE_INQUIRY_DATA_CHANGED            0x3F
  71#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
  72#define SENCODE_DIAGNOSTIC_FAILURE              0x40
  73#define SENCODE_INTERNAL_TARGET_FAILURE         0x44
  74#define SENCODE_INVALID_MESSAGE_ERROR           0x49
  75#define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
  76#define SENCODE_OVERLAPPED_COMMAND              0x4E
  77
  78/*
  79 *      Additional sense codes
  80 */
  81
  82#define ASENCODE_NO_SENSE                       0x00
  83#define ASENCODE_END_OF_DATA                    0x05
  84#define ASENCODE_BECOMING_READY                 0x01
  85#define ASENCODE_INIT_CMD_REQUIRED              0x02
  86#define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
  87#define ASENCODE_INVALID_COMMAND                0x00
  88#define ASENCODE_LBA_OUT_OF_RANGE               0x00
  89#define ASENCODE_INVALID_CDB_FIELD              0x00
  90#define ASENCODE_LUN_NOT_SUPPORTED              0x00
  91#define ASENCODE_INVALID_PARAM_FIELD            0x00
  92#define ASENCODE_PARAM_NOT_SUPPORTED            0x01
  93#define ASENCODE_PARAM_VALUE_INVALID            0x02
  94#define ASENCODE_RESET_OCCURRED                 0x00
  95#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
  96#define ASENCODE_INQUIRY_DATA_CHANGED           0x03
  97#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
  98#define ASENCODE_DIAGNOSTIC_FAILURE             0x80
  99#define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
 100#define ASENCODE_INVALID_MESSAGE_ERROR          0x00
 101#define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
 102#define ASENCODE_OVERLAPPED_COMMAND             0x00
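
/*
 * The SENCODE_* and ASENCODE_* values above are used in pairs as the SCSI
 * ASC/ASCQ bytes (bytes 12 and 13 of fixed-format sense data, see set_sense()
 * further down).  A worked example: an out-of-range LBA is reported with
 *
 *	ASC  = SENCODE_LBA_OUT_OF_RANGE   = 0x21
 *	ASCQ = ASENCODE_LBA_OUT_OF_RANGE  = 0x00
 *
 * which SPC defines as LOGICAL BLOCK ADDRESS OUT OF RANGE.
 */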
 103
 104#define BYTE0(x) (unsigned char)(x)
 105#define BYTE1(x) (unsigned char)((x) >> 8)
 106#define BYTE2(x) (unsigned char)((x) >> 16)
 107#define BYTE3(x) (unsigned char)((x) >> 24)
 108
 109/* MODE_SENSE data format */
 110typedef struct {
 111        struct {
 112                u8      data_length;
 113                u8      med_type;
 114                u8      dev_par;
 115                u8      bd_length;
 116        } __attribute__((packed)) hd;
 117        struct {
 118                u8      dens_code;
 119                u8      block_count[3];
 120                u8      reserved;
 121                u8      block_length[3];
 122        } __attribute__((packed)) bd;
 123                u8      mpc_buf[3];
 124} __attribute__((packed)) aac_modep_data;
 125
 126/* MODE_SENSE_10 data format */
 127typedef struct {
 128        struct {
 129                u8      data_length[2];
 130                u8      med_type;
 131                u8      dev_par;
 132                u8      rsrvd[2];
 133                u8      bd_length[2];
 134        } __attribute__((packed)) hd;
 135        struct {
 136                u8      dens_code;
 137                u8      block_count[3];
 138                u8      reserved;
 139                u8      block_length[3];
 140        } __attribute__((packed)) bd;
 141                u8      mpc_buf[3];
 142} __attribute__((packed)) aac_modep10_data;
 143
 144/*------------------------------------------------------------------------------
 145 *              S T R U C T S / T Y P E D E F S
 146 *----------------------------------------------------------------------------*/
 147/* SCSI inquiry data */
 148struct inquiry_data {
 149        u8 inqd_pdt;    /* Peripheral qualifier | Peripheral Device Type */
 150        u8 inqd_dtq;    /* RMB | Device Type Qualifier */
 151        u8 inqd_ver;    /* ISO version | ECMA version | ANSI-approved version */
 152        u8 inqd_rdf;    /* AENC | TrmIOP | Response data format */
 153        u8 inqd_len;    /* Additional length (n-4) */
 154        u8 inqd_pad1[2];/* Reserved - must be zero */
 155        u8 inqd_pad2;   /* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
 156        u8 inqd_vid[8]; /* Vendor ID */
 157        u8 inqd_pid[16];/* Product ID */
 158        u8 inqd_prl[4]; /* Product Revision Level */
 159};
 160
 161/* Added for VPD 0x83 */
 162struct  tvpd_id_descriptor_type_1 {
 163        u8 codeset:4;           /* VPD_CODE_SET */
 164        u8 reserved:4;
 165        u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
 166        u8 reserved2:4;
 167        u8 reserved3;
 168        u8 identifierlength;
 169        u8 venid[8];
 170        u8 productid[16];
 171        u8 serialnumber[8];     /* SN in ASCII */
 172
 173};
 174
 175struct tvpd_id_descriptor_type_2 {
 176        u8 codeset:4;           /* VPD_CODE_SET */
 177        u8 reserved:4;
 178        u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
 179        u8 reserved2:4;
 180        u8 reserved3;
 181        u8 identifierlength;
 182        struct teu64id {
 183                u32 Serial;
  184                 /* The serial number is supposed to be 40 bits,
  185                  * but we only support 32, so make the last byte zero. */
 186                u8 reserved;
 187                u8 venid[3];
 188        } eu64id;
 189
 190};
 191
 192struct tvpd_id_descriptor_type_3 {
 193        u8 codeset : 4;          /* VPD_CODE_SET */
 194        u8 reserved : 4;
 195        u8 identifiertype : 4;   /* VPD_IDENTIFIER_TYPE */
 196        u8 reserved2 : 4;
 197        u8 reserved3;
 198        u8 identifierlength;
 199        u8 Identifier[16];
 200};
 201
 202struct tvpd_page83 {
 203        u8 DeviceType:5;
 204        u8 DeviceTypeQualifier:3;
 205        u8 PageCode;
 206        u8 reserved;
 207        u8 PageLength;
 208        struct tvpd_id_descriptor_type_1 type1;
 209        struct tvpd_id_descriptor_type_2 type2;
 210        struct tvpd_id_descriptor_type_3 type3;
 211};
 212
 213/*
 214 *              M O D U L E   G L O B A L S
 215 */
 216
 217static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
 218static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
 219static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
 220static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
 221                                struct aac_raw_io2 *rio2, int sg_max);
 222static long aac_build_sghba(struct scsi_cmnd *scsicmd,
 223                                struct aac_hba_cmd_req *hbacmd,
 224                                int sg_max, u64 sg_address);
 225static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
 226                                int pages, int nseg, int nseg_new);
 227static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
 228static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
 229#ifdef AAC_DETAILED_STATUS_INFO
 230static char *aac_get_status_string(u32 status);
 231#endif
 232
 233/*
  234 *      Non-DASD selection is handled entirely in aachba now
 235 */
 236
 237static int nondasd = -1;
 238static int aac_cache = 2;       /* WCE=0 to avoid performance problems */
 239static int dacmode = -1;
 240int aac_msi;
 241int aac_commit = -1;
 242int startup_timeout = 180;
 243int aif_timeout = 120;
 244int aac_sync_mode;  /* Only Sync. transfer - disabled */
 245int aac_convert_sgl = 1;        /* convert non-conformable s/g list - enabled */
 246
 247module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
 248MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
 249        " 0=off, 1=on");
 250module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
 251MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
 252        " 0=off, 1=on");
 253module_param(nondasd, int, S_IRUGO|S_IWUSR);
 254MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
 255        " 0=off, 1=on");
 256module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
 257MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
 258        "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
 259        "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
 260        "\tbit 2 - Disable only if Battery is protecting Cache");
 261module_param(dacmode, int, S_IRUGO|S_IWUSR);
 262MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
 263        " 0=off, 1=on");
 264module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
 265MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
 266        " adapter for foreign arrays.\n"
 267        "This is typically needed in systems that do not have a BIOS."
 268        " 0=off, 1=on");
 269module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
 270MODULE_PARM_DESC(msi, "IRQ handling."
  271        " 0=PIC(default), 1=MSI, 2=MSI-X");
 272module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
 273MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
  274        " adapter to have its kernel up and\n"
 275        "running. This is typically adjusted for large systems that do not"
 276        " have a BIOS.");
 277module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
 278MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
 279        " applications to pick up AIFs before\n"
 280        "deregistering them. This is typically adjusted for heavily burdened"
 281        " systems.");
 282
 283int aac_fib_dump;
 284module_param(aac_fib_dump, int, 0644);
 285MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");
 286
 287int numacb = -1;
 288module_param(numacb, int, S_IRUGO|S_IWUSR);
 289MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
 290        " blocks (FIB) allocated. Valid values are 512 and down. Default is"
 291        " to use suggestion from Firmware.");
 292
 293int acbsize = -1;
 294module_param(acbsize, int, S_IRUGO|S_IWUSR);
 295MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
 296        " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
 297        " suggestion from Firmware.");
 298
 299int update_interval = 30 * 60;
 300module_param(update_interval, int, S_IRUGO|S_IWUSR);
 301MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
 302        " updates issued to adapter.");
 303
 304int check_interval = 60;
 305module_param(check_interval, int, S_IRUGO|S_IWUSR);
 306MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
 307        " checks.");
 308
 309int aac_check_reset = 1;
 310module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
 311MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
  312        " adapter. A value of -1 forces the reset even on adapters programmed to"
 313        " ignore it.");
 314
 315int expose_physicals = -1;
 316module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
 317MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
  318        " -1=protect, 0=off, 1=on");
 319
 320int aac_reset_devices;
 321module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
 322MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
 323
 324int aac_wwn = 1;
 325module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
 326MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
 327        "\t0 - Disable\n"
 328        "\t1 - Array Meta Data Signature (default)\n"
 329        "\t2 - Adapter Serial Number");
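
/*
 * All of the options above are ordinary module parameters.  A hypothetical
 * example (values chosen purely for illustration) of setting them when the
 * driver is loaded as a module, or on the kernel command line when built in:
 *
 *	modprobe aacraid cache=2 expose_physicals=1 msi=1
 *	aacraid.cache=2 aacraid.expose_physicals=1 aacraid.msi=1
 *
 * Parameters declared with S_IWUSR can also be changed at runtime through
 * /sys/module/aacraid/parameters/<name>.
 */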
 330
 331
 332static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
 333                struct fib *fibptr) {
 334        struct scsi_device *device;
 335
 336        if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
 337                dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
 338                aac_fib_complete(fibptr);
 339                return 0;
 340        }
 341        scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
 342        device = scsicmd->device;
 343        if (unlikely(!device)) {
 344                dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
 345                aac_fib_complete(fibptr);
 346                return 0;
 347        }
 348        return 1;
 349}
 350
 351/**
 352 *      aac_get_config_status   -       check the adapter configuration
  353 *      @dev: adapter to query
      *      @commit_flag: force sending CT_COMMIT_CONFIG
 354 *
 355 *      Query config status, and commit the configuration if needed.
 356 */
 357int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 358{
 359        int status = 0;
 360        struct fib * fibptr;
 361
 362        if (!(fibptr = aac_fib_alloc(dev)))
 363                return -ENOMEM;
 364
 365        aac_fib_init(fibptr);
 366        {
 367                struct aac_get_config_status *dinfo;
 368                dinfo = (struct aac_get_config_status *) fib_data(fibptr);
 369
 370                dinfo->command = cpu_to_le32(VM_ContainerConfig);
 371                dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
 372                dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
 373        }
 374
 375        status = aac_fib_send(ContainerCommand,
 376                            fibptr,
 377                            sizeof (struct aac_get_config_status),
 378                            FsaNormal,
 379                            1, 1,
 380                            NULL, NULL);
 381        if (status < 0) {
 382                printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
 383        } else {
 384                struct aac_get_config_status_resp *reply
 385                  = (struct aac_get_config_status_resp *) fib_data(fibptr);
 386                dprintk((KERN_WARNING
 387                  "aac_get_config_status: response=%d status=%d action=%d\n",
 388                  le32_to_cpu(reply->response),
 389                  le32_to_cpu(reply->status),
 390                  le32_to_cpu(reply->data.action)));
 391                if ((le32_to_cpu(reply->response) != ST_OK) ||
 392                     (le32_to_cpu(reply->status) != CT_OK) ||
 393                     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
 394                        printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
 395                        status = -EINVAL;
 396                }
 397        }
  398        /* Do not set XferState to zero unless we receive a response from F/W */
 399        if (status >= 0)
 400                aac_fib_complete(fibptr);
 401
 402        /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
 403        if (status >= 0) {
 404                if ((aac_commit == 1) || commit_flag) {
 405                        struct aac_commit_config * dinfo;
 406                        aac_fib_init(fibptr);
 407                        dinfo = (struct aac_commit_config *) fib_data(fibptr);
 408
 409                        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 410                        dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
 411
 412                        status = aac_fib_send(ContainerCommand,
 413                                    fibptr,
 414                                    sizeof (struct aac_commit_config),
 415                                    FsaNormal,
 416                                    1, 1,
 417                                    NULL, NULL);
  418                        /* Do not set XferState to zero unless
  419                         * we receive a response from F/W */
 420                        if (status >= 0)
 421                                aac_fib_complete(fibptr);
 422                } else if (aac_commit == 0) {
 423                        printk(KERN_WARNING
 424                          "aac_get_config_status: Foreign device configurations are being ignored\n");
 425                }
 426        }
 427        /* FIB should be freed only after getting the response from the F/W */
 428        if (status != -ERESTARTSYS)
 429                aac_fib_free(fibptr);
 430        return status;
 431}
 432
 433static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
 434{
 435        char inq_data;
 436        scsi_sg_copy_to_buffer(scsicmd,  &inq_data, sizeof(inq_data));
 437        if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
 438                inq_data &= 0xdf;
 439                scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
 440        }
 441}
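
/*
 * To make the transformation above concrete: a hidden physical disk returns
 * INQUIRY byte 0 of 0x20 (peripheral qualifier 001b, device type TYPE_DISK),
 * and clearing bit 5 with the 0xdf mask rewrites it to 0x00, a connected
 * direct-access device, so the SCSI midlayer will expose it.
 */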
 442
 443/**
 444 *      aac_get_containers      -       list containers
  445 *      @dev: adapter to probe
 446 *
 447 *      Make a list of all containers on this controller
 448 */
 449int aac_get_containers(struct aac_dev *dev)
 450{
 451        struct fsa_dev_info *fsa_dev_ptr;
 452        u32 index;
 453        int status = 0;
 454        struct fib * fibptr;
 455        struct aac_get_container_count *dinfo;
 456        struct aac_get_container_count_resp *dresp;
 457        int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 458
 459        if (!(fibptr = aac_fib_alloc(dev)))
 460                return -ENOMEM;
 461
 462        aac_fib_init(fibptr);
 463        dinfo = (struct aac_get_container_count *) fib_data(fibptr);
 464        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 465        dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
 466
 467        status = aac_fib_send(ContainerCommand,
 468                    fibptr,
 469                    sizeof (struct aac_get_container_count),
 470                    FsaNormal,
 471                    1, 1,
 472                    NULL, NULL);
 473        if (status >= 0) {
 474                dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
 475                maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
 476                if (fibptr->dev->supplement_adapter_info.supported_options2 &
 477                    AAC_OPTION_SUPPORTED_240_VOLUMES) {
 478                        maximum_num_containers =
 479                                le32_to_cpu(dresp->MaxSimpleVolumes);
 480                }
 481                aac_fib_complete(fibptr);
 482        }
 483        /* FIB should be freed only after getting the response from the F/W */
 484        if (status != -ERESTARTSYS)
 485                aac_fib_free(fibptr);
 486
 487        if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
 488                maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 489        if (dev->fsa_dev == NULL ||
 490                dev->maximum_num_containers != maximum_num_containers) {
 491
 492                fsa_dev_ptr = dev->fsa_dev;
 493
 494                dev->fsa_dev = kcalloc(maximum_num_containers,
 495                                        sizeof(*fsa_dev_ptr), GFP_KERNEL);
 496
 497                kfree(fsa_dev_ptr);
 498                fsa_dev_ptr = NULL;
 499
 500
 501                if (!dev->fsa_dev)
 502                        return -ENOMEM;
 503
 504                dev->maximum_num_containers = maximum_num_containers;
 505        }
 506        for (index = 0; index < dev->maximum_num_containers; index++) {
 507                dev->fsa_dev[index].devname[0] = '\0';
 508                dev->fsa_dev[index].valid = 0;
 509
 510                status = aac_probe_container(dev, index);
 511
 512                if (status < 0) {
 513                        printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
 514                        break;
 515                }
 516        }
 517        return status;
 518}
 519
 520static void get_container_name_callback(void *context, struct fib * fibptr)
 521{
 522        struct aac_get_name_resp * get_name_reply;
 523        struct scsi_cmnd * scsicmd;
 524
 525        scsicmd = (struct scsi_cmnd *) context;
 526
 527        if (!aac_valid_context(scsicmd, fibptr))
 528                return;
 529
 530        dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
 531        BUG_ON(fibptr == NULL);
 532
 533        get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
 534        /* Failure is irrelevant, using default value instead */
 535        if ((le32_to_cpu(get_name_reply->status) == CT_OK)
 536         && (get_name_reply->data[0] != '\0')) {
 537                char *sp = get_name_reply->data;
 538                int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
 539
 540                sp[data_size - 1] = '\0';
 541                while (*sp == ' ')
 542                        ++sp;
 543                if (*sp) {
 544                        struct inquiry_data inq;
 545                        char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
 546                        int count = sizeof(d);
 547                        char *dp = d;
 548                        do {
 549                                *dp++ = (*sp) ? *sp++ : ' ';
 550                        } while (--count > 0);
 551
 552                        scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
 553                        memcpy(inq.inqd_pid, d, sizeof(d));
 554                        scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
 555                }
 556        }
 557
 558        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 559
 560        aac_fib_complete(fibptr);
 561        scsicmd->scsi_done(scsicmd);
 562}
 563
 564/**
  565 *      aac_get_container_name  -       get container name, non-blocking.
      *      @scsicmd: the scsi command block
 566 */
 567static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 568{
 569        int status;
 570        int data_size;
 571        struct aac_get_name *dinfo;
 572        struct fib * cmd_fibcontext;
 573        struct aac_dev * dev;
 574
 575        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 576
 577        data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
 578
 579        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
 580
 581        aac_fib_init(cmd_fibcontext);
 582        dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
 583        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 584
 585        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 586        dinfo->type = cpu_to_le32(CT_READ_NAME);
 587        dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
 588        dinfo->count = cpu_to_le32(data_size - 1);
 589
 590        status = aac_fib_send(ContainerCommand,
 591                  cmd_fibcontext,
 592                  sizeof(struct aac_get_name_resp),
 593                  FsaNormal,
 594                  0, 1,
 595                  (fib_callback)get_container_name_callback,
 596                  (void *) scsicmd);
 597
 598        /*
  599         *      Check that the command was queued to the controller
 600         */
 601        if (status == -EINPROGRESS)
 602                return 0;
 603
 604        printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 605        aac_fib_complete(cmd_fibcontext);
 606        return -1;
 607}
 608
 609static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
 610{
 611        struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
 612
 613        if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
 614                return aac_scsi_cmd(scsicmd);
 615
 616        scsicmd->result = DID_NO_CONNECT << 16;
 617        scsicmd->scsi_done(scsicmd);
 618        return 0;
 619}
 620
 621static void _aac_probe_container2(void * context, struct fib * fibptr)
 622{
 623        struct fsa_dev_info *fsa_dev_ptr;
 624        int (*callback)(struct scsi_cmnd *);
 625        struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
 626        int i;
 627
 628
 629        if (!aac_valid_context(scsicmd, fibptr))
 630                return;
 631
 632        scsicmd->SCp.Status = 0;
 633        fsa_dev_ptr = fibptr->dev->fsa_dev;
 634        if (fsa_dev_ptr) {
 635                struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
 636                __le32 sup_options2;
 637
 638                fsa_dev_ptr += scmd_id(scsicmd);
 639                sup_options2 =
 640                        fibptr->dev->supplement_adapter_info.supported_options2;
 641
 642                if ((le32_to_cpu(dresp->status) == ST_OK) &&
 643                    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
 644                    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
 645                        if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
 646                                dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
 647                                fsa_dev_ptr->block_size = 0x200;
 648                        } else {
 649                                fsa_dev_ptr->block_size =
 650                                        le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
 651                        }
 652                        for (i = 0; i < 16; i++)
 653                                fsa_dev_ptr->identifier[i] =
 654                                        dresp->mnt[0].fileinfo.bdevinfo
 655                                                                .identifier[i];
 656                        fsa_dev_ptr->valid = 1;
 657                        /* sense_key holds the current state of the spin-up */
 658                        if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
 659                                fsa_dev_ptr->sense_data.sense_key = NOT_READY;
 660                        else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
 661                                fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
 662                        fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
 663                        fsa_dev_ptr->size
 664                          = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
 665                            (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
 666                        fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
 667                }
 668                if ((fsa_dev_ptr->valid & 1) == 0)
 669                        fsa_dev_ptr->valid = 0;
 670                scsicmd->SCp.Status = le32_to_cpu(dresp->count);
 671        }
 672        aac_fib_complete(fibptr);
 673        aac_fib_free(fibptr);
 674        callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
 675        scsicmd->SCp.ptr = NULL;
 676        (*callback)(scsicmd);
 677        return;
 678}
 679
 680static void _aac_probe_container1(void * context, struct fib * fibptr)
 681{
 682        struct scsi_cmnd * scsicmd;
 683        struct aac_mount * dresp;
 684        struct aac_query_mount *dinfo;
 685        int status;
 686
 687        dresp = (struct aac_mount *) fib_data(fibptr);
 688        if (!aac_supports_2T(fibptr->dev)) {
 689                dresp->mnt[0].capacityhigh = 0;
 690                if ((le32_to_cpu(dresp->status) == ST_OK) &&
 691                        (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
 692                        _aac_probe_container2(context, fibptr);
 693                        return;
 694                }
 695        }
 696        scsicmd = (struct scsi_cmnd *) context;
 697
 698        if (!aac_valid_context(scsicmd, fibptr))
 699                return;
 700
 701        aac_fib_init(fibptr);
 702
 703        dinfo = (struct aac_query_mount *)fib_data(fibptr);
 704
 705        if (fibptr->dev->supplement_adapter_info.supported_options2 &
 706            AAC_OPTION_VARIABLE_BLOCK_SIZE)
 707                dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
 708        else
 709                dinfo->command = cpu_to_le32(VM_NameServe64);
 710
 711        dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 712        dinfo->type = cpu_to_le32(FT_FILESYS);
 713        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 714
 715        status = aac_fib_send(ContainerCommand,
 716                          fibptr,
 717                          sizeof(struct aac_query_mount),
 718                          FsaNormal,
 719                          0, 1,
 720                          _aac_probe_container2,
 721                          (void *) scsicmd);
 722        /*
  723         *      Check that the command was queued to the controller
 724         */
 725        if (status < 0 && status != -EINPROGRESS) {
 726                /* Inherit results from VM_NameServe, if any */
 727                dresp->status = cpu_to_le32(ST_OK);
 728                _aac_probe_container2(context, fibptr);
 729        }
 730}
 731
 732static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
 733{
 734        struct fib * fibptr;
 735        int status = -ENOMEM;
 736
 737        if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
 738                struct aac_query_mount *dinfo;
 739
 740                aac_fib_init(fibptr);
 741
 742                dinfo = (struct aac_query_mount *)fib_data(fibptr);
 743
 744                if (fibptr->dev->supplement_adapter_info.supported_options2 &
 745                    AAC_OPTION_VARIABLE_BLOCK_SIZE)
 746                        dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
 747                else
 748                        dinfo->command = cpu_to_le32(VM_NameServe);
 749
 750                dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 751                dinfo->type = cpu_to_le32(FT_FILESYS);
 752                scsicmd->SCp.ptr = (char *)callback;
 753                scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 754
 755                status = aac_fib_send(ContainerCommand,
 756                          fibptr,
 757                          sizeof(struct aac_query_mount),
 758                          FsaNormal,
 759                          0, 1,
 760                          _aac_probe_container1,
 761                          (void *) scsicmd);
 762                /*
  763                 *      Check that the command was queued to the controller
 764                 */
 765                if (status == -EINPROGRESS)
 766                        return 0;
 767
 768                if (status < 0) {
 769                        scsicmd->SCp.ptr = NULL;
 770                        aac_fib_complete(fibptr);
 771                        aac_fib_free(fibptr);
 772                }
 773        }
 774        if (status < 0) {
 775                struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
 776                if (fsa_dev_ptr) {
 777                        fsa_dev_ptr += scmd_id(scsicmd);
 778                        if ((fsa_dev_ptr->valid & 1) == 0) {
 779                                fsa_dev_ptr->valid = 0;
 780                                return (*callback)(scsicmd);
 781                        }
 782                }
 783        }
 784        return status;
 785}
 786
 787/**
 788 *      aac_probe_container             -       query a logical volume
 789 *      @dev: device to query
 790 *      @cid: container identifier
 791 *
 792 *      Queries the controller about the given volume. The volume information
 793 *      is updated in the struct fsa_dev_info structure rather than returned.
 794 */
 795static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
 796{
 797        scsicmd->device = NULL;
 798        return 0;
 799}
 800
 801int aac_probe_container(struct aac_dev *dev, int cid)
 802{
 803        struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
 804        struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
 805        int status;
 806
 807        if (!scsicmd || !scsidev) {
 808                kfree(scsicmd);
 809                kfree(scsidev);
 810                return -ENOMEM;
 811        }
 812        scsicmd->list.next = NULL;
 813        scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
 814
 815        scsicmd->device = scsidev;
 816        scsidev->sdev_state = 0;
 817        scsidev->id = cid;
 818        scsidev->host = dev->scsi_host_ptr;
 819
 820        if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
 821                while (scsicmd->device == scsidev)
 822                        schedule();
 823        kfree(scsidev);
 824        status = scsicmd->SCp.Status;
 825        kfree(scsicmd);
 826        return status;
 827}
 828
 829/* Local Structure to set SCSI inquiry data strings */
 830struct scsi_inq {
 831        char vid[8];         /* Vendor ID */
 832        char pid[16];        /* Product ID */
 833        char prl[4];         /* Product Revision Level */
 834};
 835
 836/**
  837 *      inqstrcpy       -       string copy
  838 *      @a:     string to copy from
  839 *      @b:     string to copy to
  840 *
  841 *      Copy a string from one location to another
  842 *      without copying the terminating \0
 843 */
 844
 845static void inqstrcpy(char *a, char *b)
 846{
 847
 848        while (*a != (char)0)
 849                *b++ = *a++;
 850}
 851
 852static char *container_types[] = {
 853        "None",
 854        "Volume",
 855        "Mirror",
 856        "Stripe",
 857        "RAID5",
 858        "SSRW",
 859        "SSRO",
 860        "Morph",
 861        "Legacy",
 862        "RAID4",
 863        "RAID10",
 864        "RAID00",
 865        "V-MIRRORS",
 866        "PSEUDO R4",
 867        "RAID50",
 868        "RAID5D",
 869        "RAID5D0",
 870        "RAID1E",
 871        "RAID6",
 872        "RAID60",
 873        "Unknown"
 874};
 875
 876char * get_container_type(unsigned tindex)
 877{
 878        if (tindex >= ARRAY_SIZE(container_types))
 879                tindex = ARRAY_SIZE(container_types) - 1;
 880        return container_types[tindex];
 881}
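
/*
 * Usage sketch: get_container_type() is a bounds-checked lookup into
 * container_types[], so for example
 *
 *	get_container_type(4)    returns "RAID5"
 *	get_container_type(9999) returns "Unknown"
 *
 * since any out-of-range index maps to the final "Unknown" entry.
 */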
 882
 883/* Function: setinqstr
 884 *
  885 * Arguments: [1] pointer to struct aac_dev, [2] pointer to the inquiry data buffer, [3] container type index
 886 *
 887 * Purpose: Sets SCSI inquiry data strings for vendor, product
 888 * and revision level. Allows strings to be set in platform dependent
 889 * files instead of in OS dependent driver source.
 890 */
 891
 892static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 893{
 894        struct scsi_inq *str;
 895        struct aac_supplement_adapter_info *sup_adap_info;
 896
 897        sup_adap_info = &dev->supplement_adapter_info;
 898        str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
 899        memset(str, ' ', sizeof(*str));
 900
 901        if (sup_adap_info->adapter_type_text[0]) {
 902                int c;
 903                char *cp;
 904                char *cname = kmemdup(sup_adap_info->adapter_type_text,
 905                                sizeof(sup_adap_info->adapter_type_text),
 906                                                                GFP_ATOMIC);
 907                if (!cname)
 908                        return;
 909
 910                cp = cname;
 911                if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
 912                        inqstrcpy("SMC", str->vid);
 913                else {
 914                        c = sizeof(str->vid);
 915                        while (*cp && *cp != ' ' && --c)
 916                                ++cp;
 917                        c = *cp;
 918                        *cp = '\0';
 919                        inqstrcpy(cname, str->vid);
 920                        *cp = c;
 921                        while (*cp && *cp != ' ')
 922                                ++cp;
 923                }
 924                while (*cp == ' ')
 925                        ++cp;
 926                /* last six chars reserved for vol type */
 927                if (strlen(cp) > sizeof(str->pid))
 928                        cp[sizeof(str->pid)] = '\0';
 929                inqstrcpy (cp, str->pid);
 930
 931                kfree(cname);
 932        } else {
 933                struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
 934
 935                inqstrcpy (mp->vname, str->vid);
 936                /* last six chars reserved for vol type */
 937                inqstrcpy (mp->model, str->pid);
 938        }
 939
 940        if (tindex < ARRAY_SIZE(container_types)){
 941                char *findit = str->pid;
 942
 943                for ( ; *findit != ' '; findit++); /* walk till we find a space */
 944                /* RAID is superfluous in the context of a RAID device */
 945                if (memcmp(findit-4, "RAID", 4) == 0)
 946                        *(findit -= 4) = ' ';
 947                if (((findit - str->pid) + strlen(container_types[tindex]))
 948                 < (sizeof(str->pid) + sizeof(str->prl)))
 949                        inqstrcpy (container_types[tindex], findit + 1);
 950        }
 951        inqstrcpy ("V1.0", str->prl);
 952}
 953
 954static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
 955                struct aac_dev *dev, struct scsi_cmnd *scsicmd)
 956{
 957        int container;
 958
 959        vpdpage83data->type3.codeset = 1;
 960        vpdpage83data->type3.identifiertype = 3;
 961        vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
 962                        - 4;
 963
 964        for (container = 0; container < dev->maximum_num_containers;
 965                        container++) {
 966
 967                if (scmd_id(scsicmd) == container) {
 968                        memcpy(vpdpage83data->type3.Identifier,
 969                                        dev->fsa_dev[container].identifier,
 970                                        16);
 971                        break;
 972                }
 973        }
 974}
 975
 976static void get_container_serial_callback(void *context, struct fib * fibptr)
 977{
 978        struct aac_get_serial_resp * get_serial_reply;
 979        struct scsi_cmnd * scsicmd;
 980
 981        BUG_ON(fibptr == NULL);
 982
 983        scsicmd = (struct scsi_cmnd *) context;
 984        if (!aac_valid_context(scsicmd, fibptr))
 985                return;
 986
 987        get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
 988        /* Failure is irrelevant, using default value instead */
 989        if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
  990                /* Check to see if it's for VPD 0x83 or 0x80 */
 991                if (scsicmd->cmnd[2] == 0x83) {
 992                        /* vpd page 0x83 - Device Identification Page */
 993                        struct aac_dev *dev;
 994                        int i;
 995                        struct tvpd_page83 vpdpage83data;
 996
 997                        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 998
 999                        memset(((u8 *)&vpdpage83data), 0,
1000                               sizeof(vpdpage83data));
1001
 1002                        /* DIRECT_ACCESS_DEVICE */
1003                        vpdpage83data.DeviceType = 0;
1004                        /* DEVICE_CONNECTED */
1005                        vpdpage83data.DeviceTypeQualifier = 0;
1006                        /* VPD_DEVICE_IDENTIFIERS */
1007                        vpdpage83data.PageCode = 0x83;
1008                        vpdpage83data.reserved = 0;
1009                        vpdpage83data.PageLength =
1010                                sizeof(vpdpage83data.type1) +
1011                                sizeof(vpdpage83data.type2);
1012
1013                        /* VPD 83 Type 3 is not supported for ARC */
1014                        if (dev->sa_firmware)
1015                                vpdpage83data.PageLength +=
1016                                sizeof(vpdpage83data.type3);
1017
1018                        /* T10 Vendor Identifier Field Format */
1019                        /* VpdcodesetAscii */
1020                        vpdpage83data.type1.codeset = 2;
1021                        /* VpdIdentifierTypeVendorId */
1022                        vpdpage83data.type1.identifiertype = 1;
1023                        vpdpage83data.type1.identifierlength =
1024                                sizeof(vpdpage83data.type1) - 4;
1025
1026                        /* "ADAPTEC " for adaptec */
1027                        memcpy(vpdpage83data.type1.venid,
1028                                "ADAPTEC ",
1029                                sizeof(vpdpage83data.type1.venid));
1030                        memcpy(vpdpage83data.type1.productid,
1031                                "ARRAY           ",
1032                                sizeof(
1033                                vpdpage83data.type1.productid));
1034
 1035                        /* Convert to an ASCII-based serial number.
 1036                         * The LSB is at the end.
 1037                         */
1038                        for (i = 0; i < 8; i++) {
1039                                u8 temp =
1040                                        (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
1041                                if (temp  > 0x9) {
1042                                        vpdpage83data.type1.serialnumber[i] =
1043                                                        'A' + (temp - 0xA);
1044                                } else {
1045                                        vpdpage83data.type1.serialnumber[i] =
1046                                                        '0' + temp;
1047                                }
1048                        }
1049
1050                        /* VpdCodeSetBinary */
1051                        vpdpage83data.type2.codeset = 1;
1052                        /* VpdidentifiertypeEUI64 */
1053                        vpdpage83data.type2.identifiertype = 2;
1054                        vpdpage83data.type2.identifierlength =
1055                                sizeof(vpdpage83data.type2) - 4;
1056
1057                        vpdpage83data.type2.eu64id.venid[0] = 0xD0;
1058                        vpdpage83data.type2.eu64id.venid[1] = 0;
1059                        vpdpage83data.type2.eu64id.venid[2] = 0;
1060
1061                        vpdpage83data.type2.eu64id.Serial =
1062                                                        get_serial_reply->uid;
1063                        vpdpage83data.type2.eu64id.reserved = 0;
1064
1065                        /*
1066                         * VpdIdentifierTypeFCPHName
1067                         * VPD 0x83 Type 3 not supported for ARC
1068                         */
1069                        if (dev->sa_firmware) {
1070                                build_vpd83_type3(&vpdpage83data,
1071                                                dev, scsicmd);
1072                        }
1073
1074                        /* Move the inquiry data to the response buffer. */
1075                        scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
1076                                                 sizeof(vpdpage83data));
1077                } else {
1078                        /* It must be for VPD 0x80 */
1079                        char sp[13];
1080                        /* EVPD bit set */
1081                        sp[0] = INQD_PDT_DA;
1082                        sp[1] = scsicmd->cmnd[2];
1083                        sp[2] = 0;
1084                        sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
1085                                le32_to_cpu(get_serial_reply->uid));
1086                        scsi_sg_copy_from_buffer(scsicmd, sp,
1087                                                 sizeof(sp));
1088                }
1089        }
1090
1091        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1092
1093        aac_fib_complete(fibptr);
1094        scsicmd->scsi_done(scsicmd);
1095}
1096
1097/**
 1098 *      aac_get_container_serial - get container serial, non-blocking.
      *      @scsicmd: the scsi command block
1099 */
1100static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
1101{
1102        int status;
1103        struct aac_get_serial *dinfo;
1104        struct fib * cmd_fibcontext;
1105        struct aac_dev * dev;
1106
1107        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1108
1109        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
1110
1111        aac_fib_init(cmd_fibcontext);
1112        dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
1113
1114        dinfo->command = cpu_to_le32(VM_ContainerConfig);
1115        dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
1116        dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
1117        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1118
1119        status = aac_fib_send(ContainerCommand,
1120                  cmd_fibcontext,
1121                  sizeof(struct aac_get_serial_resp),
1122                  FsaNormal,
1123                  0, 1,
1124                  (fib_callback) get_container_serial_callback,
1125                  (void *) scsicmd);
1126
1127        /*
 1128         *      Check that the command was queued to the controller
1129         */
1130        if (status == -EINPROGRESS)
1131                return 0;
1132
1133        printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
1134        aac_fib_complete(cmd_fibcontext);
1135        return -1;
1136}
1137
1138/* Function: setinqserial
1139 *
 1140 * Arguments: [1] pointer to struct aac_dev, [2] pointer to the data buffer, [3] container id
1141 *
1142 * Purpose: Sets SCSI Unit Serial number.
1143 *          This is a fake. We should read a proper
1144 *          serial number from the container. <SuSE>But
1145 *          without docs it's quite hard to do it :-)
1146 *          So this will have to do in the meantime.</SuSE>
1147 */
1148
1149static int setinqserial(struct aac_dev *dev, void *data, int cid)
1150{
1151        /*
1152         *      This breaks array migration.
1153         */
1154        return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
1155                        le32_to_cpu(dev->adapter_info.serial[0]), cid);
1156}
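
/*
 * A worked example of the format above (values made up for illustration):
 * with adapter_info.serial[0] holding 0x00123ABC and cid == 2, the buffer
 * receives the ten-character string "00123ABC02".
 */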
1157
1158static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
1159        u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
1160{
1161        u8 *sense_buf = (u8 *)sense_data;
1162        /* Sense data valid, err code 70h */
1163        sense_buf[0] = 0x70; /* No info field */
1164        sense_buf[1] = 0;       /* Segment number, always zero */
1165
1166        sense_buf[2] = sense_key;       /* Sense key */
1167
1168        sense_buf[12] = sense_code;     /* Additional sense code */
1169        sense_buf[13] = a_sense_code;   /* Additional sense code qualifier */
1170
1171        if (sense_key == ILLEGAL_REQUEST) {
1172                sense_buf[7] = 10;      /* Additional sense length */
1173
1174                sense_buf[15] = bit_pointer;
1175                /* Illegal parameter is in the parameter block */
1176                if (sense_code == SENCODE_INVALID_CDB_FIELD)
1177                        sense_buf[15] |= 0xc0;/* Std sense key specific field */
1178                /* Illegal parameter is in the CDB block */
1179                sense_buf[16] = field_pointer >> 8;     /* MSB */
1180                sense_buf[17] = field_pointer;          /* LSB */
1181        } else
1182                sense_buf[7] = 6;       /* Additional sense length */
1183}
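
/*
 * A worked example of set_sense() (hypothetical call, for illustration only):
 * reporting an unsupported opcode with
 *
 *	set_sense(&sense_data, ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
 *		  ASENCODE_INVALID_COMMAND, 0, 0);
 *
 * produces fixed-format sense data with
 *
 *	byte  0 = 0x70  (current error, fixed format)
 *	byte  2 = 0x05  (ILLEGAL_REQUEST)
 *	byte  7 = 10    (additional sense length)
 *	byte 12 = 0x20  (ASC: INVALID COMMAND OPERATION CODE)
 *	byte 13 = 0x00  (ASCQ)
 *
 * Bytes 15-17 stay zero here: bit_pointer and field_pointer are 0 and the
 * 0xc0 "field valid" flag is only set for SENCODE_INVALID_CDB_FIELD.
 */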
1184
1185static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
1186{
1187        if (lba & 0xffffffff00000000LL) {
1188                int cid = scmd_id(cmd);
1189                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
1190                cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1191                        SAM_STAT_CHECK_CONDITION;
1192                set_sense(&dev->fsa_dev[cid].sense_data,
1193                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1194                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1195                memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1196                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1197                             SCSI_SENSE_BUFFERSIZE));
1198                cmd->scsi_done(cmd);
1199                return 1;
1200        }
1201        return 0;
1202}
1203
1204static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
1205{
1206        return 0;
1207}
1208
1209static void io_callback(void *context, struct fib * fibptr);
1210
1211static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1212{
1213        struct aac_dev *dev = fib->dev;
1214        u16 fibsize, command;
1215        long ret;
1216
1217        aac_fib_init(fib);
1218        if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
1219                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
1220                !dev->sync_mode) {
1221                struct aac_raw_io2 *readcmd2;
1222                readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
1223                memset(readcmd2, 0, sizeof(struct aac_raw_io2));
1224                readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
1225                readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1226                readcmd2->byteCount = cpu_to_le32(count *
1227                        dev->fsa_dev[scmd_id(cmd)].block_size);
1228                readcmd2->cid = cpu_to_le16(scmd_id(cmd));
1229                readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
1230                ret = aac_build_sgraw2(cmd, readcmd2,
1231                                dev->scsi_host_ptr->sg_tablesize);
1232                if (ret < 0)
1233                        return ret;
1234                command = ContainerRawIo2;
1235                fibsize = sizeof(struct aac_raw_io2) +
1236                        ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
1237        } else {
1238                struct aac_raw_io *readcmd;
1239                readcmd = (struct aac_raw_io *) fib_data(fib);
1240                readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1241                readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1242                readcmd->count = cpu_to_le32(count *
1243                        dev->fsa_dev[scmd_id(cmd)].block_size);
1244                readcmd->cid = cpu_to_le16(scmd_id(cmd));
1245                readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
1246                readcmd->bpTotal = 0;
1247                readcmd->bpComplete = 0;
1248                ret = aac_build_sgraw(cmd, &readcmd->sg);
1249                if (ret < 0)
1250                        return ret;
1251                command = ContainerRawIo;
1252                fibsize = sizeof(struct aac_raw_io) +
1253                        ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
1254        }
1255
1256        BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
1257        /*
1258         *      Now send the Fib to the adapter
1259         */
1260        return aac_fib_send(command,
1261                          fib,
1262                          fibsize,
1263                          FsaNormal,
1264                          0, 1,
1265                          (fib_callback) io_callback,
1266                          (void *) cmd);
1267}
1268
1269static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1270{
1271        u16 fibsize;
1272        struct aac_read64 *readcmd;
1273        long ret;
1274
1275        aac_fib_init(fib);
1276        readcmd = (struct aac_read64 *) fib_data(fib);
1277        readcmd->command = cpu_to_le32(VM_CtHostRead64);
1278        readcmd->cid = cpu_to_le16(scmd_id(cmd));
1279        readcmd->sector_count = cpu_to_le16(count);
1280        readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1281        readcmd->pad   = 0;
1282        readcmd->flags = 0;
1283
1284        ret = aac_build_sg64(cmd, &readcmd->sg);
1285        if (ret < 0)
1286                return ret;
1287        fibsize = sizeof(struct aac_read64) +
1288                ((le32_to_cpu(readcmd->sg.count) - 1) *
1289                 sizeof (struct sgentry64));
1290        BUG_ON (fibsize > (fib->dev->max_fib_size -
1291                                sizeof(struct aac_fibhdr)));
1292        /*
1293         *      Now send the Fib to the adapter
1294         */
1295        return aac_fib_send(ContainerCommand64,
1296                          fib,
1297                          fibsize,
1298                          FsaNormal,
1299                          0, 1,
1300                          (fib_callback) io_callback,
1301                          (void *) cmd);
1302}
1303
1304static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1305{
1306        u16 fibsize;
1307        struct aac_read *readcmd;
1308        struct aac_dev *dev = fib->dev;
1309        long ret;
1310
1311        aac_fib_init(fib);
1312        readcmd = (struct aac_read *) fib_data(fib);
1313        readcmd->command = cpu_to_le32(VM_CtBlockRead);
1314        readcmd->cid = cpu_to_le32(scmd_id(cmd));
1315        readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1316        readcmd->count = cpu_to_le32(count *
1317                dev->fsa_dev[scmd_id(cmd)].block_size);
1318
1319        ret = aac_build_sg(cmd, &readcmd->sg);
1320        if (ret < 0)
1321                return ret;
1322        fibsize = sizeof(struct aac_read) +
1323                        ((le32_to_cpu(readcmd->sg.count) - 1) *
1324                         sizeof (struct sgentry));
1325        BUG_ON (fibsize > (fib->dev->max_fib_size -
1326                                sizeof(struct aac_fibhdr)));
1327        /*
1328         *      Now send the Fib to the adapter
1329         */
1330        return aac_fib_send(ContainerCommand,
1331                          fib,
1332                          fibsize,
1333                          FsaNormal,
1334                          0, 1,
1335                          (fib_callback) io_callback,
1336                          (void *) cmd);
1337}
1338
1339static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1340{
1341        struct aac_dev *dev = fib->dev;
1342        u16 fibsize, command;
1343        long ret;
1344
1345        aac_fib_init(fib);
1346        if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
1347                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
1348                !dev->sync_mode) {
1349                struct aac_raw_io2 *writecmd2;
1350                writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
1351                memset(writecmd2, 0, sizeof(struct aac_raw_io2));
1352                writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
1353                writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1354                writecmd2->byteCount = cpu_to_le32(count *
1355                        dev->fsa_dev[scmd_id(cmd)].block_size);
1356                writecmd2->cid = cpu_to_le16(scmd_id(cmd));
1357                writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
1358                                                   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
1359                        cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
1360                        cpu_to_le16(RIO2_IO_TYPE_WRITE);
1361                ret = aac_build_sgraw2(cmd, writecmd2,
1362                                dev->scsi_host_ptr->sg_tablesize);
1363                if (ret < 0)
1364                        return ret;
1365                command = ContainerRawIo2;
1366                fibsize = sizeof(struct aac_raw_io2) +
1367                        ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
1368        } else {
1369                struct aac_raw_io *writecmd;
1370                writecmd = (struct aac_raw_io *) fib_data(fib);
1371                writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1372                writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1373                writecmd->count = cpu_to_le32(count *
1374                        dev->fsa_dev[scmd_id(cmd)].block_size);
1375                writecmd->cid = cpu_to_le16(scmd_id(cmd));
1376                writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
1377                                                   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
1378                        cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
1379                        cpu_to_le16(RIO_TYPE_WRITE);
1380                writecmd->bpTotal = 0;
1381                writecmd->bpComplete = 0;
1382                ret = aac_build_sgraw(cmd, &writecmd->sg);
1383                if (ret < 0)
1384                        return ret;
1385                command = ContainerRawIo;
1386                fibsize = sizeof(struct aac_raw_io) +
1387                        ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
1388        }
1389
1390        BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
1391        /*
1392         *      Now send the Fib to the adapter
1393         */
1394        return aac_fib_send(command,
1395                          fib,
1396                          fibsize,
1397                          FsaNormal,
1398                          0, 1,
1399                          (fib_callback) io_callback,
1400                          (void *) cmd);
1401}
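
/*
 * Interface selection in aac_write_raw_io() above: adapters negotiated to
 * AAC_COMM_MESSAGE_TYPE2/TYPE3 and not in sync mode get an aac_raw_io2
 * request with IEEE-1212 SG elements (ContainerRawIo2); everything else
 * falls back to the legacy aac_raw_io layout (ContainerRawIo).
 */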
1402
1403static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1404{
1405        u16 fibsize;
1406        struct aac_write64 *writecmd;
1407        long ret;
1408
1409        aac_fib_init(fib);
1410        writecmd = (struct aac_write64 *) fib_data(fib);
1411        writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1412        writecmd->cid = cpu_to_le16(scmd_id(cmd));
1413        writecmd->sector_count = cpu_to_le16(count);
1414        writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1415        writecmd->pad   = 0;
1416        writecmd->flags = 0;
1417
1418        ret = aac_build_sg64(cmd, &writecmd->sg);
1419        if (ret < 0)
1420                return ret;
1421        fibsize = sizeof(struct aac_write64) +
1422                ((le32_to_cpu(writecmd->sg.count) - 1) *
1423                 sizeof (struct sgentry64));
1424        BUG_ON (fibsize > (fib->dev->max_fib_size -
1425                                sizeof(struct aac_fibhdr)));
1426        /*
1427         *      Now send the Fib to the adapter
1428         */
1429        return aac_fib_send(ContainerCommand64,
1430                          fib,
1431                          fibsize,
1432                          FsaNormal,
1433                          0, 1,
1434                          (fib_callback) io_callback,
1435                          (void *) cmd);
1436}
1437
1438static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1439{
1440        u16 fibsize;
1441        struct aac_write *writecmd;
1442        struct aac_dev *dev = fib->dev;
1443        long ret;
1444
1445        aac_fib_init(fib);
1446        writecmd = (struct aac_write *) fib_data(fib);
1447        writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1448        writecmd->cid = cpu_to_le32(scmd_id(cmd));
1449        writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1450        writecmd->count = cpu_to_le32(count *
1451                dev->fsa_dev[scmd_id(cmd)].block_size);
1452        writecmd->sg.count = cpu_to_le32(1);
1453        /* ->stable is not used - it used to indicate the type of write */
1454
1455        ret = aac_build_sg(cmd, &writecmd->sg);
1456        if (ret < 0)
1457                return ret;
1458        fibsize = sizeof(struct aac_write) +
1459                ((le32_to_cpu(writecmd->sg.count) - 1) *
1460                 sizeof (struct sgentry));
1461        BUG_ON (fibsize > (fib->dev->max_fib_size -
1462                                sizeof(struct aac_fibhdr)));
1463        /*
1464         *      Now send the Fib to the adapter
1465         */
1466        return aac_fib_send(ContainerCommand,
1467                          fib,
1468                          fibsize,
1469                          FsaNormal,
1470                          0, 1,
1471                          (fib_callback) io_callback,
1472                          (void *) cmd);
1473}
1474
1475static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
1476{
1477        struct aac_srb * srbcmd;
1478        u32 flag;
1479        u32 timeout;
1480
1481        aac_fib_init(fib);
1482        switch(cmd->sc_data_direction){
1483        case DMA_TO_DEVICE:
1484                flag = SRB_DataOut;
1485                break;
1486        case DMA_BIDIRECTIONAL:
1487                flag = SRB_DataIn | SRB_DataOut;
1488                break;
1489        case DMA_FROM_DEVICE:
1490                flag = SRB_DataIn;
1491                break;
1492        case DMA_NONE:
1493        default:        /* shuts up some versions of gcc */
1494                flag = SRB_NoDataXfer;
1495                break;
1496        }
1497
1498        srbcmd = (struct aac_srb*) fib_data(fib);
1499        srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1500        srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
1501        srbcmd->id       = cpu_to_le32(scmd_id(cmd));
1502        srbcmd->lun      = cpu_to_le32(cmd->device->lun);
1503        srbcmd->flags    = cpu_to_le32(flag);
1504        timeout = cmd->request->timeout/HZ;
1505        if (timeout == 0)
1506                timeout = 1;
1507        srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
1508        srbcmd->retry_limit = 0; /* Obsolete parameter */
1509        srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
1510        return srbcmd;
1511}
1512
1513static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
1514                                                        struct scsi_cmnd *cmd)
1515{
1516        struct aac_hba_cmd_req *hbacmd;
1517        struct aac_dev *dev;
1518        int bus, target;
1519        u64 address;
1520
1521        dev = (struct aac_dev *)cmd->device->host->hostdata;
1522
1523        hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
1524        memset(hbacmd, 0, 96);  /* clearing the full sizeof(*hbacmd) is not necessary */
1525        /* iu_type is a parameter of aac_hba_send */
1526        switch (cmd->sc_data_direction) {
1527        case DMA_TO_DEVICE:
1528                hbacmd->byte1 = 2;
1529                break;
1530        case DMA_FROM_DEVICE:
1531        case DMA_BIDIRECTIONAL:
1532                hbacmd->byte1 = 1;
1533                break;
1534        case DMA_NONE:
1535        default:
1536                break;
1537        }
1538        hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
1539
1540        bus = aac_logical_to_phys(scmd_channel(cmd));
1541        target = scmd_id(cmd);
1542        hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
1543
1544        /* we fill in reply_qid later in aac_src_deliver_message */
1545        /* we fill in iu_type, request_id later in aac_hba_send */
1546        /* we fill in emb_data_desc_count later in aac_build_sghba */
1547
1548        memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
1549        hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
1550
1551        address = (u64)fib->hw_error_pa;
1552        hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
1553        hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
1554        hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
1555
1556        return hbacmd;
1557}
1558
1559static void aac_srb_callback(void *context, struct fib * fibptr);
1560
1561static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
1562{
1563        u16 fibsize;
1564        struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1565        long ret;
1566
1567        ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
1568        if (ret < 0)
1569                return ret;
1570        srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1571
1572        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1573        memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
1574        /*
1575         *      Build Scatter/Gather list
1576         */
1577        fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
1578                ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
1579                 sizeof (struct sgentry64));
1580        BUG_ON (fibsize > (fib->dev->max_fib_size -
1581                                sizeof(struct aac_fibhdr)));
1582
1583        /*
1584         *      Now send the Fib to the adapter
1585         */
1586        return aac_fib_send(ScsiPortCommand64, fib,
1587                                fibsize, FsaNormal, 0, 1,
1588                                  (fib_callback) aac_srb_callback,
1589                                  (void *) cmd);
1590}
1591
1592static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
1593{
1594        u16 fibsize;
1595        struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1596        long ret;
1597
1598        ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
1599        if (ret < 0)
1600                return ret;
1601        srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1602
1603        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1604        memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
1605        /*
1606         *      Build Scatter/Gather list
1607         */
1608        fibsize = sizeof (struct aac_srb) +
1609                (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
1610                 sizeof (struct sgentry));
1611        BUG_ON (fibsize > (fib->dev->max_fib_size -
1612                                sizeof(struct aac_fibhdr)));
1613
1614        /*
1615         *      Now send the Fib to the adapter
1616         */
1617        return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
1618                                  (fib_callback) aac_srb_callback, (void *) cmd);
1619}
1620
1621static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
1622{
1623        if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
1624            (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
1625                return FAILED;
1626        return aac_scsi_32(fib, cmd);
1627}
1628
1629static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
1630{
1631        struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
1632        struct aac_dev *dev;
1633        long ret;
1634
1635        dev = (struct aac_dev *)cmd->device->host->hostdata;
1636
1637        ret = aac_build_sghba(cmd, hbacmd,
1638                dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
1639        if (ret < 0)
1640                return ret;
1641
1642        /*
1643         *      Now send the HBA command to the adapter
1644         */
1645        fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
1646                sizeof(struct aac_hba_sgl);
1647
1648        return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
1649                                  (fib_callback) aac_hba_callback,
1650                                  (void *) cmd);
1651}
1652
1653static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
1654        struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
1655{
1656        struct fib      *fibptr;
1657        dma_addr_t      addr;
1658        int             rcode;
1659        int             fibsize;
1660        struct aac_srb  *srb;
1661        struct aac_srb_reply *srb_reply;
1662        struct sgmap64  *sg64;
1663        u32 vbus;
1664        u32 vid;
1665
1666        if (!dev->sa_firmware)
1667                return 0;
1668
1669        /* allocate FIB */
1670        fibptr = aac_fib_alloc(dev);
1671        if (!fibptr)
1672                return -ENOMEM;
1673
1674        aac_fib_init(fibptr);
1675        fibptr->hw_fib_va->header.XferState &=
1676                ~cpu_to_le32(FastResponseCapable);
1677
1678        fibsize  = sizeof(struct aac_srb) - sizeof(struct sgentry) +
1679                                                sizeof(struct sgentry64);
1680
1681        /* allocate DMA buffer for response */
1682        addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
1683                                                        DMA_BIDIRECTIONAL);
1684        if (dma_mapping_error(&dev->pdev->dev, addr)) {
1685                rcode = -ENOMEM;
1686                goto fib_error;
1687        }
1688
1689        srb = fib_data(fibptr);
1690        memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
1691
1692        vbus = (u32)le16_to_cpu(
1693                        dev->supplement_adapter_info.virt_device_bus);
1694        vid  = (u32)le16_to_cpu(
1695                        dev->supplement_adapter_info.virt_device_target);
1696
1697        /* set the common request fields */
1698        srb->channel            = cpu_to_le32(vbus);
1699        srb->id                 = cpu_to_le32(vid);
1700        srb->lun                = 0;
1701        srb->function           = cpu_to_le32(SRBF_ExecuteScsi);
1702        srb->timeout            = 0;
1703        srb->retry_limit        = 0;
1704        srb->cdb_size           = cpu_to_le32(16);
1705        srb->count              = cpu_to_le32(xfer_len);
1706
1707        sg64 = (struct sgmap64 *)&srb->sg;
1708        sg64->count             = cpu_to_le32(1);
1709        sg64->sg[0].addr[1]     = cpu_to_le32(upper_32_bits(addr));
1710        sg64->sg[0].addr[0]     = cpu_to_le32(lower_32_bits(addr));
1711        sg64->sg[0].count       = cpu_to_le32(xfer_len);
1712
1713        /*
1714         * Copy the updated request data back for dumping or other usage if needed
1715         */
1716        memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
1717
1718        /* issue request to the controller */
1719        rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
1720                                        1, 1, NULL, NULL);
1721
1722        if (rcode == -ERESTARTSYS)
1723                rcode = -ERESTART;
1724
1725        if (unlikely(rcode < 0))
1726                goto bmic_error;
1727
1728        srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
1729        memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
1730
1731bmic_error:
1732        dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
1733fib_error:
1734        aac_fib_complete(fibptr);
1735        aac_fib_free(fibptr);
1736        return rcode;
1737}
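
/*
 * Minimal usage sketch for aac_send_safw_bmic_cmd() (the callers below show
 * the real requests): zero an aac_srb_unit, fill in the CDB and the data
 * direction flags, and pass a caller-allocated kernel buffer that the
 * routine maps for DMA as a single 64-bit SG element, e.g.
 *
 *      struct aac_srb_unit srbu;
 *
 *      memset(&srbu, 0, sizeof(srbu));
 *      srbu.srb.flags  = cpu_to_le32(SRB_DataIn);
 *      srbu.srb.cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
 *      rcode = aac_send_safw_bmic_cmd(dev, &srbu, buf, buf_len);
 *
 * where buf/buf_len are placeholders for the response buffer.  On success
 * the SRB reply is copied back into srbu.srb_reply.
 */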
1738
1739static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
1740{
1742        struct aac_ciss_identify_pd *identify_resp;
1743
1744        if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
1745                return;
1746
1747        identify_resp = dev->hba_map[bus][target].safw_identify_resp;
1748        if (identify_resp == NULL) {
1749                dev->hba_map[bus][target].qd_limit = 32;
1750                return;
1751        }
1752
1753        if (identify_resp->current_queue_depth_limit <= 0 ||
1754                identify_resp->current_queue_depth_limit > 255)
1755                dev->hba_map[bus][target].qd_limit = 32;
1756        else
1757                dev->hba_map[bus][target].qd_limit =
1758                        identify_resp->current_queue_depth_limit;
1759}
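
/*
 * Example of the clamping above: a reported current_queue_depth_limit of 0
 * or anything above 255 falls back to the default of 32, while a sane value
 * such as 64 is used as the per-target qd_limit directly.
 */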
1760
1761static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
1762        struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
1763{
1764        int rcode = -ENOMEM;
1765        int datasize;
1766        struct aac_srb_unit srbu;
1767        struct aac_srb *srbcmd;
1768        struct aac_ciss_identify_pd *identify_reply;
1769
1770        datasize = sizeof(struct aac_ciss_identify_pd);
1771        identify_reply = kmalloc(datasize, GFP_KERNEL);
1772        if (!identify_reply)
1773                goto out;
1774
1775        memset(&srbu, 0, sizeof(struct aac_srb_unit));
1776
1777        srbcmd = &srbu.srb;
1778        srbcmd->flags   = cpu_to_le32(SRB_DataIn);
1779        srbcmd->cdb[0]  = 0x26;
1780        srbcmd->cdb[2]  = (u8)((AAC_MAX_LUN + target) & 0x00FF);
1781        srbcmd->cdb[6]  = CISS_IDENTIFY_PHYSICAL_DEVICE;
1782
1783        rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
1784        if (unlikely(rcode < 0))
1785                goto mem_free_all;
1786
1787        *identify_resp = identify_reply;
1788
1789out:
1790        return rcode;
1791mem_free_all:
1792        kfree(identify_reply);
1793        goto out;
1794}
1795
1796static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
1797{
1798        kfree(dev->safw_phys_luns);
1799        dev->safw_phys_luns = NULL;
1800}
1801
1802/**
1803 *      aac_get_safw_ciss_luns() - Process topology change
1804 *      @dev:           aac_dev structure
1805 *
1806 *      Execute a CISS REPORT PHYSICAL LUNS request and save the response
1807 *      in dev->safw_phys_luns for later processing into the hba_map.
1808 */
1809static int aac_get_safw_ciss_luns(struct aac_dev *dev)
1810{
1811        int rcode = -ENOMEM;
1812        int datasize;
1813        struct aac_srb *srbcmd;
1814        struct aac_srb_unit srbu;
1815        struct aac_ciss_phys_luns_resp *phys_luns;
1816
1817        datasize = sizeof(struct aac_ciss_phys_luns_resp) +
1818                (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
1819        phys_luns = kmalloc(datasize, GFP_KERNEL);
1820        if (phys_luns == NULL)
1821                goto out;
1822
1823        memset(&srbu, 0, sizeof(struct aac_srb_unit));
1824
1825        srbcmd = &srbu.srb;
1826        srbcmd->flags   = cpu_to_le32(SRB_DataIn);
1827        srbcmd->cdb[0]  = CISS_REPORT_PHYSICAL_LUNS;
1828        srbcmd->cdb[1]  = 2; /* extended reporting */
1829        srbcmd->cdb[8]  = (u8)(datasize >> 8);
1830        srbcmd->cdb[9]  = (u8)(datasize);
1831
1832        rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
1833        if (unlikely(rcode < 0))
1834                goto mem_free_all;
1835
1836        if (phys_luns->resp_flag != 2) {
1837                rcode = -ENOMSG;
1838                goto mem_free_all;
1839        }
1840
1841        dev->safw_phys_luns = phys_luns;
1842
1843out:
1844        return rcode;
1845mem_free_all:
1846        kfree(phys_luns);
1847        goto out;
1848}
1849
1850static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
1851{
1852        return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
1853}
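
/*
 * list_length is a big-endian byte count of the LUN entries that follow;
 * dividing by the 24-byte size of each returned entry gives the number of
 * physical LUNs reported.
 */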
1854
1855static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
1856{
1857        return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
1858}
1859
1860static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
1861{
1862        return dev->safw_phys_luns->lun[lun].level2[0];
1863}
1864
1865static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
1866{
1867        return dev->safw_phys_luns->lun[lun].bus >> 6;
1868}
1869
1870static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
1871{
1872        return dev->safw_phys_luns->lun[lun].node_ident[9];
1873}
1874
1875static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
1876{
1877        return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
1878}
1879
1880static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
1881{
1882        return dev->safw_phys_luns->lun[lun].node_ident[8];
1883}
1884
1885static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
1886                                                int bus, int target)
1887{
1888        kfree(dev->hba_map[bus][target].safw_identify_resp);
1889        dev->hba_map[bus][target].safw_identify_resp = NULL;
1890}
1891
1892static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
1893        int lun_count)
1894{
1895        int luns;
1896        int i;
1897        u32 bus;
1898        u32 target;
1899
1900        luns = aac_get_safw_phys_lun_count(dev);
1901
1902        if (luns < lun_count)
1903                lun_count = luns;
1904        else if (lun_count < 0)
1905                lun_count = luns;
1906
1907        for (i = 0; i < lun_count; i++) {
1908                bus = aac_get_safw_phys_bus(dev, i);
1909                target = aac_get_safw_phys_target(dev, i);
1910
1911                aac_free_safw_identify_resp(dev, bus, target);
1912        }
1913}
1914
1915static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
1916{
1917        int i;
1918        int rcode = 0;
1919        u32 lun_count;
1920        u32 bus;
1921        u32 target;
1922        struct aac_ciss_identify_pd *identify_resp = NULL;
1923
1924        lun_count = aac_get_safw_phys_lun_count(dev);
1925
1926        for (i = 0; i < lun_count; ++i) {
1927
1928                bus = aac_get_safw_phys_bus(dev, i);
1929                target = aac_get_safw_phys_target(dev, i);
1930
1931                rcode = aac_issue_safw_bmic_identify(dev,
1932                                                &identify_resp, bus, target);
1933
1934                if (unlikely(rcode < 0))
1935                        goto free_identify_resp;
1936
1937                dev->hba_map[bus][target].safw_identify_resp = identify_resp;
1938        }
1939
1940out:
1941        return rcode;
1942free_identify_resp:
1943        aac_free_safw_all_identify_resp(dev, i);
1944        goto out;
1945}
1946
1947/**
1948 *      aac_set_safw_attr_all_targets - update current hba map with data from FW
1949 *      @dev:   aac_dev structure
1952 *
1953 *      Update our hba map with the information gathered from the FW
1954 */
1955static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
1956{
1957        /* LUN report already validated as extended reporting (resp_flag == 2) */
1958        u32 lun_count, nexus;
1959        u32 i, bus, target;
1960        u8 expose_flag, attribs;
1961
1962        lun_count = aac_get_safw_phys_lun_count(dev);
1963
1964        dev->scan_counter++;
1965
1966        for (i = 0; i < lun_count; ++i) {
1967
1968                bus = aac_get_safw_phys_bus(dev, i);
1969                target = aac_get_safw_phys_target(dev, i);
1970                expose_flag = aac_get_safw_phys_expose_flag(dev, i);
1971                attribs = aac_get_safw_phys_attribs(dev, i);
1972                nexus = aac_get_safw_phys_nexus(dev, i);
1973
1974                if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
1975                        continue;
1976
1977                if (expose_flag != 0) {
1978                        dev->hba_map[bus][target].devtype =
1979                                AAC_DEVTYPE_RAID_MEMBER;
1980                        continue;
1981                }
1982
1983                if (nexus != 0 && (attribs & 8)) {
1984                        dev->hba_map[bus][target].devtype =
1985                                AAC_DEVTYPE_NATIVE_RAW;
1986                        dev->hba_map[bus][target].rmw_nexus =
1987                                        nexus;
1988                } else
1989                        dev->hba_map[bus][target].devtype =
1990                                AAC_DEVTYPE_ARC_RAW;
1991
1992                dev->hba_map[bus][target].scan_counter = dev->scan_counter;
1993
1994                aac_set_safw_target_qd(dev, bus, target);
1995        }
1996}
1997
1998static int aac_setup_safw_targets(struct aac_dev *dev)
1999{
2000        int rcode = 0;
2001
2002        rcode = aac_get_containers(dev);
2003        if (unlikely(rcode < 0))
2004                goto out;
2005
2006        rcode = aac_get_safw_ciss_luns(dev);
2007        if (unlikely(rcode < 0))
2008                goto out;
2009
2010        rcode = aac_get_safw_attr_all_targets(dev);
2011        if (unlikely(rcode < 0))
2012                goto free_ciss_luns;
2013
2014        aac_set_safw_attr_all_targets(dev);
2015
2016        aac_free_safw_all_identify_resp(dev, -1);
2017free_ciss_luns:
2018        aac_free_safw_ciss_luns(dev);
2019out:
2020        return rcode;
2021}
2022
2023int aac_setup_safw_adapter(struct aac_dev *dev)
2024{
2025        return aac_setup_safw_targets(dev);
2026}
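
/*
 * Discovery flow wired up above: aac_setup_safw_adapter() runs
 * aac_setup_safw_targets(), which (1) refreshes the container list,
 * (2) issues CISS REPORT PHYSICAL LUNS, (3) issues a BMIC IDENTIFY for each
 * reported device, (4) folds the results into dev->hba_map via
 * aac_set_safw_attr_all_targets(), and finally frees the temporary identify
 * and LUN buffers.
 */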
2027
2028int aac_get_adapter_info(struct aac_dev* dev)
2029{
2030        struct fib* fibptr;
2031        int rcode;
2032        u32 tmp, bus, target;
2033        struct aac_adapter_info *info;
2034        struct aac_bus_info *command;
2035        struct aac_bus_info_response *bus_info;
2036
2037        if (!(fibptr = aac_fib_alloc(dev)))
2038                return -ENOMEM;
2039
2040        aac_fib_init(fibptr);
2041        info = (struct aac_adapter_info *) fib_data(fibptr);
2042        memset(info,0,sizeof(*info));
2043
2044        rcode = aac_fib_send(RequestAdapterInfo,
2045                         fibptr,
2046                         sizeof(*info),
2047                         FsaNormal,
2048                         -1, 1, /* First `interrupt' command uses special wait */
2049                         NULL,
2050                         NULL);
2051
2052        if (rcode < 0) {
2053                /* FIB should be freed only after
2054                 * getting the response from the F/W */
2055                if (rcode != -ERESTARTSYS) {
2056                        aac_fib_complete(fibptr);
2057                        aac_fib_free(fibptr);
2058                }
2059                return rcode;
2060        }
2061        memcpy(&dev->adapter_info, info, sizeof(*info));
2062
2063        dev->supplement_adapter_info.virt_device_bus = 0xffff;
2064        if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
2065                struct aac_supplement_adapter_info * sinfo;
2066
2067                aac_fib_init(fibptr);
2068
2069                sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
2070
2071                memset(sinfo,0,sizeof(*sinfo));
2072
2073                rcode = aac_fib_send(RequestSupplementAdapterInfo,
2074                                 fibptr,
2075                                 sizeof(*sinfo),
2076                                 FsaNormal,
2077                                 1, 1,
2078                                 NULL,
2079                                 NULL);
2080
2081                if (rcode >= 0)
2082                        memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
2083                if (rcode == -ERESTARTSYS) {
2084                        fibptr = aac_fib_alloc(dev);
2085                        if (!fibptr)
2086                                return -ENOMEM;
2087                }
2088
2089        }
2090
2091        /* reset all previous mapped devices (i.e. for init. after IOP_RESET) */
2092        for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
2093                for (target = 0; target < AAC_MAX_TARGETS; target++) {
2094                        dev->hba_map[bus][target].devtype = 0;
2095                        dev->hba_map[bus][target].qd_limit = 0;
2096                }
2097        }
2098
2099        /*
2100         * GetBusInfo
2101         */
2102
2103        aac_fib_init(fibptr);
2104
2105        bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
2106
2107        memset(bus_info, 0, sizeof(*bus_info));
2108
2109        command = (struct aac_bus_info *)bus_info;
2110
2111        command->Command = cpu_to_le32(VM_Ioctl);
2112        command->ObjType = cpu_to_le32(FT_DRIVE);
2113        command->MethodId = cpu_to_le32(1);
2114        command->CtlCmd = cpu_to_le32(GetBusInfo);
2115
2116        rcode = aac_fib_send(ContainerCommand,
2117                         fibptr,
2118                         sizeof (*bus_info),
2119                         FsaNormal,
2120                         1, 1,
2121                         NULL, NULL);
2122
2123        /* reasoned default */
2124        dev->maximum_num_physicals = 16;
2125        if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
2126                dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
2127                dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
2128        }
2129
2130        if (!dev->in_reset) {
2131                char buffer[16];
2132                tmp = le32_to_cpu(dev->adapter_info.kernelrev);
2133                printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
2134                        dev->name,
2135                        dev->id,
2136                        tmp>>24,
2137                        (tmp>>16)&0xff,
2138                        tmp&0xff,
2139                        le32_to_cpu(dev->adapter_info.kernelbuild),
2140                        (int)sizeof(dev->supplement_adapter_info.build_date),
2141                        dev->supplement_adapter_info.build_date);
2142                tmp = le32_to_cpu(dev->adapter_info.monitorrev);
2143                printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
2144                        dev->name, dev->id,
2145                        tmp>>24,(tmp>>16)&0xff,tmp&0xff,
2146                        le32_to_cpu(dev->adapter_info.monitorbuild));
2147                tmp = le32_to_cpu(dev->adapter_info.biosrev);
2148                printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
2149                        dev->name, dev->id,
2150                        tmp>>24,(tmp>>16)&0xff,tmp&0xff,
2151                        le32_to_cpu(dev->adapter_info.biosbuild));
2152                buffer[0] = '\0';
2153                if (aac_get_serial_number(
2154                  shost_to_class(dev->scsi_host_ptr), buffer))
2155                        printk(KERN_INFO "%s%d: serial %s",
2156                          dev->name, dev->id, buffer);
2157                if (dev->supplement_adapter_info.vpd_info.tsid[0]) {
2158                        printk(KERN_INFO "%s%d: TSID %.*s\n",
2159                          dev->name, dev->id,
2160                          (int)sizeof(dev->supplement_adapter_info
2161                                                        .vpd_info.tsid),
2162                                dev->supplement_adapter_info.vpd_info.tsid);
2163                }
2164                if (!aac_check_reset || ((aac_check_reset == 1) &&
2165                  (dev->supplement_adapter_info.supported_options2 &
2166                  AAC_OPTION_IGNORE_RESET))) {
2167                        printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
2168                          dev->name, dev->id);
2169                }
2170        }
2171
2172        dev->cache_protected = 0;
2173        dev->jbod = ((dev->supplement_adapter_info.feature_bits &
2174                AAC_FEATURE_JBOD) != 0);
2175        dev->nondasd_support = 0;
2176        dev->raid_scsi_mode = 0;
2177        if(dev->adapter_info.options & AAC_OPT_NONDASD)
2178                dev->nondasd_support = 1;
2179
2180        /*
2181         * If the firmware supports ROMB RAID/SCSI mode and we are currently
2182         * in RAID/SCSI mode, set the flag. For now if in this mode we will
2183         * force nondasd support on. If we decide to allow the non-dasd flag
2184         * additional changes changes will have to be made to support
2185         * additional changes will have to be made to support
2186         * RAID/SCSI. The function aac_scsi_cmd in this module will have to be
2187         * leaching off of the dev->nondasd_support flag. Also in linit.c the
2188         * function aac_detect will have to be modified where it sets up the
2189         * max number of channels based on the aac->nondasd_support flag only.
2190         */
2191        if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
2192            (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
2193                dev->nondasd_support = 1;
2194                dev->raid_scsi_mode = 1;
2195        }
2196        if (dev->raid_scsi_mode != 0)
2197                printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
2198                                dev->name, dev->id);
2199
2200        if (nondasd != -1)
2201                dev->nondasd_support = (nondasd!=0);
2202        if (dev->nondasd_support && !dev->in_reset)
2203                printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
2204
2205        if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
2206                dev->needs_dac = 1;
2207        dev->dac_support = 0;
2208        if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
2209            (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
2210                if (!dev->in_reset)
2211                        printk(KERN_INFO "%s%d: 64bit support enabled.\n",
2212                                dev->name, dev->id);
2213                dev->dac_support = 1;
2214        }
2215
2216        if(dacmode != -1) {
2217                dev->dac_support = (dacmode!=0);
2218        }
2219
2220        /* avoid problems with AAC_QUIRK_SCSI_32 controllers */
2221        if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
2222                & AAC_QUIRK_SCSI_32)) {
2223                dev->nondasd_support = 0;
2224                dev->jbod = 0;
2225                expose_physicals = 0;
2226        }
2227
2228        if (dev->dac_support) {
2229                if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
2230                        if (!dev->in_reset)
2231                                dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
2232                } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
2233                        dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
2234                        dev->dac_support = 0;
2235                } else {
2236                        dev_info(&dev->pdev->dev, "No suitable DMA available\n");
2237                        rcode = -ENOMEM;
2238                }
2239        }
2240        /*
2241         * Deal with configuring for the individualized limits of each packet
2242         * interface.
2243         */
2244        dev->a_ops.adapter_scsi = (dev->dac_support)
2245          ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
2246                                ? aac_scsi_32_64
2247                                : aac_scsi_64)
2248                                : aac_scsi_32;
2249        if (dev->raw_io_interface) {
2250                dev->a_ops.adapter_bounds = (dev->raw_io_64)
2251                                        ? aac_bounds_64
2252                                        : aac_bounds_32;
2253                dev->a_ops.adapter_read = aac_read_raw_io;
2254                dev->a_ops.adapter_write = aac_write_raw_io;
2255        } else {
2256                dev->a_ops.adapter_bounds = aac_bounds_32;
2257                dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
2258                        sizeof(struct aac_fibhdr) -
2259                        sizeof(struct aac_write) + sizeof(struct sgentry)) /
2260                                sizeof(struct sgentry);
2261                if (dev->dac_support) {
2262                        dev->a_ops.adapter_read = aac_read_block64;
2263                        dev->a_ops.adapter_write = aac_write_block64;
2264                        /*
2265                         * 38 scatter gather elements
2266                         */
2267                        dev->scsi_host_ptr->sg_tablesize =
2268                                (dev->max_fib_size -
2269                                sizeof(struct aac_fibhdr) -
2270                                sizeof(struct aac_write64) +
2271                                sizeof(struct sgentry64)) /
2272                                        sizeof(struct sgentry64);
2273                } else {
2274                        dev->a_ops.adapter_read = aac_read_block;
2275                        dev->a_ops.adapter_write = aac_write_block;
2276                }
2277                dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
2278                if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
2279                        /*
2280                         * Worst case size that could cause sg overflow when
2281                         * we break up SG elements that are larger than 64KB.
2282                         * Would be nice if we could tell the SCSI layer what
2283                         * the maximum SG element size can be. Worst case is
2284                         * (sg_tablesize-1) 4KB elements with one 64KB
2285                         * element.
2286                         *      32bit -> 468 or 238KB   64bit -> 424 or 212KB
2287                         */
2288                        dev->scsi_host_ptr->max_sectors =
2289                          (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
2290                }
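                /*
                 * Worked example of the formula above: a sg_tablesize of 34
                 * gives 34 * 8 + 112 = 384 sectors, i.e. 192KB per request
                 * (the 34 here is only an illustrative value).
                 */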
2291        }
2292        if (!dev->sync_mode && dev->sa_firmware &&
2293                dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
2294                dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
2295                        HBA_MAX_SG_SEPARATE;
2296
2297        /* FIB should be freed only after getting the response from the F/W */
2298        if (rcode != -ERESTARTSYS) {
2299                aac_fib_complete(fibptr);
2300                aac_fib_free(fibptr);
2301        }
2302
2303        return rcode;
2304}
2305
2306
2307static void io_callback(void *context, struct fib * fibptr)
2308{
2309        struct aac_dev *dev;
2310        struct aac_read_reply *readreply;
2311        struct scsi_cmnd *scsicmd;
2312        u32 cid;
2313
2314        scsicmd = (struct scsi_cmnd *) context;
2315
2316        if (!aac_valid_context(scsicmd, fibptr))
2317                return;
2318
2319        dev = fibptr->dev;
2320        cid = scmd_id(scsicmd);
2321
2322        if (nblank(dprintk(x))) {
2323                u64 lba;
2324                switch (scsicmd->cmnd[0]) {
2325                case WRITE_6:
2326                case READ_6:
2327                        lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2328                            (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2329                        break;
2330                case WRITE_16:
2331                case READ_16:
2332                        lba = ((u64)scsicmd->cmnd[2] << 56) |
2333                              ((u64)scsicmd->cmnd[3] << 48) |
2334                              ((u64)scsicmd->cmnd[4] << 40) |
2335                              ((u64)scsicmd->cmnd[5] << 32) |
2336                              ((u64)scsicmd->cmnd[6] << 24) |
2337                              (scsicmd->cmnd[7] << 16) |
2338                              (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2339                        break;
2340                case WRITE_12:
2341                case READ_12:
2342                        lba = ((u64)scsicmd->cmnd[2] << 24) |
2343                              (scsicmd->cmnd[3] << 16) |
2344                              (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2345                        break;
2346                default:
2347                        lba = ((u64)scsicmd->cmnd[2] << 24) |
2348                               (scsicmd->cmnd[3] << 16) |
2349                               (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2350                        break;
2351                }
2352                printk(KERN_DEBUG
2353                  "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
2354                  smp_processor_id(), (unsigned long long)lba, jiffies);
2355        }
2356
2357        BUG_ON(fibptr == NULL);
2358
2359        scsi_dma_unmap(scsicmd);
2360
2361        readreply = (struct aac_read_reply *)fib_data(fibptr);
2362        switch (le32_to_cpu(readreply->status)) {
2363        case ST_OK:
2364                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2365                        SAM_STAT_GOOD;
2366                dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
2367                break;
2368        case ST_NOT_READY:
2369                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2370                        SAM_STAT_CHECK_CONDITION;
2371                set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
2372                  SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
2373                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2374                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2375                             SCSI_SENSE_BUFFERSIZE));
2376                break;
2377        case ST_MEDERR:
2378                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2379                        SAM_STAT_CHECK_CONDITION;
2380                set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
2381                  SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
2382                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2383                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2384                             SCSI_SENSE_BUFFERSIZE));
2385                break;
2386        default:
2387#ifdef AAC_DETAILED_STATUS_INFO
2388                printk(KERN_WARNING "io_callback: io failed, status = %d\n",
2389                  le32_to_cpu(readreply->status));
2390#endif
2391                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2392                        SAM_STAT_CHECK_CONDITION;
2393                set_sense(&dev->fsa_dev[cid].sense_data,
2394                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2395                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2396                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2397                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2398                             SCSI_SENSE_BUFFERSIZE));
2399                break;
2400        }
2401        aac_fib_complete(fibptr);
2402
2403        scsicmd->scsi_done(scsicmd);
2404}
2405
2406static int aac_read(struct scsi_cmnd * scsicmd)
2407{
2408        u64 lba;
2409        u32 count;
2410        int status;
2411        struct aac_dev *dev;
2412        struct fib * cmd_fibcontext;
2413        int cid;
2414
2415        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2416        /*
2417         *      Get block address and transfer length
2418         */
2419        switch (scsicmd->cmnd[0]) {
2420        case READ_6:
2421                dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
2422
2423                lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2424                        (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2425                count = scsicmd->cmnd[4];
2426
2427                if (count == 0)
2428                        count = 256;
2429                break;
2430        case READ_16:
2431                dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
2432
2433                lba =   ((u64)scsicmd->cmnd[2] << 56) |
2434                        ((u64)scsicmd->cmnd[3] << 48) |
2435                        ((u64)scsicmd->cmnd[4] << 40) |
2436                        ((u64)scsicmd->cmnd[5] << 32) |
2437                        ((u64)scsicmd->cmnd[6] << 24) |
2438                        (scsicmd->cmnd[7] << 16) |
2439                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2440                count = (scsicmd->cmnd[10] << 24) |
2441                        (scsicmd->cmnd[11] << 16) |
2442                        (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2443                break;
2444        case READ_12:
2445                dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
2446
2447                lba = ((u64)scsicmd->cmnd[2] << 24) |
2448                        (scsicmd->cmnd[3] << 16) |
2449                        (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2450                count = (scsicmd->cmnd[6] << 24) |
2451                        (scsicmd->cmnd[7] << 16) |
2452                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2453                break;
2454        default:
2455                dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
2456
2457                lba = ((u64)scsicmd->cmnd[2] << 24) |
2458                        (scsicmd->cmnd[3] << 16) |
2459                        (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2460                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2461                break;
2462        }
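        /*
         * Worked example for the default READ(10) case above: a CDB of
         * 28 00 00 00 12 34 00 00 08 00 decodes to lba = 0x1234 (4660)
         * and count = 8 sectors.
         */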
2463
2464        if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2465                cid = scmd_id(scsicmd);
2466                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2467                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2468                        SAM_STAT_CHECK_CONDITION;
2469                set_sense(&dev->fsa_dev[cid].sense_data,
2470                          HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2471                          ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2472                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2473                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2474                             SCSI_SENSE_BUFFERSIZE));
2475                scsicmd->scsi_done(scsicmd);
2476                return 1;
2477        }
2478
2479        dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
2480          smp_processor_id(), (unsigned long long)lba, jiffies));
2481        if (aac_adapter_bounds(dev,scsicmd,lba))
2482                return 0;
2483        /*
2484         *      Allocate and initialize a Fib
2485         */
2486        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2487        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2488        status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
2489
2490        /*
2491         *      Check that the command was queued to the controller
2492         */
2493        if (status == -EINPROGRESS)
2494                return 0;
2495
2496        printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
2497        /*
2498         *      For some reason the Fib didn't queue; return QUEUE_FULL
2499         */
2500        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2501        scsicmd->scsi_done(scsicmd);
2502        aac_fib_complete(cmd_fibcontext);
2503        aac_fib_free(cmd_fibcontext);
2504        return 0;
2505}
2506
2507static int aac_write(struct scsi_cmnd * scsicmd)
2508{
2509        u64 lba;
2510        u32 count;
2511        int fua;
2512        int status;
2513        struct aac_dev *dev;
2514        struct fib * cmd_fibcontext;
2515        int cid;
2516
2517        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2518        /*
2519         *      Get block address and transfer length
2520         */
2521        if (scsicmd->cmnd[0] == WRITE_6)        /* 6 byte command */
2522        {
2523                lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2524                count = scsicmd->cmnd[4];
2525                if (count == 0)
2526                        count = 256;
2527                fua = 0;
2528        } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
2529                dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
2530
2531                lba =   ((u64)scsicmd->cmnd[2] << 56) |
2532                        ((u64)scsicmd->cmnd[3] << 48) |
2533                        ((u64)scsicmd->cmnd[4] << 40) |
2534                        ((u64)scsicmd->cmnd[5] << 32) |
2535                        ((u64)scsicmd->cmnd[6] << 24) |
2536                        (scsicmd->cmnd[7] << 16) |
2537                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2538                count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
2539                        (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2540                fua = scsicmd->cmnd[1] & 0x8;
2541        } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
2542                dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
2543
2544                lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
2545                    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2546                count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
2547                      | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2548                fua = scsicmd->cmnd[1] & 0x8;
2549        } else {
2550                dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
2551                lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2552                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2553                fua = scsicmd->cmnd[1] & 0x8;
2554        }
2555
2556        if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2557                cid = scmd_id(scsicmd);
2558                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2559                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2560                        SAM_STAT_CHECK_CONDITION;
2561                set_sense(&dev->fsa_dev[cid].sense_data,
2562                          HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2563                          ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2564                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2565                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2566                             SCSI_SENSE_BUFFERSIZE));
2567                scsicmd->scsi_done(scsicmd);
2568                return 1;
2569        }
2570
2571        dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
2572          smp_processor_id(), (unsigned long long)lba, jiffies));
2573        if (aac_adapter_bounds(dev,scsicmd,lba))
2574                return 0;
2575        /*
2576         *      Allocate and initialize a Fib then setup a BlockWrite command
2577         */
2578        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2579        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2580        status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
2581
2582        /*
2583         *      Check that the command was queued to the controller
2584         */
2585        if (status == -EINPROGRESS)
2586                return 0;
2587
2588        printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
2589        /*
2590         *      For some reason the Fib didn't queue; return QUEUE_FULL
2591         */
2592        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2593        scsicmd->scsi_done(scsicmd);
2594
2595        aac_fib_complete(cmd_fibcontext);
2596        aac_fib_free(cmd_fibcontext);
2597        return 0;
2598}
2599
2600static void synchronize_callback(void *context, struct fib *fibptr)
2601{
2602        struct aac_synchronize_reply *synchronizereply;
2603        struct scsi_cmnd *cmd;
2604
2605        cmd = context;
2606
2607        if (!aac_valid_context(cmd, fibptr))
2608                return;
2609
2610        dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
2611                                smp_processor_id(), jiffies));
2612        BUG_ON(fibptr == NULL);
2613
2615        synchronizereply = fib_data(fibptr);
2616        if (le32_to_cpu(synchronizereply->status) == CT_OK)
2617                cmd->result = DID_OK << 16 |
2618                        COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2619        else {
2620                struct scsi_device *sdev = cmd->device;
2621                struct aac_dev *dev = fibptr->dev;
2622                u32 cid = sdev_id(sdev);
2623                printk(KERN_WARNING
2624                     "synchronize_callback: synchronize failed, status = %d\n",
2625                     le32_to_cpu(synchronizereply->status));
2626                cmd->result = DID_OK << 16 |
2627                        COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2628                set_sense(&dev->fsa_dev[cid].sense_data,
2629                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2630                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2631                memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2632                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2633                             SCSI_SENSE_BUFFERSIZE));
2634        }
2635
2636        aac_fib_complete(fibptr);
2637        aac_fib_free(fibptr);
2638        cmd->scsi_done(cmd);
2639}
2640
2641static int aac_synchronize(struct scsi_cmnd *scsicmd)
2642{
2643        int status;
2644        struct fib *cmd_fibcontext;
2645        struct aac_synchronize *synchronizecmd;
2646        struct scsi_cmnd *cmd;
2647        struct scsi_device *sdev = scsicmd->device;
2648        int active = 0;
2649        struct aac_dev *aac;
2650        u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
2651                (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2652        u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2653        unsigned long flags;
2654
2655        /*
2656         * Wait for all outstanding queued commands to complete to this
2657         * specific target (block).
2658         */
2659        spin_lock_irqsave(&sdev->list_lock, flags);
2660        list_for_each_entry(cmd, &sdev->cmd_list, list)
2661                if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
2662                        u64 cmnd_lba;
2663                        u32 cmnd_count;
2664
2665                        if (cmd->cmnd[0] == WRITE_6) {
2666                                cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
2667                                        (cmd->cmnd[2] << 8) |
2668                                        cmd->cmnd[3];
2669                                cmnd_count = cmd->cmnd[4];
2670                                if (cmnd_count == 0)
2671                                        cmnd_count = 256;
2672                        } else if (cmd->cmnd[0] == WRITE_16) {
2673                                cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
2674                                        ((u64)cmd->cmnd[3] << 48) |
2675                                        ((u64)cmd->cmnd[4] << 40) |
2676                                        ((u64)cmd->cmnd[5] << 32) |
2677                                        ((u64)cmd->cmnd[6] << 24) |
2678                                        (cmd->cmnd[7] << 16) |
2679                                        (cmd->cmnd[8] << 8) |
2680                                        cmd->cmnd[9];
2681                                cmnd_count = (cmd->cmnd[10] << 24) |
2682                                        (cmd->cmnd[11] << 16) |
2683                                        (cmd->cmnd[12] << 8) |
2684                                        cmd->cmnd[13];
2685                        } else if (cmd->cmnd[0] == WRITE_12) {
2686                                cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
2687                                        (cmd->cmnd[3] << 16) |
2688                                        (cmd->cmnd[4] << 8) |
2689                                        cmd->cmnd[5];
2690                                cmnd_count = (cmd->cmnd[6] << 24) |
2691                                        (cmd->cmnd[7] << 16) |
2692                                        (cmd->cmnd[8] << 8) |
2693                                        cmd->cmnd[9];
2694                        } else if (cmd->cmnd[0] == WRITE_10) {
2695                                cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
2696                                        (cmd->cmnd[3] << 16) |
2697                                        (cmd->cmnd[4] << 8) |
2698                                        cmd->cmnd[5];
2699                                cmnd_count = (cmd->cmnd[7] << 8) |
2700                                        cmd->cmnd[8];
2701                        } else
2702                                continue;
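                                /*
                                 * Overlap test: skip this outstanding write
                                 * unless its LBA range can intersect the range
                                 * being flushed. A zero count means "flush to
                                 * the end of the medium", so only the lower
                                 * bound is checked in that case.
                                 */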
2703                        if (((cmnd_lba + cmnd_count) < lba) ||
2704                          (count && ((lba + count) < cmnd_lba)))
2705                                continue;
2706                        ++active;
2707                        break;
2708                }
2709
2710        spin_unlock_irqrestore(&sdev->list_lock, flags);
2711
2712        /*
2713         *      Yield the processor (requeue for later)
2714         */
2715        if (active)
2716                return SCSI_MLQUEUE_DEVICE_BUSY;
2717
2718        aac = (struct aac_dev *)sdev->host->hostdata;
2719        if (aac->in_reset)
2720                return SCSI_MLQUEUE_HOST_BUSY;
2721
2722        /*
2723         *      Allocate and initialize a Fib
2724         */
2725        if (!(cmd_fibcontext = aac_fib_alloc(aac)))
2726                return SCSI_MLQUEUE_HOST_BUSY;
2727
2728        aac_fib_init(cmd_fibcontext);
2729
2730        synchronizecmd = fib_data(cmd_fibcontext);
2731        synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
2732        synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
2733        synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
2734        synchronizecmd->count =
2735             cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
2736        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2737
2738        /*
2739         *      Now send the Fib to the adapter
2740         */
2741        status = aac_fib_send(ContainerCommand,
2742                  cmd_fibcontext,
2743                  sizeof(struct aac_synchronize),
2744                  FsaNormal,
2745                  0, 1,
2746                  (fib_callback)synchronize_callback,
2747                  (void *)scsicmd);
2748
2749        /*
2750         *      Check that the command was queued to the controller
2751         */
2752        if (status == -EINPROGRESS)
2753                return 0;
2754
2755        printk(KERN_WARNING
2756                "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
2757        aac_fib_complete(cmd_fibcontext);
2758        aac_fib_free(cmd_fibcontext);
2759        return SCSI_MLQUEUE_HOST_BUSY;
2760}
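    /*
     * Illustration only (not used by the driver): the open-coded big-endian
     * CDB field extraction in aac_synchronize() above is equivalent to the
     * unaligned helpers from <asm/unaligned.h>. The helper name below is
     * hypothetical.
     */
    #if 0
    static void example_parse_sync_cache_10(const struct scsi_cmnd *cmd,
                                            u64 *lba, u32 *count)
    {
            *lba = get_unaligned_be32(&cmd->cmnd[2]);       /* bytes 2..5 */
            *count = get_unaligned_be16(&cmd->cmnd[7]);     /* bytes 7..8 */
    }
    #endif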
2761
2762static void aac_start_stop_callback(void *context, struct fib *fibptr)
2763{
2764        struct scsi_cmnd *scsicmd = context;
2765
2766        if (!aac_valid_context(scsicmd, fibptr))
2767                return;
2768
2769        BUG_ON(fibptr == NULL);
2770
2771        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2772
2773        aac_fib_complete(fibptr);
2774        aac_fib_free(fibptr);
2775        scsicmd->scsi_done(scsicmd);
2776}
2777
2778static int aac_start_stop(struct scsi_cmnd *scsicmd)
2779{
2780        int status;
2781        struct fib *cmd_fibcontext;
2782        struct aac_power_management *pmcmd;
2783        struct scsi_device *sdev = scsicmd->device;
2784        struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
2785
2786        if (!(aac->supplement_adapter_info.supported_options2 &
2787              AAC_OPTION_POWER_MANAGEMENT)) {
2788                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2789                                  SAM_STAT_GOOD;
2790                scsicmd->scsi_done(scsicmd);
2791                return 0;
2792        }
2793
2794        if (aac->in_reset)
2795                return SCSI_MLQUEUE_HOST_BUSY;
2796
2797        /*
2798         *      Allocate and initialize a Fib
2799         */
2800        cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
2801
2802        aac_fib_init(cmd_fibcontext);
2803
2804        pmcmd = fib_data(cmd_fibcontext);
2805        pmcmd->command = cpu_to_le32(VM_ContainerConfig);
2806        pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
2807        /* Eject bit ignored, not relevant */
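            /* cmnd[4] bit 0 is the START bit; cmnd[1] bit 0 is the IMMED bit */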
2808        pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
2809                cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
2810        pmcmd->cid = cpu_to_le32(sdev_id(sdev));
2811        pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
2812                cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
2813        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2814
2815        /*
2816         *      Now send the Fib to the adapter
2817         */
2818        status = aac_fib_send(ContainerCommand,
2819                  cmd_fibcontext,
2820                  sizeof(struct aac_power_management),
2821                  FsaNormal,
2822                  0, 1,
2823                  (fib_callback)aac_start_stop_callback,
2824                  (void *)scsicmd);
2825
2826        /*
2827         *      Check that the command was queued to the controller
2828         */
2829        if (status == -EINPROGRESS)
2830                return 0;
2831
2832        aac_fib_complete(cmd_fibcontext);
2833        aac_fib_free(cmd_fibcontext);
2834        return SCSI_MLQUEUE_HOST_BUSY;
2835}
2836
2837/**
2838 *      aac_scsi_cmd()          -       Process SCSI command
2839 *      @scsicmd:               SCSI command block
2840 *
2841 *      Emulate a SCSI command and queue the required request for the
2842 *      aacraid firmware.
2843 */
2844
2845int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2846{
2847        u32 cid, bus;
2848        struct Scsi_Host *host = scsicmd->device->host;
2849        struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2850        struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
2851
2852        if (fsa_dev_ptr == NULL)
2853                return -1;
2854        /*
2855         *      If the bus, id or lun is out of range, return fail
2856         *      Test does not apply to ID 16, the pseudo id for the controller
2857         *      itself.
2858         */
2859        cid = scmd_id(scsicmd);
2860        if (cid != host->this_id) {
2861                if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
2862                        if ((cid >= dev->maximum_num_containers) ||
2863                                        (scsicmd->device->lun != 0)) {
2864                                scsicmd->result = DID_NO_CONNECT << 16;
2865                                goto scsi_done_ret;
2866                        }
2867
2868                        /*
2869                         *      If the target container doesn't exist, it may have
2870                         *      been newly created
2871                         */
2872                        if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
2873                          (fsa_dev_ptr[cid].sense_data.sense_key ==
2874                           NOT_READY)) {
2875                                switch (scsicmd->cmnd[0]) {
2876                                case SERVICE_ACTION_IN_16:
2877                                        if (!(dev->raw_io_interface) ||
2878                                            !(dev->raw_io_64) ||
2879                                            ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
2880                                                break;
2881                                        /* fall through */
2882                                case INQUIRY:
2883                                case READ_CAPACITY:
2884                                case TEST_UNIT_READY:
2885                                        if (dev->in_reset)
2886                                                return -1;
2887                                        return _aac_probe_container(scsicmd,
2888                                                        aac_probe_container_callback2);
2889                                default:
2890                                        break;
2891                                }
2892                        }
2893                } else {  /* check for physical non-dasd devices */
2894                        bus = aac_logical_to_phys(scmd_channel(scsicmd));
2895
2896                        if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
2897                                dev->hba_map[bus][cid].devtype
2898                                        == AAC_DEVTYPE_NATIVE_RAW) {
2899                                if (dev->in_reset)
2900                                        return -1;
2901                                return aac_send_hba_fib(scsicmd);
2902                        } else if (dev->nondasd_support || expose_physicals ||
2903                                dev->jbod) {
2904                                if (dev->in_reset)
2905                                        return -1;
2906                                return aac_send_srb_fib(scsicmd);
2907                        } else {
2908                                scsicmd->result = DID_NO_CONNECT << 16;
2909                                goto scsi_done_ret;
2910                        }
2911                }
2912        }
2913        /*
2914         * else Command for the controller itself
2915         */
2916        else if ((scsicmd->cmnd[0] != INQUIRY) &&       /* only INQUIRY & TUR cmnd supported for controller */
2917                (scsicmd->cmnd[0] != TEST_UNIT_READY))
2918        {
2919                dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
2920                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2921                set_sense(&dev->fsa_dev[cid].sense_data,
2922                  ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
2923                  ASENCODE_INVALID_COMMAND, 0, 0);
2924                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2925                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2926                             SCSI_SENSE_BUFFERSIZE));
2927                goto scsi_done_ret;
2928        }
2929
2930        switch (scsicmd->cmnd[0]) {
2931        case READ_6:
2932        case READ_10:
2933        case READ_12:
2934        case READ_16:
2935                if (dev->in_reset)
2936                        return -1;
2937                return aac_read(scsicmd);
2938
2939        case WRITE_6:
2940        case WRITE_10:
2941        case WRITE_12:
2942        case WRITE_16:
2943                if (dev->in_reset)
2944                        return -1;
2945                return aac_write(scsicmd);
2946
2947        case SYNCHRONIZE_CACHE:
2948                if (((aac_cache & 6) == 6) && dev->cache_protected) {
2949                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2950                                          SAM_STAT_GOOD;
2951                        break;
2952                }
2953                /* Issue FIB to tell Firmware to flush its cache */
2954                if ((aac_cache & 6) != 2)
2955                        return aac_synchronize(scsicmd);
2956                /* fall through */
2957        case INQUIRY:
2958        {
2959                struct inquiry_data inq_data;
2960
2961                dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
2962                memset(&inq_data, 0, sizeof (struct inquiry_data));
2963
2964                if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
2965                        char *arr = (char *)&inq_data;
2966
2967                        /* EVPD bit set */
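                                /*
                                 * Only VPD pages 0x00 (supported pages), 0x80
                                 * (unit serial number) and 0x83 (device
                                 * identification) are emulated; any other page
                                 * code is rejected with ILLEGAL REQUEST /
                                 * INVALID FIELD IN CDB below.
                                 */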
2968                        arr[0] = (scmd_id(scsicmd) == host->this_id) ?
2969                          INQD_PDT_PROC : INQD_PDT_DA;
2970                        if (scsicmd->cmnd[2] == 0) {
2971                                /* supported vital product data pages */
2972                                arr[3] = 3;
2973                                arr[4] = 0x0;
2974                                arr[5] = 0x80;
2975                                arr[6] = 0x83;
2976                                arr[1] = scsicmd->cmnd[2];
2977                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2978                                                         sizeof(inq_data));
2979                                scsicmd->result = DID_OK << 16 |
2980                                                  COMMAND_COMPLETE << 8 |
2981                                                  SAM_STAT_GOOD;
2982                        } else if (scsicmd->cmnd[2] == 0x80) {
2983                                /* unit serial number page */
2984                                arr[3] = setinqserial(dev, &arr[4],
2985                                  scmd_id(scsicmd));
2986                                arr[1] = scsicmd->cmnd[2];
2987                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2988                                                         sizeof(inq_data));
2989                                if (aac_wwn != 2)
2990                                        return aac_get_container_serial(
2991                                                scsicmd);
2992                                scsicmd->result = DID_OK << 16 |
2993                                                  COMMAND_COMPLETE << 8 |
2994                                                  SAM_STAT_GOOD;
2995                        } else if (scsicmd->cmnd[2] == 0x83) {
2996                                /* vpd page 0x83 - Device Identification Page */
2997                                char *sno = (char *)&inq_data;
2998                                sno[3] = setinqserial(dev, &sno[4],
2999                                                      scmd_id(scsicmd));
3000                                if (aac_wwn != 2)
3001                                        return aac_get_container_serial(
3002                                                scsicmd);
3003                                scsicmd->result = DID_OK << 16 |
3004                                                  COMMAND_COMPLETE << 8 |
3005                                                  SAM_STAT_GOOD;
3006                        } else {
3007                                /* vpd page not implemented */
3008                                scsicmd->result = DID_OK << 16 |
3009                                  COMMAND_COMPLETE << 8 |
3010                                  SAM_STAT_CHECK_CONDITION;
3011                                set_sense(&dev->fsa_dev[cid].sense_data,
3012                                  ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
3013                                  ASENCODE_NO_SENSE, 7, 2);
3014                                memcpy(scsicmd->sense_buffer,
3015                                  &dev->fsa_dev[cid].sense_data,
3016                                  min_t(size_t,
3017                                        sizeof(dev->fsa_dev[cid].sense_data),
3018                                        SCSI_SENSE_BUFFERSIZE));
3019                        }
3020                        break;
3021                }
3022                inq_data.inqd_ver = 2;  /* claim compliance to SCSI-2 */
3023                inq_data.inqd_rdf = 2;  /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
3024                inq_data.inqd_len = 31;
3025                /* Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked | Reserved | CmdQue | SftRe */
3026                inq_data.inqd_pad2 = 0x32;      /* WBus16 | Sync | CmdQue */
3027                /*
3028                 *      Set the Vendor, Product, and Revision Level
3029                 *      see: <vendor>.c i.e. aac.c
3030                 */
3031                if (cid == host->this_id) {
3032                        setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
3033                        inq_data.inqd_pdt = INQD_PDT_PROC;      /* Processor device */
3034                        scsi_sg_copy_from_buffer(scsicmd, &inq_data,
3035                                                 sizeof(inq_data));
3036                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3037                                          SAM_STAT_GOOD;
3038                        break;
3039                }
3040                if (dev->in_reset)
3041                        return -1;
3042                setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
3043                inq_data.inqd_pdt = INQD_PDT_DA;        /* Direct/random access device */
3044                scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
3045                return aac_get_container_name(scsicmd);
3046        }
3047        case SERVICE_ACTION_IN_16:
3048                if (!(dev->raw_io_interface) ||
3049                    !(dev->raw_io_64) ||
3050                    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
3051                        break;
3052        {
3053                u64 capacity;
3054                char cp[13];
3055                unsigned int alloc_len;
3056
3057                dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
3058                capacity = fsa_dev_ptr[cid].size - 1;
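                    /*
                     * First 13 bytes of the READ CAPACITY(16) data: returned
                     * logical block address (bytes 0-7, big endian), logical
                     * block length in bytes (bytes 8-11) and a zeroed byte 12
                     * (no protection information).
                     */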
3059                cp[0] = (capacity >> 56) & 0xff;
3060                cp[1] = (capacity >> 48) & 0xff;
3061                cp[2] = (capacity >> 40) & 0xff;
3062                cp[3] = (capacity >> 32) & 0xff;
3063                cp[4] = (capacity >> 24) & 0xff;
3064                cp[5] = (capacity >> 16) & 0xff;
3065                cp[6] = (capacity >> 8) & 0xff;
3066                cp[7] = (capacity >> 0) & 0xff;
3067                cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
3068                cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3069                cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3070                cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
3071                cp[12] = 0;
3072
3073                alloc_len = ((scsicmd->cmnd[10] << 24)
3074                             + (scsicmd->cmnd[11] << 16)
3075                             + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
3076
3077                alloc_len = min_t(size_t, alloc_len, sizeof(cp));
3078                scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
3079                if (alloc_len < scsi_bufflen(scsicmd))
3080                        scsi_set_resid(scsicmd,
3081                                       scsi_bufflen(scsicmd) - alloc_len);
3082
3083                /* Do not cache partition table for arrays */
3084                scsicmd->device->removable = 1;
3085
3086                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3087                                  SAM_STAT_GOOD;
3088                break;
3089        }
3090
3091        case READ_CAPACITY:
3092        {
3093                u32 capacity;
3094                char cp[8];
3095
3096                dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
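                    /*
                     * READ CAPACITY(10) can only return a 32-bit last LBA;
                     * report 0xffffffff for larger containers so the midlayer
                     * falls back to READ CAPACITY(16).
                     */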
3097                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3098                        capacity = fsa_dev_ptr[cid].size - 1;
3099                else
3100                        capacity = (u32)-1;
3101
3102                cp[0] = (capacity >> 24) & 0xff;
3103                cp[1] = (capacity >> 16) & 0xff;
3104                cp[2] = (capacity >> 8) & 0xff;
3105                cp[3] = (capacity >> 0) & 0xff;
3106                cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
3107                cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3108                cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3109                cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
3110                scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
3111                /* Do not cache partition table for arrays */
3112                scsicmd->device->removable = 1;
3113                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3114                                  SAM_STAT_GOOD;
3115                break;
3116        }
3117
3118        case MODE_SENSE:
3119        {
3120                int mode_buf_length = 4;
3121                u32 capacity;
3122                aac_modep_data mpd;
3123
3124                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3125                        capacity = fsa_dev_ptr[cid].size - 1;
3126                else
3127                        capacity = (u32)-1;
3128
3129                dprintk((KERN_DEBUG "MODE SENSE command.\n"));
3130                memset((char *)&mpd, 0, sizeof(aac_modep_data));
3131
3132                /* Mode data length */
3133                mpd.hd.data_length = sizeof(mpd.hd) - 1;
3134                /* Medium type - default */
3135                mpd.hd.med_type = 0;
3136                /* Device-specific param,
3137                   bit 8: 0/1 = write enabled/protected
3138                   bit 4: 0/1 = FUA enabled */
3139                mpd.hd.dev_par = 0;
3140
3141                if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3142                        mpd.hd.dev_par = 0x10;
3143                if (scsicmd->cmnd[1] & 0x8)
3144                        mpd.hd.bd_length = 0;   /* Block descriptor length */
3145                else {
3146                        mpd.hd.bd_length = sizeof(mpd.bd);
3147                        mpd.hd.data_length += mpd.hd.bd_length;
3148                        mpd.bd.block_length[0] =
3149                                (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3150                        mpd.bd.block_length[1] =
3151                                (fsa_dev_ptr[cid].block_size >> 8) &  0xff;
3152                        mpd.bd.block_length[2] =
3153                                fsa_dev_ptr[cid].block_size  & 0xff;
3154
3155                        mpd.mpc_buf[0] = scsicmd->cmnd[2];
3156                        if (scsicmd->cmnd[2] == 0x1C) {
3157                                /* page length */
3158                                mpd.mpc_buf[1] = 0xa;
3159                                /* Mode data length */
3160                                mpd.hd.data_length = 23;
3161                        } else {
3162                                /* Mode data length */
3163                                mpd.hd.data_length = 15;
3164                        }
3165
3166                        if (capacity > 0xffffff) {
3167                                mpd.bd.block_count[0] = 0xff;
3168                                mpd.bd.block_count[1] = 0xff;
3169                                mpd.bd.block_count[2] = 0xff;
3170                        } else {
3171                                mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
3172                                mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
3173                                mpd.bd.block_count[2] = capacity  & 0xff;
3174                        }
3175                }
3176                if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3177                  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3178                        mpd.hd.data_length += 3;
3179                        mpd.mpc_buf[0] = 8;
3180                        mpd.mpc_buf[1] = 1;
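                                /*
                                 * WCE: report the write cache as enabled unless
                                 * SYNCHRONIZE_CACHE emulation is turned off
                                 * outright by the aac_cache module parameter
                                 * (see the SYNCHRONIZE_CACHE case above).
                                 */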
3181                        mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
3182                                ? 0 : 0x04; /* WCE */
3183                        mode_buf_length = sizeof(mpd);
3184                }
3185
3186                if (mode_buf_length > scsicmd->cmnd[4])
3187                        mode_buf_length = scsicmd->cmnd[4];
3188                else
3189                        mode_buf_length = sizeof(mpd);
3190                scsi_sg_copy_from_buffer(scsicmd,
3191                                         (char *)&mpd,
3192                                         mode_buf_length);
3193                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3194                                  SAM_STAT_GOOD;
3195                break;
3196        }
3197        case MODE_SENSE_10:
3198        {
3199                u32 capacity;
3200                int mode_buf_length = 8;
3201                aac_modep10_data mpd10;
3202
3203                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3204                        capacity = fsa_dev_ptr[cid].size - 1;
3205                else
3206                        capacity = (u32)-1;
3207
3208                dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
3209                memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
3210                /* Mode data length (MSB) */
3211                mpd10.hd.data_length[0] = 0;
3212                /* Mode data length (LSB) */
3213                mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
3214                /* Medium type - default */
3215                mpd10.hd.med_type = 0;
3216                /* Device-specific param,
3217                   bit 8: 0/1 = write enabled/protected
3218                   bit 4: 0/1 = FUA enabled */
3219                mpd10.hd.dev_par = 0;
3220
3221                if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3222                        mpd10.hd.dev_par = 0x10;
3223                mpd10.hd.rsrvd[0] = 0;  /* reserved */
3224                mpd10.hd.rsrvd[1] = 0;  /* reserved */
3225                if (scsicmd->cmnd[1] & 0x8) {
3226                        /* Block descriptor length (MSB) */
3227                        mpd10.hd.bd_length[0] = 0;
3228                        /* Block descriptor length (LSB) */
3229                        mpd10.hd.bd_length[1] = 0;
3230                } else {
3231                        mpd10.hd.bd_length[0] = 0;
3232                        mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
3233
3234                        mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
3235
3236                        mpd10.bd.block_length[0] =
3237                                (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3238                        mpd10.bd.block_length[1] =
3239                                (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3240                        mpd10.bd.block_length[2] =
3241                                fsa_dev_ptr[cid].block_size  & 0xff;
3242
3243                        if (capacity > 0xffffff) {
3244                                mpd10.bd.block_count[0] = 0xff;
3245                                mpd10.bd.block_count[1] = 0xff;
3246                                mpd10.bd.block_count[2] = 0xff;
3247                        } else {
3248                                mpd10.bd.block_count[0] =
3249                                        (capacity >> 16) & 0xff;
3250                                mpd10.bd.block_count[1] =
3251                                        (capacity >> 8) & 0xff;
3252                                mpd10.bd.block_count[2] =
3253                                        capacity  & 0xff;
3254                        }
3255                }
3256                if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3257                  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3258                        mpd10.hd.data_length[1] += 3;
3259                        mpd10.mpc_buf[0] = 8;
3260                        mpd10.mpc_buf[1] = 1;
3261                        mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
3262                                ? 0 : 0x04; /* WCE */
3263                        mode_buf_length = sizeof(mpd10);
3264                        if (mode_buf_length > scsicmd->cmnd[8])
3265                                mode_buf_length = scsicmd->cmnd[8];
3266                }
3267                scsi_sg_copy_from_buffer(scsicmd,
3268                                         (char *)&mpd10,
3269                                         mode_buf_length);
3270
3271                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3272                                  SAM_STAT_GOOD;
3273                break;
3274        }
3275        case REQUEST_SENSE:
3276                dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
3277                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3278                                sizeof(struct sense_data));
3279                memset(&dev->fsa_dev[cid].sense_data, 0,
3280                                sizeof(struct sense_data));
3281                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3282                                  SAM_STAT_GOOD;
3283                break;
3284
3285        case ALLOW_MEDIUM_REMOVAL:
3286                dprintk((KERN_DEBUG "LOCK command.\n"));
3287                if (scsicmd->cmnd[4])
3288                        fsa_dev_ptr[cid].locked = 1;
3289                else
3290                        fsa_dev_ptr[cid].locked = 0;
3291
3292                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3293                                  SAM_STAT_GOOD;
3294                break;
3295        /*
3296         *      These commands are all No-Ops
3297         */
3298        case TEST_UNIT_READY:
3299                if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
3300                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3301                                SAM_STAT_CHECK_CONDITION;
3302                        set_sense(&dev->fsa_dev[cid].sense_data,
3303                                  NOT_READY, SENCODE_BECOMING_READY,
3304                                  ASENCODE_BECOMING_READY, 0, 0);
3305                        memcpy(scsicmd->sense_buffer,
3306                               &dev->fsa_dev[cid].sense_data,
3307                               min_t(size_t,
3308                                     sizeof(dev->fsa_dev[cid].sense_data),
3309                                     SCSI_SENSE_BUFFERSIZE));
3310                        break;
3311                }
3312                /* fall through */
3313        case RESERVE:
3314        case RELEASE:
3315        case REZERO_UNIT:
3316        case REASSIGN_BLOCKS:
3317        case SEEK_10:
3318                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3319                                  SAM_STAT_GOOD;
3320                break;
3321
3322        case START_STOP:
3323                return aac_start_stop(scsicmd);
3324
3326        default:
3327        /*
3328         *      Unhandled commands
3329         */
3330                dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
3331                                scsicmd->cmnd[0]));
3332                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3333                                SAM_STAT_CHECK_CONDITION;
3334                set_sense(&dev->fsa_dev[cid].sense_data,
3335                          ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
3336                          ASENCODE_INVALID_COMMAND, 0, 0);
3337                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3338                                min_t(size_t,
3339                                      sizeof(dev->fsa_dev[cid].sense_data),
3340                                      SCSI_SENSE_BUFFERSIZE));
3341        }
3342
3343scsi_done_ret:
3344
3345        scsicmd->scsi_done(scsicmd);
3346        return 0;
3347}
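    /*
     * The CHECK CONDITION paths in aac_scsi_cmd() all follow the same pattern:
     * build the cached sense data with set_sense() and copy it into the
     * midlayer's sense buffer. A hypothetical helper (illustration only, not
     * part of the driver) would look like this:
     */
    #if 0
    static void example_set_check_condition(struct aac_dev *dev,
                                            struct scsi_cmnd *cmd,
                                            u8 sense_key, u8 sen_code,
                                            u8 a_sen_code)
    {
            u32 cid = scmd_id(cmd);

            cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
                          SAM_STAT_CHECK_CONDITION;
            set_sense(&dev->fsa_dev[cid].sense_data, sense_key, sen_code,
                      a_sen_code, 0, 0);
            memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
                   min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
                         SCSI_SENSE_BUFFERSIZE));
    }
    #endif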
3348
3349static int query_disk(struct aac_dev *dev, void __user *arg)
3350{
3351        struct aac_query_disk qd;
3352        struct fsa_dev_info *fsa_dev_ptr;
3353
3354        fsa_dev_ptr = dev->fsa_dev;
3355        if (!fsa_dev_ptr)
3356                return -EBUSY;
3357        if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
3358                return -EFAULT;
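            /*
             * Two lookup modes: cnum == -1 selects the container by id, while
             * bus/id/lun all -1 selects it by container number and fills in
             * the SCSI address for the caller.
             */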
3359        if (qd.cnum == -1) {
3360                if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
3361                        return -EINVAL;
3362                qd.cnum = qd.id;
3363        } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
3364                if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
3365                        return -EINVAL;
3366                qd.instance = dev->scsi_host_ptr->host_no;
3367                qd.bus = 0;
3368                qd.id = CONTAINER_TO_ID(qd.cnum);
3369                qd.lun = CONTAINER_TO_LUN(qd.cnum);
3370        } else
3371                return -EINVAL;
3372
3373        qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
3374        qd.locked = fsa_dev_ptr[qd.cnum].locked;
3375        qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
3376
3377        if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
3378                qd.unmapped = 1;
3379        else
3380                qd.unmapped = 0;
3381
3382        strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
3383          min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
3384
3385        if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
3386                return -EFAULT;
3387        return 0;
3388}
3389
3390static int force_delete_disk(struct aac_dev *dev, void __user *arg)
3391{
3392        struct aac_delete_disk dd;
3393        struct fsa_dev_info *fsa_dev_ptr;
3394
3395        fsa_dev_ptr = dev->fsa_dev;
3396        if (!fsa_dev_ptr)
3397                return -EBUSY;
3398
3399        if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3400                return -EFAULT;
3401
3402        if (dd.cnum >= dev->maximum_num_containers)
3403                return -EINVAL;
3404        /*
3405         *      Mark this container as being deleted.
3406         */
3407        fsa_dev_ptr[dd.cnum].deleted = 1;
3408        /*
3409         *      Mark the container as no longer valid
3410         */
3411        fsa_dev_ptr[dd.cnum].valid = 0;
3412        return 0;
3413}
3414
3415static int delete_disk(struct aac_dev *dev, void __user *arg)
3416{
3417        struct aac_delete_disk dd;
3418        struct fsa_dev_info *fsa_dev_ptr;
3419
3420        fsa_dev_ptr = dev->fsa_dev;
3421        if (!fsa_dev_ptr)
3422                return -EBUSY;
3423
3424        if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3425                return -EFAULT;
3426
3427        if (dd.cnum >= dev->maximum_num_containers)
3428                return -EINVAL;
3429        /*
3430         *      If the container is locked, it can not be deleted by the API.
3431         */
3432        if (fsa_dev_ptr[dd.cnum].locked)
3433                return -EBUSY;
3434        else {
3435                /*
3436                 *      Mark the container as no longer being valid.
3437                 */
3438                fsa_dev_ptr[dd.cnum].valid = 0;
3439                fsa_dev_ptr[dd.cnum].devname[0] = '\0';
3440                return 0;
3441        }
3442}
3443
3444int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
3445{
3446        switch (cmd) {
3447        case FSACTL_QUERY_DISK:
3448                return query_disk(dev, arg);
3449        case FSACTL_DELETE_DISK:
3450                return delete_disk(dev, arg);
3451        case FSACTL_FORCE_DELETE_DISK:
3452                return force_delete_disk(dev, arg);
3453        case FSACTL_GET_CONTAINERS:
3454                return aac_get_containers(dev);
3455        default:
3456                return -ENOTTY;
3457        }
3458}
3459
3460/**
3461 * aac_srb_callback - Handle the completion of a scsi command to a non-dasd device
3462 * @context: the context set in the fib - here it is scsi cmd
3463 * @fibptr: pointer to the fib
3464 */
3469
3470static void aac_srb_callback(void *context, struct fib * fibptr)
3471{
3472        struct aac_srb_reply *srbreply;
3473        struct scsi_cmnd *scsicmd;
3474
3475        scsicmd = (struct scsi_cmnd *) context;
3476
3477        if (!aac_valid_context(scsicmd, fibptr))
3478                return;
3479
3480        BUG_ON(fibptr == NULL);
3481
3482        srbreply = (struct aac_srb_reply *) fib_data(fibptr);
3483
3484        scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
3485
3486        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3487                /* fast response */
3488                srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
3489                srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
3490        } else {
3491                /*
3492                 *      Calculate resid for sg
3493                 */
3494                scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
3495                                   - le32_to_cpu(srbreply->data_xfer_length));
3496        }
3497
3498
3499        scsi_dma_unmap(scsicmd);
3500
3501        /* expose physical device if expose_physicals flag is on */
3502        if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
3503          && expose_physicals > 0)
3504                aac_expose_phy_device(scsicmd);
3505
3506        /*
3507         * First check the fib status
3508         */
3509
3510        if (le32_to_cpu(srbreply->status) != ST_OK) {
3511                int len;
3512
3513                pr_warn("aac_srb_callback: srb failed, status = %d\n",
3514                                le32_to_cpu(srbreply->status));
3515                len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3516                            SCSI_SENSE_BUFFERSIZE);
3517                scsicmd->result = DID_ERROR << 16
3518                                | COMMAND_COMPLETE << 8
3519                                | SAM_STAT_CHECK_CONDITION;
3520                memcpy(scsicmd->sense_buffer,
3521                                srbreply->sense_data, len);
3522        }
3523
3524        /*
3525         * Next check the srb status
3526         */
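            /*
             * Only the low six bits carry the SRB status code proper; mask off
             * the flag bits before dispatching.
             */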
3527        switch (le32_to_cpu(srbreply->srb_status) & 0x3f) {
3528        case SRB_STATUS_ERROR_RECOVERY:
3529        case SRB_STATUS_PENDING:
3530        case SRB_STATUS_SUCCESS:
3531                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3532                break;
3533        case SRB_STATUS_DATA_OVERRUN:
3534                switch (scsicmd->cmnd[0]) {
3535                case  READ_6:
3536                case  WRITE_6:
3537                case  READ_10:
3538                case  WRITE_10:
3539                case  READ_12:
3540                case  WRITE_12:
3541                case  READ_16:
3542                case  WRITE_16:
3543                        if (le32_to_cpu(srbreply->data_xfer_length)
3544                                                < scsicmd->underflow)
3545                                pr_warn("aacraid: SCSI CMD underflow\n");
3546                        else
3547                                pr_warn("aacraid: SCSI CMD Data Overrun\n");
3548                        scsicmd->result = DID_ERROR << 16
3549                                        | COMMAND_COMPLETE << 8;
3550                        break;
3551                case INQUIRY:
3552                        scsicmd->result = DID_OK << 16
3553                                        | COMMAND_COMPLETE << 8;
3554                        break;
3555                default:
3556                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3557                        break;
3558                }
3559                break;
3560        case SRB_STATUS_ABORTED:
3561                scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3562                break;
3563        case SRB_STATUS_ABORT_FAILED:
3564                /*
3565                 * Not sure about this one - but assuming the
3566                 * hba was trying to abort for some reason
3567                 */
3568                scsicmd->result = DID_ERROR << 16 | ABORT << 8;
3569                break;
3570        case SRB_STATUS_PARITY_ERROR:
3571                scsicmd->result = DID_PARITY << 16
3572                                | MSG_PARITY_ERROR << 8;
3573                break;
3574        case SRB_STATUS_NO_DEVICE:
3575        case SRB_STATUS_INVALID_PATH_ID:
3576        case SRB_STATUS_INVALID_TARGET_ID:
3577        case SRB_STATUS_INVALID_LUN:
3578        case SRB_STATUS_SELECTION_TIMEOUT:
3579                scsicmd->result = DID_NO_CONNECT << 16
3580                                | COMMAND_COMPLETE << 8;
3581                break;
3582
3583        case SRB_STATUS_COMMAND_TIMEOUT:
3584        case SRB_STATUS_TIMEOUT:
3585                scsicmd->result = DID_TIME_OUT << 16
3586                                | COMMAND_COMPLETE << 8;
3587                break;
3588
3589        case SRB_STATUS_BUSY:
3590                scsicmd->result = DID_BUS_BUSY << 16
3591                                | COMMAND_COMPLETE << 8;
3592                break;
3593
3594        case SRB_STATUS_BUS_RESET:
3595                scsicmd->result = DID_RESET << 16
3596                                | COMMAND_COMPLETE << 8;
3597                break;
3598
3599        case SRB_STATUS_MESSAGE_REJECTED:
3600                scsicmd->result = DID_ERROR << 16
3601                                | MESSAGE_REJECT << 8;
3602                break;
3603        case SRB_STATUS_REQUEST_FLUSHED:
3604        case SRB_STATUS_ERROR:
3605        case SRB_STATUS_INVALID_REQUEST:
3606        case SRB_STATUS_REQUEST_SENSE_FAILED:
3607        case SRB_STATUS_NO_HBA:
3608        case SRB_STATUS_UNEXPECTED_BUS_FREE:
3609        case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
3610        case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
3611        case SRB_STATUS_DELAYED_RETRY:
3612        case SRB_STATUS_BAD_FUNCTION:
3613        case SRB_STATUS_NOT_STARTED:
3614        case SRB_STATUS_NOT_IN_USE:
3615        case SRB_STATUS_FORCE_ABORT:
3616        case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
3617        default:
3618#ifdef AAC_DETAILED_STATUS_INFO
3619                pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
3620                        le32_to_cpu(srbreply->srb_status) & 0x3F,
3621                        aac_get_status_string(
3622                                le32_to_cpu(srbreply->srb_status) & 0x3F),
3623                        scsicmd->cmnd[0],
3624                        le32_to_cpu(srbreply->scsi_status));
3625#endif
3626                /*
3627                 * When the CC bit is SET by the host in ATA pass thru CDB,
3628                 *  driver is supposed to return DID_OK
3629                 *
3630                 * When the CC bit is RESET by the host, driver should
3631                 *  return DID_ERROR
3632                 */
3633                if ((scsicmd->cmnd[0] == ATA_12)
3634                        || (scsicmd->cmnd[0] == ATA_16)) {
3635
3636                        if (scsicmd->cmnd[2] & (0x01 << 5)) {
3637                                scsicmd->result = DID_OK << 16
3638                                        | COMMAND_COMPLETE << 8;
3639                                break;
3640                        } else {
3641                                scsicmd->result = DID_ERROR << 16
3642                                        | COMMAND_COMPLETE << 8;
3643                                break;
3644                        }
3645                } else {
3646                        scsicmd->result = DID_ERROR << 16
3647                                | COMMAND_COMPLETE << 8;
3648                        break;
3649                }
3650        }
3651        if (le32_to_cpu(srbreply->scsi_status)
3652                        == SAM_STAT_CHECK_CONDITION) {
3653                int len;
3654
3655                scsicmd->result |= SAM_STAT_CHECK_CONDITION;
3656                len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3657                            SCSI_SENSE_BUFFERSIZE);
3658#ifdef AAC_DETAILED_STATUS_INFO
3659                pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
3660                                        le32_to_cpu(srbreply->status), len);
3661#endif
3662                memcpy(scsicmd->sense_buffer,
3663                                srbreply->sense_data, len);
3664        }
3665
3666        /*
3667         * OR in the scsi status (already shifted up a bit)
3668         */
3669        scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
3670
3671        aac_fib_complete(fibptr);
3672        scsicmd->scsi_done(scsicmd);
3673}
3674
3675static void hba_resp_task_complete(struct aac_dev *dev,
3676                                        struct scsi_cmnd *scsicmd,
3677                                        struct aac_hba_resp *err)
3678{
3679        scsicmd->result = err->status;
3680        /* set residual count */
3681        scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
3682
3683        switch (err->status) {
3684        case SAM_STAT_GOOD:
3685                scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3686                break;
3687        case SAM_STAT_CHECK_CONDITION:
3688        {
3689                int len;
3690
3691                len = min_t(u8, err->sense_response_data_len,
3692                        SCSI_SENSE_BUFFERSIZE);
3693                if (len)
3694                        memcpy(scsicmd->sense_buffer,
3695                                err->sense_response_buf, len);
3696                scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3697                break;
3698        }
3699        case SAM_STAT_BUSY:
3700                scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
3701                break;
3702        case SAM_STAT_TASK_ABORTED:
3703                scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
3704                break;
3705        case SAM_STAT_RESERVATION_CONFLICT:
3706        case SAM_STAT_TASK_SET_FULL:
3707        default:
3708                scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3709                break;
3710        }
3711}
3712
3713static void hba_resp_task_failure(struct aac_dev *dev,
3714                                        struct scsi_cmnd *scsicmd,
3715                                        struct aac_hba_resp *err)
3716{
3717        switch (err->status) {
3718        case HBA_RESP_STAT_HBAMODE_DISABLED:
3719        {
3720                u32 bus, cid;
3721
3722                bus = aac_logical_to_phys(scmd_channel(scsicmd));
3723                cid = scmd_id(scsicmd);
3724                if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
3725                        dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
3726                        dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
3727                }
3728                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3729                break;
3730        }
3731        case HBA_RESP_STAT_IO_ERROR:
3732        case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
3733                scsicmd->result = DID_OK << 16 |
3734                        COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
3735                break;
3736        case HBA_RESP_STAT_IO_ABORTED:
3737                scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3738                break;
3739        case HBA_RESP_STAT_INVALID_DEVICE:
3740                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3741                break;
3742        case HBA_RESP_STAT_UNDERRUN:
3743                /* UNDERRUN is OK */
3744                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3745                break;
3746        case HBA_RESP_STAT_OVERRUN:
3747        default:
3748                scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3749                break;
3750        }
3751}
3752
3753/**
3754 * aac_hba_callback - Handle the completion of a native HBA scsi command
3755 * @context: the context set in the fib - here it is scsi cmd
3756 * @fibptr: pointer to the fib
3757 */
3762void aac_hba_callback(void *context, struct fib *fibptr)
3763{
3764        struct aac_dev *dev;
3765        struct scsi_cmnd *scsicmd;
3766
3767        struct aac_hba_resp *err =
3768                        &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
3769
3770        scsicmd = (struct scsi_cmnd *) context;
3771
3772        if (!aac_valid_context(scsicmd, fibptr))
3773                return;
3774
3775        WARN_ON(fibptr == NULL);
3776        dev = fibptr->dev;
3777
3778        if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
3779                scsi_dma_unmap(scsicmd);
3780
3781        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3782                /* fast response */
3783                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3784                goto out;
3785        }
3786
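            /*
             * Dispatch on the firmware's service response: TASK_COMPLETE
             * carries a SAM status to translate, FAILURE a transport-level
             * error, and the TMF_* codes report task management results.
             */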
3787        switch (err->service_response) {
3788        case HBA_RESP_SVCRES_TASK_COMPLETE:
3789                hba_resp_task_complete(dev, scsicmd, err);
3790                break;
3791        case HBA_RESP_SVCRES_FAILURE:
3792                hba_resp_task_failure(dev, scsicmd, err);
3793                break;
3794        case HBA_RESP_SVCRES_TMF_REJECTED:
3795                scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
3796                break;
3797        case HBA_RESP_SVCRES_TMF_LUN_INVALID:
3798                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3799                break;
3800        case HBA_RESP_SVCRES_TMF_COMPLETE:
3801        case HBA_RESP_SVCRES_TMF_SUCCEEDED:
3802                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3803                break;
3804        default:
3805                scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3806                break;
3807        }
3808
3809out:
3810        aac_fib_complete(fibptr);
3811
3812        if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
3813                scsicmd->SCp.sent_command = 1;
3814        else
3815                scsicmd->scsi_done(scsicmd);
3816}
3817
3818/**
3819 * aac_send_srb_fib - Form a FIB and fill in the aac_srb from the scsicmd
3820 * @scsicmd: the scsi command block
3821 *
3822 * This routine will form a FIB and fill in the aac_srb from the
3823 * scsicmd passed in.
3824 */
3826
3827static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
3828{
3829        struct fib* cmd_fibcontext;
3830        struct aac_dev* dev;
3831        int status;
3832
3833        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3834        if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3835                        scsicmd->device->lun > 7) {
3836                scsicmd->result = DID_NO_CONNECT << 16;
3837                scsicmd->scsi_done(scsicmd);
3838                return 0;
3839        }
3840
3841        /*
3842         *      Allocate and initialize a Fib then setup a BlockWrite command
3843         */
3844        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3845        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3846        status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
3847
3848        /*
3849         *      Check that the command was queued to the controller
3850         */
3851        if (status == -EINPROGRESS)
3852                return 0;
3853
3854        printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
3855        aac_fib_complete(cmd_fibcontext);
3856        aac_fib_free(cmd_fibcontext);
3857
3858        return -1;
3859}
3860
3861/**
3862 * aac_send_hba_fib - Form a FIB and fill in the aac_hba_cmd_req from the scsicmd
3863 * @scsicmd: the scsi command block
3864 *
3865 * This routine will form a FIB and fill in the aac_hba_cmd_req from the
3866 * scsicmd passed in.
3867 */
3869static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
3870{
3871        struct fib *cmd_fibcontext;
3872        struct aac_dev *dev;
3873        int status;
3874
3875        dev = shost_priv(scsicmd->device->host);
3876        if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3877                        scsicmd->device->lun > AAC_MAX_LUN - 1) {
3878                scsicmd->result = DID_NO_CONNECT << 16;
3879                scsicmd->scsi_done(scsicmd);
3880                return 0;
3881        }
3882
3883        /*
3884         *      Allocate and initialize a Fib then setup a BlockWrite command
3885         */
3886        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3887        if (!cmd_fibcontext)
3888                return -1;
3889
3890        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3891        status = aac_adapter_hba(cmd_fibcontext, scsicmd);
3892
3893        /*
3894         *      Check that the command was queued to the controller
3895         */
3896        if (status == -EINPROGRESS)
3897                return 0;
3898
3899        pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
3900                status);
3901        aac_fib_complete(cmd_fibcontext);
3902        aac_fib_free(cmd_fibcontext);
3903
3904        return -1;
3905}
3906
3907
3908static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
3909{
3910        unsigned long byte_count = 0;
3911        int nseg;
3912        struct scatterlist *sg;
3913        int i;
3914
3915        /* Get rid of old data */
3916        psg->count = 0;
3917        psg->sg[0].addr = 0;
3918        psg->sg[0].count = 0;
3919
3920        nseg = scsi_dma_map(scsicmd);
3921        if (nseg <= 0)
3922                return nseg;
3923
3924        psg->count = cpu_to_le32(nseg);
3925
3926        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3927                psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
3928                psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
3929                byte_count += sg_dma_len(sg);
3930        }
3931        /* hba wants the size to be exact */
3932        if (byte_count > scsi_bufflen(scsicmd)) {
3933                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3934                        (byte_count - scsi_bufflen(scsicmd));
3935                psg->sg[i-1].count = cpu_to_le32(temp);
3936                byte_count = scsi_bufflen(scsicmd);
3937        }
3938        /* Check for command underflow */
3939        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3940                printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
3941                       byte_count, scsicmd->underflow);
3942        }
3943
3944        return byte_count;
3945}
3946
3947
3948static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
3949{
3950        unsigned long byte_count = 0;
3951        u64 addr;
3952        int nseg;
3953        struct scatterlist *sg;
3954        int i;
3955
3956        /* Get rid of old data */
3957        psg->count = 0;
3958        psg->sg[0].addr[0] = 0;
3959        psg->sg[0].addr[1] = 0;
3960        psg->sg[0].count = 0;
3961
3962        nseg = scsi_dma_map(scsicmd);
3963        if (nseg <= 0)
3964                return nseg;
3965
3966        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3967                int count = sg_dma_len(sg);
3968                addr = sg_dma_address(sg);
3969                psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
3970                psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
3971                psg->sg[i].count = cpu_to_le32(count);
3972                byte_count += count;
3973        }
3974        psg->count = cpu_to_le32(nseg);
3975        /* hba wants the size to be exact */
3976        if (byte_count > scsi_bufflen(scsicmd)) {
3977                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3978                        (byte_count - scsi_bufflen(scsicmd));
3979                psg->sg[i-1].count = cpu_to_le32(temp);
3980                byte_count = scsi_bufflen(scsicmd);
3981        }
3982        /* Check for command underflow */
3983        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3984                printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
3985                       byte_count, scsicmd->underflow);
3986        }
3987
3988        return byte_count;
3989}
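    /*
     * Note: the explicit mask/shift pairs used to split 64-bit DMA addresses
     * in the sg builders are equivalent to lower_32_bits()/upper_32_bits().
     */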
3990
3991static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
3992{
3993        unsigned long byte_count = 0;
3994        int nseg;
3995        struct scatterlist *sg;
3996        int i;
3997
3998        /* Get rid of old data */
3999        psg->count = 0;
4000        psg->sg[0].next = 0;
4001        psg->sg[0].prev = 0;
4002        psg->sg[0].addr[0] = 0;
4003        psg->sg[0].addr[1] = 0;
4004        psg->sg[0].count = 0;
4005        psg->sg[0].flags = 0;
4006
4007        nseg = scsi_dma_map(scsicmd);
4008        if (nseg <= 0)
4009                return nseg;
4010
4011        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4012                int count = sg_dma_len(sg);
4013                u64 addr = sg_dma_address(sg);
4014                psg->sg[i].next = 0;
4015                psg->sg[i].prev = 0;
4016                psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
4017                psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
4018                psg->sg[i].count = cpu_to_le32(count);
4019                psg->sg[i].flags = 0;
4020                byte_count += count;
4021        }
4022        psg->count = cpu_to_le32(nseg);
4023        /* hba wants the size to be exact */
4024        if (byte_count > scsi_bufflen(scsicmd)) {
4025                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
4026                        (byte_count - scsi_bufflen(scsicmd));
4027                psg->sg[i-1].count = cpu_to_le32(temp);
4028                byte_count = scsi_bufflen(scsicmd);
4029        }
4030        /* Check for command underflow */
4031        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4032                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
4033                       byte_count, scsicmd->underflow);
4034        }
4035
4036        return byte_count;
4037}
4038
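    /*
     * Build an IEEE-1212 format scatter/gather list directly into the RAW IO
     * v2 request.  The list is "conformable" when every element between the
     * first and the last has the same (nominal) size; if it is not, an
     * attempt is made to re-split the middle elements into equal chunks via
     * aac_convert_sgraw2() so that RIO2_SGL_CONFORMANT can still be reported
     * to the firmware.
     */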
4039static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
4040                                struct aac_raw_io2 *rio2, int sg_max)
4041{
4042        unsigned long byte_count = 0;
4043        int nseg;
4044        struct scatterlist *sg;
4045        int i, conformable = 0;
4046        u32 min_size = PAGE_SIZE, cur_size;
4047
4048        nseg = scsi_dma_map(scsicmd);
4049        if (nseg <= 0)
4050                return nseg;
4051
4052        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4053                int count = sg_dma_len(sg);
4054                u64 addr = sg_dma_address(sg);
4055
4056                BUG_ON(i >= sg_max);
4057                rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
4058                rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
4059                cur_size = cpu_to_le32(count);
4060                rio2->sge[i].length = cur_size;
4061                rio2->sge[i].flags = 0;
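                    /*
                     * Conformance check: the first and last elements may be
                     * any size, but every middle element must match the
                     * nominal size taken from the second element.  Remember
                     * the smallest middle size for the re-split attempt below.
                     */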
4062                if (i == 0) {
4063                        conformable = 1;
4064                        rio2->sgeFirstSize = cur_size;
4065                } else if (i == 1) {
4066                        rio2->sgeNominalSize = cur_size;
4067                        min_size = cur_size;
4068                } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
4069                        conformable = 0;
4070                        if (cur_size < min_size)
4071                                min_size = cur_size;
4072                }
4073                byte_count += count;
4074        }
4075
4076        /* hba wants the size to be exact */
4077        if (byte_count > scsi_bufflen(scsicmd)) {
4078                u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
4079                        (byte_count - scsi_bufflen(scsicmd));
4080                rio2->sge[i-1].length = cpu_to_le32(temp);
4081                byte_count = scsi_bufflen(scsicmd);
4082        }
4083
4084        rio2->sgeCnt = cpu_to_le32(nseg);
4085        rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
4086        /* not conformable: evaluate required sg elements */
4087        if (!conformable) {
4088                int j, nseg_new = nseg, err_found;
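                    /*
                     * Find the largest page-multiple chunk size that evenly
                     * divides every middle element, together with the element
                     * count that splitting at that size would produce.
                     */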
4089                for (i = min_size / PAGE_SIZE; i >= 1; --i) {
4090                        err_found = 0;
4091                        nseg_new = 2;
4092                        for (j = 1; j < nseg - 1; ++j) {
4093                                if (rio2->sge[j].length % (i*PAGE_SIZE)) {
4094                                        err_found = 1;
4095                                        break;
4096                                }
4097                                nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
4098                        }
4099                        if (!err_found)
4100                                break;
4101                }
4102                if (i > 0 && nseg_new <= sg_max) {
4103                        int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new);
4104
4105                        if (ret < 0)
4106                                return ret;
4107                }
4108        } else
4109                rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4110
4111        /* Check for command underflow */
4112        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4113                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
4114                       byte_count, scsicmd->underflow);
4115        }
4116
4117        return byte_count;
4118}
4119
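    /*
     * Rewrite a non-conformable RAW IO v2 scatter/gather list so that every
     * middle element is exactly 'pages' pages long; the first and last
     * elements are left untouched.  Does nothing when aac_convert_sgl is
     * zero.  Returns 0 on success or -ENOMEM if the temporary list cannot
     * be allocated.
     */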
4120static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
4121{
4122        struct sge_ieee1212 *sge;
4123        int i, j, pos;
4124        u32 addr_low;
4125
4126        if (aac_convert_sgl == 0)
4127                return 0;
4128
4129        sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
4130        if (sge == NULL)
4131                return -ENOMEM;
4132
4133        for (i = 1, pos = 1; i < nseg-1; ++i) {
4134                for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
4135                        addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
4136                        sge[pos].addrLow = addr_low;
4137                        sge[pos].addrHigh = rio2->sge[i].addrHigh;
4138                        if (addr_low < rio2->sge[i].addrLow)
4139                                sge[pos].addrHigh++;
4140                        sge[pos].length = pages * PAGE_SIZE;
4141                        sge[pos].flags = 0;
4142                        pos++;
4143                }
4144        }
4145        sge[pos] = rio2->sge[nseg-1];
4146        memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
4147
4148        kfree(sge);
4149        rio2->sgeCnt = cpu_to_le32(nseg_new);
4150        rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4151        rio2->sgeNominalSize = pages * PAGE_SIZE;
4152        return 0;
4153}
4154
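    /*
     * Build the scatter/gather list for a native HBA command.  When the
     * mapped segment count fits within HBA_MAX_SG_EMBEDDED, the elements are
     * embedded directly in hbacmd->sge[] and the last one is flagged;
     * otherwise the elements are written starting at hbacmd->sge[2] and
     * sge[0] is set up as a single descriptor carrying the caller-supplied
     * sg_address of the list.
     */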
4155static long aac_build_sghba(struct scsi_cmnd *scsicmd,
4156                        struct aac_hba_cmd_req *hbacmd,
4157                        int sg_max,
4158                        u64 sg_address)
4159{
4160        unsigned long byte_count = 0;
4161        int nseg;
4162        struct scatterlist *sg;
4163        int i;
4164        u32 cur_size;
4165        struct aac_hba_sgl *sge;
4166
4167        nseg = scsi_dma_map(scsicmd);
4168        if (nseg <= 0) {
4169                byte_count = nseg;
4170                goto out;
4171        }
4172
4173        if (nseg > HBA_MAX_SG_EMBEDDED)
4174                sge = &hbacmd->sge[2];
4175        else
4176                sge = &hbacmd->sge[0];
4177
4178        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4179                int count = sg_dma_len(sg);
4180                u64 addr = sg_dma_address(sg);
4181
4182                WARN_ON(i >= sg_max);
4183                sge->addr_hi = cpu_to_le32((u32)(addr>>32));
4184                sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
4185                cur_size = cpu_to_le32(count);
4186                sge->len = cur_size;
4187                sge->flags = 0;
4188                byte_count += count;
4189                sge++;
4190        }
4191
4192        sge--;
4193        /* hba wants the size to be exact */
4194        if (byte_count > scsi_bufflen(scsicmd)) {
4195                u32 temp;
4196
4197                temp = le32_to_cpu(sge->len) -
4198                        (byte_count - scsi_bufflen(scsicmd));
4199                sge->len = cpu_to_le32(temp);
4200                byte_count = scsi_bufflen(scsicmd);
4201        }
4202
4203        if (nseg <= HBA_MAX_SG_EMBEDDED) {
4204                hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
4205                sge->flags = cpu_to_le32(0x40000000);
4206        } else {
4207                /* not embedded */
4208                hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
4209                hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1);
4210                hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
4211                hbacmd->sge[0].addr_lo =
4212                        cpu_to_le32((u32)(sg_address & 0xffffffff));
4213        }
4214
4215        /* Check for command underflow */
4216        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4217                pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
4218                                byte_count, scsicmd->underflow);
4219        }
4220out:
4221        return byte_count;
4222}
4223
4224#ifdef AAC_DETAILED_STATUS_INFO
4225
4226struct aac_srb_status_info {
4227        u32     status;
4228        char    *str;
4229};
4230
4231
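    /* Map of SRB status codes to human-readable descriptions. */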
4232static struct aac_srb_status_info srb_status_info[] = {
4233        { SRB_STATUS_PENDING,           "Pending Status"},
4234        { SRB_STATUS_SUCCESS,           "Success"},
4235        { SRB_STATUS_ABORTED,           "Aborted Command"},
4236        { SRB_STATUS_ABORT_FAILED,      "Abort Failed"},
4237        { SRB_STATUS_ERROR,             "Error Event"},
4238        { SRB_STATUS_BUSY,              "Device Busy"},
4239        { SRB_STATUS_INVALID_REQUEST,   "Invalid Request"},
4240        { SRB_STATUS_INVALID_PATH_ID,   "Invalid Path ID"},
4241        { SRB_STATUS_NO_DEVICE,         "No Device"},
4242        { SRB_STATUS_TIMEOUT,           "Timeout"},
4243        { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
4244        { SRB_STATUS_COMMAND_TIMEOUT,   "Command Timeout"},
4245        { SRB_STATUS_MESSAGE_REJECTED,  "Message Rejected"},
4246        { SRB_STATUS_BUS_RESET,         "Bus Reset"},
4247        { SRB_STATUS_PARITY_ERROR,      "Parity Error"},
4248        { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
4249        { SRB_STATUS_NO_HBA,            "No HBA"},
4250        { SRB_STATUS_DATA_OVERRUN,      "Data Overrun/Data Underrun"},
4251        { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
4252        { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
4253        { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
4254        { SRB_STATUS_REQUEST_FLUSHED,   "Request Flushed"},
4255        { SRB_STATUS_DELAYED_RETRY,     "Delayed Retry"},
4256        { SRB_STATUS_INVALID_LUN,       "Invalid LUN"},
4257        { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
4258        { SRB_STATUS_BAD_FUNCTION,      "Bad Function"},
4259        { SRB_STATUS_ERROR_RECOVERY,    "Error Recovery"},
4260        { SRB_STATUS_NOT_STARTED,       "Not Started"},
4261        { SRB_STATUS_NOT_IN_USE,        "Not In Use"},
4262        { SRB_STATUS_FORCE_ABORT,       "Force Abort"},
4263        { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
4264        { 0xff,                         "Unknown Error"}
4265};
4266
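    /*
     * Return a human-readable description of an SRB status code, or
     * "Bad Status Code" when the value is not in srb_status_info[].
     */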
4267char *aac_get_status_string(u32 status)
4268{
4269        int i;
4270
4271        for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
4272                if (srb_status_info[i].status == status)
4273                        return srb_status_info[i].str;
4274
4275        return "Bad Status Code";
4276}
4277
4278#endif
4279