linux/drivers/scsi/aacraid/aachba.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *      Adaptec AAC series RAID controller driver
   4 *      (c) Copyright 2001 Red Hat Inc.
   5 *
   6 * Based on the old aacraid driver, which is:
   7 * Adaptec aacraid device driver for Linux.
   8 *
   9 * Copyright (c) 2000-2010 Adaptec, Inc.
  10 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  11 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  12 *
  13 * Module Name:
  14 *  aachba.c
  15 *
  16 * Abstract: Contains interfaces to manage I/Os.
  17 */
  18
  19#include <linux/kernel.h>
  20#include <linux/init.h>
  21#include <linux/types.h>
  22#include <linux/pci.h>
  23#include <linux/spinlock.h>
  24#include <linux/slab.h>
  25#include <linux/completion.h>
  26#include <linux/blkdev.h>
  27#include <linux/uaccess.h>
  28#include <linux/highmem.h> /* For flush_kernel_dcache_page */
  29#include <linux/module.h>
  30
  31#include <asm/unaligned.h>
  32
  33#include <scsi/scsi.h>
  34#include <scsi/scsi_cmnd.h>
  35#include <scsi/scsi_device.h>
  36#include <scsi/scsi_host.h>
  37
  38#include "aacraid.h"
  39
  40/* values for inqd_pdt: Peripheral device type in plain English */
  41#define INQD_PDT_DA     0x00    /* Direct-access (DISK) device */
  42#define INQD_PDT_PROC   0x03    /* Processor device */
  43#define INQD_PDT_CHNGR  0x08    /* Changer (jukebox, scsi2) */
  44#define INQD_PDT_COMM   0x09    /* Communication device (scsi2) */
  45#define INQD_PDT_NOLUN2 0x1f    /* Unknown Device (scsi2) */
  46#define INQD_PDT_NOLUN  0x7f    /* Logical Unit Not Present */
  47
  48#define INQD_PDT_DMASK  0x1F    /* Peripheral Device Type Mask */
  49#define INQD_PDT_QMASK  0xE0    /* Peripheral Device Qualifier Mask */
  50
  51/*
  52 *      Sense codes
  53 */
  54
  55#define SENCODE_NO_SENSE                        0x00
  56#define SENCODE_END_OF_DATA                     0x00
  57#define SENCODE_BECOMING_READY                  0x04
  58#define SENCODE_INIT_CMD_REQUIRED               0x04
  59#define SENCODE_UNRECOVERED_READ_ERROR          0x11
  60#define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
  61#define SENCODE_INVALID_COMMAND                 0x20
  62#define SENCODE_LBA_OUT_OF_RANGE                0x21
  63#define SENCODE_INVALID_CDB_FIELD               0x24
  64#define SENCODE_LUN_NOT_SUPPORTED               0x25
  65#define SENCODE_INVALID_PARAM_FIELD             0x26
  66#define SENCODE_PARAM_NOT_SUPPORTED             0x26
  67#define SENCODE_PARAM_VALUE_INVALID             0x26
  68#define SENCODE_RESET_OCCURRED                  0x29
  69#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
  70#define SENCODE_INQUIRY_DATA_CHANGED            0x3F
  71#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
  72#define SENCODE_DIAGNOSTIC_FAILURE              0x40
  73#define SENCODE_INTERNAL_TARGET_FAILURE         0x44
  74#define SENCODE_INVALID_MESSAGE_ERROR           0x49
  75#define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
  76#define SENCODE_OVERLAPPED_COMMAND              0x4E
  77
  78/*
  79 *      Additional sense codes
  80 */
  81
  82#define ASENCODE_NO_SENSE                       0x00
  83#define ASENCODE_END_OF_DATA                    0x05
  84#define ASENCODE_BECOMING_READY                 0x01
  85#define ASENCODE_INIT_CMD_REQUIRED              0x02
  86#define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
  87#define ASENCODE_INVALID_COMMAND                0x00
  88#define ASENCODE_LBA_OUT_OF_RANGE               0x00
  89#define ASENCODE_INVALID_CDB_FIELD              0x00
  90#define ASENCODE_LUN_NOT_SUPPORTED              0x00
  91#define ASENCODE_INVALID_PARAM_FIELD            0x00
  92#define ASENCODE_PARAM_NOT_SUPPORTED            0x01
  93#define ASENCODE_PARAM_VALUE_INVALID            0x02
  94#define ASENCODE_RESET_OCCURRED                 0x00
  95#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
  96#define ASENCODE_INQUIRY_DATA_CHANGED           0x03
  97#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
  98#define ASENCODE_DIAGNOSTIC_FAILURE             0x80
  99#define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
 100#define ASENCODE_INVALID_MESSAGE_ERROR          0x00
 101#define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
 102#define ASENCODE_OVERLAPPED_COMMAND             0x00
 103
 104#define BYTE0(x) (unsigned char)(x)
 105#define BYTE1(x) (unsigned char)((x) >> 8)
 106#define BYTE2(x) (unsigned char)((x) >> 16)
 107#define BYTE3(x) (unsigned char)((x) >> 24)
 108
 109/* MODE_SENSE data format */
 110typedef struct {
 111        struct {
 112                u8      data_length;
 113                u8      med_type;
 114                u8      dev_par;
 115                u8      bd_length;
 116        } __attribute__((packed)) hd;
 117        struct {
 118                u8      dens_code;
 119                u8      block_count[3];
 120                u8      reserved;
 121                u8      block_length[3];
 122        } __attribute__((packed)) bd;
 123                u8      mpc_buf[3];
 124} __attribute__((packed)) aac_modep_data;
 125
 126/* MODE_SENSE_10 data format */
 127typedef struct {
 128        struct {
 129                u8      data_length[2];
 130                u8      med_type;
 131                u8      dev_par;
 132                u8      rsrvd[2];
 133                u8      bd_length[2];
 134        } __attribute__((packed)) hd;
 135        struct {
 136                u8      dens_code;
 137                u8      block_count[3];
 138                u8      reserved;
 139                u8      block_length[3];
 140        } __attribute__((packed)) bd;
 141                u8      mpc_buf[3];
 142} __attribute__((packed)) aac_modep10_data;
 143
 144/*------------------------------------------------------------------------------
 145 *              S T R U C T S / T Y P E D E F S
 146 *----------------------------------------------------------------------------*/
 147/* SCSI inquiry data */
 148struct inquiry_data {
 149        u8 inqd_pdt;    /* Peripheral qualifier | Peripheral Device Type */
 150        u8 inqd_dtq;    /* RMB | Device Type Qualifier */
 151        u8 inqd_ver;    /* ISO version | ECMA version | ANSI-approved version */
 152        u8 inqd_rdf;    /* AENC | TrmIOP | Response data format */
 153        u8 inqd_len;    /* Additional length (n-4) */
 154        u8 inqd_pad1[2];/* Reserved - must be zero */
 155        u8 inqd_pad2;   /* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
 156        u8 inqd_vid[8]; /* Vendor ID */
 157        u8 inqd_pid[16];/* Product ID */
 158        u8 inqd_prl[4]; /* Product Revision Level */
 159};
 160
 161/* Added for VPD 0x83 */
 162struct  tvpd_id_descriptor_type_1 {
 163        u8 codeset:4;           /* VPD_CODE_SET */
 164        u8 reserved:4;
 165        u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
 166        u8 reserved2:4;
 167        u8 reserved3;
 168        u8 identifierlength;
 169        u8 venid[8];
 170        u8 productid[16];
 171        u8 serialnumber[8];     /* SN in ASCII */
 172
 173};
 174
 175struct tvpd_id_descriptor_type_2 {
 176        u8 codeset:4;           /* VPD_CODE_SET */
 177        u8 reserved:4;
 178        u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
 179        u8 reserved2:4;
 180        u8 reserved3;
 181        u8 identifierlength;
 182        struct teu64id {
 183                u32 Serial;
 184                 /* The serial number is supposed to be 40 bits,
 185                  * but we only support 32, so make the last byte zero. */
 186                u8 reserved;
 187                u8 venid[3];
 188        } eu64id;
 189
 190};
 191
 192struct tvpd_id_descriptor_type_3 {
 193        u8 codeset : 4;          /* VPD_CODE_SET */
 194        u8 reserved : 4;
 195        u8 identifiertype : 4;   /* VPD_IDENTIFIER_TYPE */
 196        u8 reserved2 : 4;
 197        u8 reserved3;
 198        u8 identifierlength;
 199        u8 Identifier[16];
 200};
 201
 202struct tvpd_page83 {
 203        u8 DeviceType:5;
 204        u8 DeviceTypeQualifier:3;
 205        u8 PageCode;
 206        u8 reserved;
 207        u8 PageLength;
 208        struct tvpd_id_descriptor_type_1 type1;
 209        struct tvpd_id_descriptor_type_2 type2;
 210        struct tvpd_id_descriptor_type_3 type3;
 211};
 212
 213/*
 214 *              M O D U L E   G L O B A L S
 215 */
 216
 217static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
 218static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
 219static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
 220static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
 221                                struct aac_raw_io2 *rio2, int sg_max);
 222static long aac_build_sghba(struct scsi_cmnd *scsicmd,
 223                                struct aac_hba_cmd_req *hbacmd,
 224                                int sg_max, u64 sg_address);
 225static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
 226                                int pages, int nseg, int nseg_new);
 227static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
 228static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
 229#ifdef AAC_DETAILED_STATUS_INFO
 230static char *aac_get_status_string(u32 status);
 231#endif
 232
 233/*
 234 *      Non-DASD selection is handled entirely in aachba now
 235 */
 236
 237static int nondasd = -1;
 238static int aac_cache = 2;       /* WCE=0 to avoid performance problems */
 239static int dacmode = -1;
 240int aac_msi;
 241int aac_commit = -1;
 242int startup_timeout = 180;
 243int aif_timeout = 120;
 244int aac_sync_mode;  /* Only Sync. transfer - disabled */
 245static int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
 246
 247module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
 248MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
 249        " 0=off, 1=on");
 250module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
 251MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
 252        " 0=off, 1=on");
 253module_param(nondasd, int, S_IRUGO|S_IWUSR);
 254MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
 255        " 0=off, 1=on");
 256module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
 257MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
 258        "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
 259        "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
 260        "\tbit 2 - Disable only if Battery is protecting Cache");
 261module_param(dacmode, int, S_IRUGO|S_IWUSR);
 262MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
 263        " 0=off, 1=on");
 264module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
 265MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
 266        " adapter for foreign arrays.\n"
 267        "This is typically needed in systems that do not have a BIOS."
 268        " 0=off, 1=on");
 269module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
 270MODULE_PARM_DESC(msi, "IRQ handling."
 271        " 0=PIC(default), 1=MSI, 2=MSI-X");
 272module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
 273MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
 274        " adapter to have its kernel up and\n"
 275        "running. This is typically adjusted for large systems that do not"
 276        " have a BIOS.");
 277module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
 278MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
 279        " applications to pick up AIFs before\n"
 280        "deregistering them. This is typically adjusted for heavily burdened"
 281        " systems.");
 282
 283int aac_fib_dump;
 284module_param(aac_fib_dump, int, 0644);
 285MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");
 286
 287int numacb = -1;
 288module_param(numacb, int, S_IRUGO|S_IWUSR);
 289MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
 290        " blocks (FIB) allocated. Valid values are 512 and down. Default is"
 291        " to use suggestion from Firmware.");
 292
 293static int acbsize = -1;
 294module_param(acbsize, int, S_IRUGO|S_IWUSR);
 295MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
 296        " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
 297        " suggestion from Firmware.");
 298
 299int update_interval = 30 * 60;
 300module_param(update_interval, int, S_IRUGO|S_IWUSR);
 301MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
 302        " updates issued to adapter.");
 303
 304int check_interval = 60;
 305module_param(check_interval, int, S_IRUGO|S_IWUSR);
 306MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
 307        " checks.");
 308
 309int aac_check_reset = 1;
 310module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
 311MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
 312        " adapter. A value of -1 forces the reset even on adapters programmed"
 313        " to ignore it.");
 314
 315int expose_physicals = -1;
 316module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
 317MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
 318        " -1=protect, 0=off, 1=on");
 319
 320int aac_reset_devices;
 321module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
 322MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
 323
 324static int aac_wwn = 1;
 325module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
 326MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
 327        "\t0 - Disable\n"
 328        "\t1 - Array Meta Data Signature (default)\n"
 329        "\t2 - Adapter Serial Number");
 330
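
    /*
     * Example (hypothetical values): the options above are ordinary module
     * parameters, so they can be set at load time, e.g.
     *   modprobe aacraid cache=2 msi=1 expose_physicals=0
     * or persistently via a modprobe.d configuration file.
     */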
 331
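    /*
     * aac_valid_context - sanity check a command in a fib completion callback.
     * Verifies that the scsi_cmnd and its scsi_device are still valid before
     * the callback dereferences them; if either is gone, the fib is completed
     * here and 0 is returned so the caller bails out.
     */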
 332static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
 333                struct fib *fibptr) {
 334        struct scsi_device *device;
 335
 336        if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
 337                dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
 338                aac_fib_complete(fibptr);
 339                return 0;
 340        }
 341        scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
 342        device = scsicmd->device;
 343        if (unlikely(!device)) {
 344                dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
 345                aac_fib_complete(fibptr);
 346                return 0;
 347        }
 348        return 1;
 349}
 350
 351/**
 352 *      aac_get_config_status   -       check the adapter configuration
 353 *      @dev: aac driver data
 354 *      @commit_flag: force sending CT_COMMIT_CONFIG
 355 *
 356 *      Query config status, and commit the configuration if needed.
 357 */
 358int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 359{
 360        int status = 0;
 361        struct fib * fibptr;
 362
 363        if (!(fibptr = aac_fib_alloc(dev)))
 364                return -ENOMEM;
 365
 366        aac_fib_init(fibptr);
 367        {
 368                struct aac_get_config_status *dinfo;
 369                dinfo = (struct aac_get_config_status *) fib_data(fibptr);
 370
 371                dinfo->command = cpu_to_le32(VM_ContainerConfig);
 372                dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
 373                dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
 374        }
 375
 376        status = aac_fib_send(ContainerCommand,
 377                            fibptr,
 378                            sizeof (struct aac_get_config_status),
 379                            FsaNormal,
 380                            1, 1,
 381                            NULL, NULL);
 382        if (status < 0) {
 383                printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
 384        } else {
 385                struct aac_get_config_status_resp *reply
 386                  = (struct aac_get_config_status_resp *) fib_data(fibptr);
 387                dprintk((KERN_WARNING
 388                  "aac_get_config_status: response=%d status=%d action=%d\n",
 389                  le32_to_cpu(reply->response),
 390                  le32_to_cpu(reply->status),
 391                  le32_to_cpu(reply->data.action)));
 392                if ((le32_to_cpu(reply->response) != ST_OK) ||
 393                     (le32_to_cpu(reply->status) != CT_OK) ||
 394                     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
 395                        printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
 396                        status = -EINVAL;
 397                }
 398        }
 399        /* Do not set XferState to zero unless we receive a response from F/W */
 400        if (status >= 0)
 401                aac_fib_complete(fibptr);
 402
 403        /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
 404        if (status >= 0) {
 405                if ((aac_commit == 1) || commit_flag) {
 406                        struct aac_commit_config * dinfo;
 407                        aac_fib_init(fibptr);
 408                        dinfo = (struct aac_commit_config *) fib_data(fibptr);
 409
 410                        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 411                        dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
 412
 413                        status = aac_fib_send(ContainerCommand,
 414                                    fibptr,
 415                                    sizeof (struct aac_commit_config),
 416                                    FsaNormal,
 417                                    1, 1,
 418                                    NULL, NULL);
 419                        /* Do not set XferState to zero unless
 420                         * we receive a response from F/W */
 421                        if (status >= 0)
 422                                aac_fib_complete(fibptr);
 423                } else if (aac_commit == 0) {
 424                        printk(KERN_WARNING
 425                          "aac_get_config_status: Foreign device configurations are being ignored\n");
 426                }
 427        }
 428        /* FIB should be freed only after getting the response from the F/W */
 429        if (status != -ERESTARTSYS)
 430                aac_fib_free(fibptr);
 431        return status;
 432}
 433
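    /*
     * aac_expose_phy_device - patch the INQUIRY response so a hidden physical
     * disk (peripheral qualifier bit set, device type TYPE_DISK) is reported
     * as connected by clearing the qualifier bit in byte 0.
     */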
 434static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
 435{
 436        char inq_data;
 437        scsi_sg_copy_to_buffer(scsicmd,  &inq_data, sizeof(inq_data));
 438        if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
 439                inq_data &= 0xdf;
 440                scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
 441        }
 442}
 443
 444/**
 445 *      aac_get_containers      -       list containers
 446 *      @dev: aac driver data
 447 *
 448 *      Make a list of all containers on this controller
 449 */
 450int aac_get_containers(struct aac_dev *dev)
 451{
 452        struct fsa_dev_info *fsa_dev_ptr;
 453        u32 index;
 454        int status = 0;
 455        struct fib * fibptr;
 456        struct aac_get_container_count *dinfo;
 457        struct aac_get_container_count_resp *dresp;
 458        int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 459
 460        if (!(fibptr = aac_fib_alloc(dev)))
 461                return -ENOMEM;
 462
 463        aac_fib_init(fibptr);
 464        dinfo = (struct aac_get_container_count *) fib_data(fibptr);
 465        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 466        dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
 467
 468        status = aac_fib_send(ContainerCommand,
 469                    fibptr,
 470                    sizeof (struct aac_get_container_count),
 471                    FsaNormal,
 472                    1, 1,
 473                    NULL, NULL);
 474        if (status >= 0) {
 475                dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
 476                maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
 477                if (fibptr->dev->supplement_adapter_info.supported_options2 &
 478                    AAC_OPTION_SUPPORTED_240_VOLUMES) {
 479                        maximum_num_containers =
 480                                le32_to_cpu(dresp->MaxSimpleVolumes);
 481                }
 482                aac_fib_complete(fibptr);
 483        }
 484        /* FIB should be freed only after getting the response from the F/W */
 485        if (status != -ERESTARTSYS)
 486                aac_fib_free(fibptr);
 487
 488        if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
 489                maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 490        if (dev->fsa_dev == NULL ||
 491                dev->maximum_num_containers != maximum_num_containers) {
 492
 493                fsa_dev_ptr = dev->fsa_dev;
 494
 495                dev->fsa_dev = kcalloc(maximum_num_containers,
 496                                        sizeof(*fsa_dev_ptr), GFP_KERNEL);
 497
 498                kfree(fsa_dev_ptr);
 499                fsa_dev_ptr = NULL;
 500
 501
 502                if (!dev->fsa_dev)
 503                        return -ENOMEM;
 504
 505                dev->maximum_num_containers = maximum_num_containers;
 506        }
 507        for (index = 0; index < dev->maximum_num_containers; index++) {
 508                dev->fsa_dev[index].devname[0] = '\0';
 509                dev->fsa_dev[index].valid = 0;
 510
 511                status = aac_probe_container(dev, index);
 512
 513                if (status < 0) {
 514                        printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
 515                        break;
 516                }
 517        }
 518        return status;
 519}
 520
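    /*
     * get_container_name_callback - completion handler for CT_READ_NAME.
     * On success the returned container name is space padded and copied into
     * the product id field of the INQUIRY data already staged in the command
     * buffer; the command is then completed with GOOD status.
     */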
 521static void get_container_name_callback(void *context, struct fib * fibptr)
 522{
 523        struct aac_get_name_resp * get_name_reply;
 524        struct scsi_cmnd * scsicmd;
 525
 526        scsicmd = (struct scsi_cmnd *) context;
 527
 528        if (!aac_valid_context(scsicmd, fibptr))
 529                return;
 530
 531        dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
 532        BUG_ON(fibptr == NULL);
 533
 534        get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
 535        /* Failure is irrelevant, using default value instead */
 536        if ((le32_to_cpu(get_name_reply->status) == CT_OK)
 537         && (get_name_reply->data[0] != '\0')) {
 538                char *sp = get_name_reply->data;
 539                int data_size = sizeof_field(struct aac_get_name_resp, data);
 540
 541                sp[data_size - 1] = '\0';
 542                while (*sp == ' ')
 543                        ++sp;
 544                if (*sp) {
 545                        struct inquiry_data inq;
 546                        char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
 547                        int count = sizeof(d);
 548                        char *dp = d;
 549                        do {
 550                                *dp++ = (*sp) ? *sp++ : ' ';
 551                        } while (--count > 0);
 552
 553                        scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
 554                        memcpy(inq.inqd_pid, d, sizeof(d));
 555                        scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
 556                }
 557        }
 558
 559        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 560
 561        aac_fib_complete(fibptr);
 562        scsicmd->scsi_done(scsicmd);
 563}
 564
 565/*
 566 *      aac_get_container_name  -       get container name, non-blocking.
 567 */
 568static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 569{
 570        int status;
 571        int data_size;
 572        struct aac_get_name *dinfo;
 573        struct fib * cmd_fibcontext;
 574        struct aac_dev * dev;
 575
 576        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 577
 578        data_size = sizeof_field(struct aac_get_name_resp, data);
 579
 580        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
 581
 582        aac_fib_init(cmd_fibcontext);
 583        dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
 584        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 585
 586        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 587        dinfo->type = cpu_to_le32(CT_READ_NAME);
 588        dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
 589        dinfo->count = cpu_to_le32(data_size - 1);
 590
 591        status = aac_fib_send(ContainerCommand,
 592                  cmd_fibcontext,
 593                  sizeof(struct aac_get_name_resp),
 594                  FsaNormal,
 595                  0, 1,
 596                  (fib_callback)get_container_name_callback,
 597                  (void *) scsicmd);
 598
 599        /*
 600         *      Check that the command was queued to the controller
 601         */
 602        if (status == -EINPROGRESS)
 603                return 0;
 604
 605        printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 606        aac_fib_complete(cmd_fibcontext);
 607        return -1;
 608}
 609
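    /*
     * aac_probe_container_callback2 - resume the original SCSI command once
     * the container probe has finished: re-issue it through aac_scsi_cmd() if
     * the container is valid, otherwise fail it with DID_NO_CONNECT.
     */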
 610static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
 611{
 612        struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
 613
 614        if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
 615                return aac_scsi_cmd(scsicmd);
 616
 617        scsicmd->result = DID_NO_CONNECT << 16;
 618        scsicmd->scsi_done(scsicmd);
 619        return 0;
 620}
 621
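    /*
     * _aac_probe_container2 - final stage of the container probe.  Parses the
     * mount response and records block size, identifier, type, capacity and
     * read-only state in the fsa_dev entry, then releases the fib and invokes
     * the completion callback stashed in scsicmd->SCp.ptr.
     */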
 622static void _aac_probe_container2(void * context, struct fib * fibptr)
 623{
 624        struct fsa_dev_info *fsa_dev_ptr;
 625        int (*callback)(struct scsi_cmnd *);
 626        struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
 627        int i;
 628
 629
 630        if (!aac_valid_context(scsicmd, fibptr))
 631                return;
 632
 633        scsicmd->SCp.Status = 0;
 634        fsa_dev_ptr = fibptr->dev->fsa_dev;
 635        if (fsa_dev_ptr) {
 636                struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
 637                __le32 sup_options2;
 638
 639                fsa_dev_ptr += scmd_id(scsicmd);
 640                sup_options2 =
 641                        fibptr->dev->supplement_adapter_info.supported_options2;
 642
 643                if ((le32_to_cpu(dresp->status) == ST_OK) &&
 644                    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
 645                    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
 646                        if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
 647                                dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
 648                                fsa_dev_ptr->block_size = 0x200;
 649                        } else {
 650                                fsa_dev_ptr->block_size =
 651                                        le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
 652                        }
 653                        for (i = 0; i < 16; i++)
 654                                fsa_dev_ptr->identifier[i] =
 655                                        dresp->mnt[0].fileinfo.bdevinfo
 656                                                                .identifier[i];
 657                        fsa_dev_ptr->valid = 1;
 658                        /* sense_key holds the current state of the spin-up */
 659                        if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
 660                                fsa_dev_ptr->sense_data.sense_key = NOT_READY;
 661                        else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
 662                                fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
 663                        fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
 664                        fsa_dev_ptr->size
 665                          = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
 666                            (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
 667                        fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
 668                }
 669                if ((fsa_dev_ptr->valid & 1) == 0)
 670                        fsa_dev_ptr->valid = 0;
 671                scsicmd->SCp.Status = le32_to_cpu(dresp->count);
 672        }
 673        aac_fib_complete(fibptr);
 674        aac_fib_free(fibptr);
 675        callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
 676        scsicmd->SCp.ptr = NULL;
 677        (*callback)(scsicmd);
 678        return;
 679}
 680
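    /*
     * _aac_probe_container1 - intermediate stage of the container probe.  For
     * adapters without 2TB support a successful VM_NameServe reply is handed
     * straight to _aac_probe_container2; otherwise the query is re-sent as
     * VM_NameServeAllBlk or VM_NameServe64 to obtain 64-bit capacity and
     * block size information.
     */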
 681static void _aac_probe_container1(void * context, struct fib * fibptr)
 682{
 683        struct scsi_cmnd * scsicmd;
 684        struct aac_mount * dresp;
 685        struct aac_query_mount *dinfo;
 686        int status;
 687
 688        dresp = (struct aac_mount *) fib_data(fibptr);
 689        if (!aac_supports_2T(fibptr->dev)) {
 690                dresp->mnt[0].capacityhigh = 0;
 691                if ((le32_to_cpu(dresp->status) == ST_OK) &&
 692                        (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
 693                        _aac_probe_container2(context, fibptr);
 694                        return;
 695                }
 696        }
 697        scsicmd = (struct scsi_cmnd *) context;
 698
 699        if (!aac_valid_context(scsicmd, fibptr))
 700                return;
 701
 702        aac_fib_init(fibptr);
 703
 704        dinfo = (struct aac_query_mount *)fib_data(fibptr);
 705
 706        if (fibptr->dev->supplement_adapter_info.supported_options2 &
 707            AAC_OPTION_VARIABLE_BLOCK_SIZE)
 708                dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
 709        else
 710                dinfo->command = cpu_to_le32(VM_NameServe64);
 711
 712        dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 713        dinfo->type = cpu_to_le32(FT_FILESYS);
 714        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 715
 716        status = aac_fib_send(ContainerCommand,
 717                          fibptr,
 718                          sizeof(struct aac_query_mount),
 719                          FsaNormal,
 720                          0, 1,
 721                          _aac_probe_container2,
 722                          (void *) scsicmd);
 723        /*
 724         *      Check that the command was queued to the controller
 725         */
 726        if (status < 0 && status != -EINPROGRESS) {
 727                /* Inherit results from VM_NameServe, if any */
 728                dresp->status = cpu_to_le32(ST_OK);
 729                _aac_probe_container2(context, fibptr);
 730        }
 731}
 732
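    /*
     * _aac_probe_container - kick off an asynchronous container probe.  A fib
     * carrying a VM_NameServe (or VM_NameServeAllBlk) query is sent to the
     * adapter with _aac_probe_container1 as the completion routine; the final
     * callback is parked in scsicmd->SCp.ptr until the probe chain finishes.
     */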
 733static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
 734{
 735        struct fib * fibptr;
 736        int status = -ENOMEM;
 737
 738        if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
 739                struct aac_query_mount *dinfo;
 740
 741                aac_fib_init(fibptr);
 742
 743                dinfo = (struct aac_query_mount *)fib_data(fibptr);
 744
 745                if (fibptr->dev->supplement_adapter_info.supported_options2 &
 746                    AAC_OPTION_VARIABLE_BLOCK_SIZE)
 747                        dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
 748                else
 749                        dinfo->command = cpu_to_le32(VM_NameServe);
 750
 751                dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 752                dinfo->type = cpu_to_le32(FT_FILESYS);
 753                scsicmd->SCp.ptr = (char *)callback;
 754                scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 755
 756                status = aac_fib_send(ContainerCommand,
 757                          fibptr,
 758                          sizeof(struct aac_query_mount),
 759                          FsaNormal,
 760                          0, 1,
 761                          _aac_probe_container1,
 762                          (void *) scsicmd);
 763                /*
 764                 *      Check that the command was queued to the controller
 765                 */
 766                if (status == -EINPROGRESS)
 767                        return 0;
 768
 769                if (status < 0) {
 770                        scsicmd->SCp.ptr = NULL;
 771                        aac_fib_complete(fibptr);
 772                        aac_fib_free(fibptr);
 773                }
 774        }
 775        if (status < 0) {
 776                struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
 777                if (fsa_dev_ptr) {
 778                        fsa_dev_ptr += scmd_id(scsicmd);
 779                        if ((fsa_dev_ptr->valid & 1) == 0) {
 780                                fsa_dev_ptr->valid = 0;
 781                                return (*callback)(scsicmd);
 782                        }
 783                }
 784        }
 785        return status;
 786}
 787
 788/**
 789 *      aac_probe_container             -       query a logical volume
 790 * @scsicmd: the scsi command block
 791 *
 792 *      Queries the controller about the given volume. The volume information
 793 *      is updated in the struct fsa_dev_info structure rather than returned.
 794 */
 795static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
 796{
 797        scsicmd->device = NULL;
 798        return 0;
 799}
 800
 801static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
 802{
 803        aac_probe_container_callback1(scsi_cmnd);
 804}
 805
 806int aac_probe_container(struct aac_dev *dev, int cid)
 807{
 808        struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
 809        struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
 810        int status;
 811
 812        if (!scsicmd || !scsidev) {
 813                kfree(scsicmd);
 814                kfree(scsidev);
 815                return -ENOMEM;
 816        }
 817        scsicmd->scsi_done = aac_probe_container_scsi_done;
 818
 819        scsicmd->device = scsidev;
 820        scsidev->sdev_state = 0;
 821        scsidev->id = cid;
 822        scsidev->host = dev->scsi_host_ptr;
 823
 824        if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
 825                while (scsicmd->device == scsidev)
 826                        schedule();
 827        kfree(scsidev);
 828        status = scsicmd->SCp.Status;
 829        kfree(scsicmd);
 830        return status;
 831}
 832
 833/* Local Structure to set SCSI inquiry data strings */
 834struct scsi_inq {
 835        char vid[8];         /* Vendor ID */
 836        char pid[16];        /* Product ID */
 837        char prl[4];         /* Product Revision Level */
 838};
 839
 840/**
 841 *      inqstrcpy       -       string copy
 842 *      @a:     string to copy from
 843 *      @b:     string to copy to
 844 *
 845 *      Copy a String from one location to another
 846 *      without copying \0
 847 */
 848
 849static void inqstrcpy(char *a, char *b)
 850{
 851
 852        while (*a != (char)0)
 853                *b++ = *a++;
 854}
 855
 856static char *container_types[] = {
 857        "None",
 858        "Volume",
 859        "Mirror",
 860        "Stripe",
 861        "RAID5",
 862        "SSRW",
 863        "SSRO",
 864        "Morph",
 865        "Legacy",
 866        "RAID4",
 867        "RAID10",
 868        "RAID00",
 869        "V-MIRRORS",
 870        "PSEUDO R4",
 871        "RAID50",
 872        "RAID5D",
 873        "RAID5D0",
 874        "RAID1E",
 875        "RAID6",
 876        "RAID60",
 877        "Unknown"
 878};
 879
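    /*
     * get_container_type - map a container type index to its printable name,
     * clamping out-of-range values to "Unknown".
     */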
 880char * get_container_type(unsigned tindex)
 881{
 882        if (tindex >= ARRAY_SIZE(container_types))
 883                tindex = ARRAY_SIZE(container_types) - 1;
 884        return container_types[tindex];
 885}
 886
 887/* Function: setinqstr
 888 *
 889 * Arguments: [1] pointer to struct aac_dev [2] pointer to void [3] int
 890 *
 891 * Purpose: Sets SCSI inquiry data strings for vendor, product
 892 * and revision level. Allows strings to be set in platform dependent
 893 * files instead of in OS dependent driver source.
 894 */
 895
 896static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 897{
 898        struct scsi_inq *str;
 899        struct aac_supplement_adapter_info *sup_adap_info;
 900
 901        sup_adap_info = &dev->supplement_adapter_info;
 902        str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
 903        memset(str, ' ', sizeof(*str));
 904
 905        if (sup_adap_info->adapter_type_text[0]) {
 906                int c;
 907                char *cp;
 908                char *cname = kmemdup(sup_adap_info->adapter_type_text,
 909                                sizeof(sup_adap_info->adapter_type_text),
 910                                                                GFP_ATOMIC);
 911                if (!cname)
 912                        return;
 913
 914                cp = cname;
 915                if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
 916                        inqstrcpy("SMC", str->vid);
 917                else {
 918                        c = sizeof(str->vid);
 919                        while (*cp && *cp != ' ' && --c)
 920                                ++cp;
 921                        c = *cp;
 922                        *cp = '\0';
 923                        inqstrcpy(cname, str->vid);
 924                        *cp = c;
 925                        while (*cp && *cp != ' ')
 926                                ++cp;
 927                }
 928                while (*cp == ' ')
 929                        ++cp;
 930                /* last six chars reserved for vol type */
 931                if (strlen(cp) > sizeof(str->pid))
 932                        cp[sizeof(str->pid)] = '\0';
 933                inqstrcpy (cp, str->pid);
 934
 935                kfree(cname);
 936        } else {
 937                struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
 938
 939                inqstrcpy (mp->vname, str->vid);
 940                /* last six chars reserved for vol type */
 941                inqstrcpy (mp->model, str->pid);
 942        }
 943
 944        if (tindex < ARRAY_SIZE(container_types)){
 945                char *findit = str->pid;
 946
 947                for ( ; *findit != ' '; findit++); /* walk till we find a space */
 948                /* RAID is superfluous in the context of a RAID device */
 949                if (memcmp(findit-4, "RAID", 4) == 0)
 950                        *(findit -= 4) = ' ';
 951                if (((findit - str->pid) + strlen(container_types[tindex]))
 952                 < (sizeof(str->pid) + sizeof(str->prl)))
 953                        inqstrcpy (container_types[tindex], findit + 1);
 954        }
 955        inqstrcpy ("V1.0", str->prl);
 956}
 957
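    /*
     * build_vpd83_type3 - fill the type 3 designator of VPD page 0x83 with the
     * 16 byte identifier of the container addressed by the command.
     */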
 958static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
 959                struct aac_dev *dev, struct scsi_cmnd *scsicmd)
 960{
 961        int container;
 962
 963        vpdpage83data->type3.codeset = 1;
 964        vpdpage83data->type3.identifiertype = 3;
 965        vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
 966                        - 4;
 967
 968        for (container = 0; container < dev->maximum_num_containers;
 969                        container++) {
 970
 971                if (scmd_id(scsicmd) == container) {
 972                        memcpy(vpdpage83data->type3.Identifier,
 973                                        dev->fsa_dev[container].identifier,
 974                                        16);
 975                        break;
 976                }
 977        }
 978}
 979
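    /*
     * get_container_serial_callback - completion handler for
     * CT_CID_TO_32BITS_UID.  Builds VPD page 0x83 (device identification) or
     * page 0x80 (unit serial number) data from the returned uid and copies it
     * into the command buffer before completing the command.
     */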
 980static void get_container_serial_callback(void *context, struct fib * fibptr)
 981{
 982        struct aac_get_serial_resp * get_serial_reply;
 983        struct scsi_cmnd * scsicmd;
 984
 985        BUG_ON(fibptr == NULL);
 986
 987        scsicmd = (struct scsi_cmnd *) context;
 988        if (!aac_valid_context(scsicmd, fibptr))
 989                return;
 990
 991        get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
 992        /* Failure is irrelevant, using default value instead */
 993        if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
 994                /*Check to see if it's for VPD 0x83 or 0x80 */
 995                if (scsicmd->cmnd[2] == 0x83) {
 996                        /* vpd page 0x83 - Device Identification Page */
 997                        struct aac_dev *dev;
 998                        int i;
 999                        struct tvpd_page83 vpdpage83data;
1000
1001                        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1002
1003                        memset(((u8 *)&vpdpage83data), 0,
1004                               sizeof(vpdpage83data));
1005
1006                        /* DIRECT_ACCESS_DEVICE */
1007                        vpdpage83data.DeviceType = 0;
1008                        /* DEVICE_CONNECTED */
1009                        vpdpage83data.DeviceTypeQualifier = 0;
1010                        /* VPD_DEVICE_IDENTIFIERS */
1011                        vpdpage83data.PageCode = 0x83;
1012                        vpdpage83data.reserved = 0;
1013                        vpdpage83data.PageLength =
1014                                sizeof(vpdpage83data.type1) +
1015                                sizeof(vpdpage83data.type2);
1016
1017                        /* VPD 83 Type 3 is not supported for ARC */
1018                        if (dev->sa_firmware)
1019                                vpdpage83data.PageLength +=
1020                                sizeof(vpdpage83data.type3);
1021
1022                        /* T10 Vendor Identifier Field Format */
1023                        /* VpdcodesetAscii */
1024                        vpdpage83data.type1.codeset = 2;
1025                        /* VpdIdentifierTypeVendorId */
1026                        vpdpage83data.type1.identifiertype = 1;
1027                        vpdpage83data.type1.identifierlength =
1028                                sizeof(vpdpage83data.type1) - 4;
1029
1030                        /* "ADAPTEC " for adaptec */
1031                        memcpy(vpdpage83data.type1.venid,
1032                                "ADAPTEC ",
1033                                sizeof(vpdpage83data.type1.venid));
1034                        memcpy(vpdpage83data.type1.productid,
1035                                "ARRAY           ",
1036                                sizeof(
1037                                vpdpage83data.type1.productid));
1038
1039                        /* Convert to ascii based serial number.
1040                         * The LSB is at the end.
1041                         */
1042                        for (i = 0; i < 8; i++) {
1043                                u8 temp =
1044                                        (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
1045                                if (temp  > 0x9) {
1046                                        vpdpage83data.type1.serialnumber[i] =
1047                                                        'A' + (temp - 0xA);
1048                                } else {
1049                                        vpdpage83data.type1.serialnumber[i] =
1050                                                        '0' + temp;
1051                                }
1052                        }
1053
1054                        /* VpdCodeSetBinary */
1055                        vpdpage83data.type2.codeset = 1;
1056                        /* VpdidentifiertypeEUI64 */
1057                        vpdpage83data.type2.identifiertype = 2;
1058                        vpdpage83data.type2.identifierlength =
1059                                sizeof(vpdpage83data.type2) - 4;
1060
1061                        vpdpage83data.type2.eu64id.venid[0] = 0xD0;
1062                        vpdpage83data.type2.eu64id.venid[1] = 0;
1063                        vpdpage83data.type2.eu64id.venid[2] = 0;
1064
1065                        vpdpage83data.type2.eu64id.Serial =
1066                                                        get_serial_reply->uid;
1067                        vpdpage83data.type2.eu64id.reserved = 0;
1068
1069                        /*
1070                         * VpdIdentifierTypeFCPHName
1071                         * VPD 0x83 Type 3 not supported for ARC
1072                         */
1073                        if (dev->sa_firmware) {
1074                                build_vpd83_type3(&vpdpage83data,
1075                                                dev, scsicmd);
1076                        }
1077
1078                        /* Move the inquiry data to the response buffer. */
1079                        scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
1080                                                 sizeof(vpdpage83data));
1081                } else {
1082                        /* It must be for VPD 0x80 */
1083                        char sp[13];
1084                        /* EVPD bit set */
1085                        sp[0] = INQD_PDT_DA;
1086                        sp[1] = scsicmd->cmnd[2];
1087                        sp[2] = 0;
1088                        sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
1089                                le32_to_cpu(get_serial_reply->uid));
1090                        scsi_sg_copy_from_buffer(scsicmd, sp,
1091                                                 sizeof(sp));
1092                }
1093        }
1094
1095        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1096
1097        aac_fib_complete(fibptr);
1098        scsicmd->scsi_done(scsicmd);
1099}
1100
1101/*
1102 *      aac_get_container_serial - get container serial, non-blocking.
1103 */
1104static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
1105{
1106        int status;
1107        struct aac_get_serial *dinfo;
1108        struct fib * cmd_fibcontext;
1109        struct aac_dev * dev;
1110
1111        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1112
1113        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
1114
1115        aac_fib_init(cmd_fibcontext);
1116        dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
1117
1118        dinfo->command = cpu_to_le32(VM_ContainerConfig);
1119        dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
1120        dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
1121        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1122
1123        status = aac_fib_send(ContainerCommand,
1124                  cmd_fibcontext,
1125                  sizeof(struct aac_get_serial_resp),
1126                  FsaNormal,
1127                  0, 1,
1128                  (fib_callback) get_container_serial_callback,
1129                  (void *) scsicmd);
1130
1131        /*
1132         *      Check that the command was queued to the controller
1133         */
1134        if (status == -EINPROGRESS)
1135                return 0;
1136
1137        printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
1138        aac_fib_complete(cmd_fibcontext);
1139        return -1;
1140}
1141
1142/* Function: setinqserial
1143 *
1144 * Arguments: [1] pointer to struct aac_dev [2] pointer to void [3] int
1145 *
1146 * Purpose: Sets SCSI Unit Serial number.
1147 *          This is a fake. We should read a proper
1148 *          serial number from the container. <SuSE>But
1149 *          without docs it's quite hard to do it :-)
1150 *          So this will have to do in the meantime.</SuSE>
1151 */
1152
1153static int setinqserial(struct aac_dev *dev, void *data, int cid)
1154{
1155        /*
1156         *      This breaks array migration.
1157         */
1158        return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
1159                        le32_to_cpu(dev->adapter_info.serial[0]), cid);
1160}
1161
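    /*
     * set_sense - fill a fixed format (0x70) sense buffer with the given sense
     * key, ASC and ASCQ; for ILLEGAL_REQUEST the sense-key-specific bytes are
     * also populated with the bit and field pointers.
     */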
1162static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
1163        u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
1164{
1165        u8 *sense_buf = (u8 *)sense_data;
1166        /* Sense data valid, err code 70h */
1167        sense_buf[0] = 0x70; /* No info field */
1168        sense_buf[1] = 0;       /* Segment number, always zero */
1169
1170        sense_buf[2] = sense_key;       /* Sense key */
1171
1172        sense_buf[12] = sense_code;     /* Additional sense code */
1173        sense_buf[13] = a_sense_code;   /* Additional sense code qualifier */
1174
1175        if (sense_key == ILLEGAL_REQUEST) {
1176                sense_buf[7] = 10;      /* Additional sense length */
1177
1178                sense_buf[15] = bit_pointer;
1179                /* Illegal parameter is in the parameter block */
1180                if (sense_code == SENCODE_INVALID_CDB_FIELD)
1181                        sense_buf[15] |= 0xc0;/* Std sense key specific field */
1182                /* Illegal parameter is in the CDB block */
1183                sense_buf[16] = field_pointer >> 8;     /* MSB */
1184                sense_buf[17] = field_pointer;          /* LSB */
1185        } else
1186                sense_buf[7] = 6;       /* Additional sense length */
1187}
1188
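    /*
     * aac_bounds_32 - reject LBAs that do not fit in 32 bits: the command is
     * completed with CHECK CONDITION (HARDWARE ERROR sense) and 1 is returned.
     * aac_bounds_64 below performs no such check.
     */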
1189static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
1190{
1191        if (lba & 0xffffffff00000000LL) {
1192                int cid = scmd_id(cmd);
1193                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
1194                cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1195                        SAM_STAT_CHECK_CONDITION;
1196                set_sense(&dev->fsa_dev[cid].sense_data,
1197                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1198                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1199                memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1200                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1201                             SCSI_SENSE_BUFFERSIZE));
1202                cmd->scsi_done(cmd);
1203                return 1;
1204        }
1205        return 0;
1206}
1207
1208static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
1209{
1210        return 0;
1211}
1212
1213static void io_callback(void *context, struct fib * fibptr);
1214
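    /*
     * aac_read_raw_io - build and send a raw-I/O read fib.  On TYPE2/TYPE3
     * communication interfaces (and not in sync mode) the RAW_IO2 format with
     * ieee1212 scatter-gather elements is used, otherwise the older RAW_IO
     * format; io_callback() completes the command.
     */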
1215static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1216{
1217        struct aac_dev *dev = fib->dev;
1218        u16 fibsize, command;
1219        long ret;
1220
1221        aac_fib_init(fib);
1222        if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
1223                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
1224                !dev->sync_mode) {
1225                struct aac_raw_io2 *readcmd2;
1226                readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
1227                memset(readcmd2, 0, sizeof(struct aac_raw_io2));
1228                readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
1229                readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1230                readcmd2->byteCount = cpu_to_le32(count *
1231                        dev->fsa_dev[scmd_id(cmd)].block_size);
1232                readcmd2->cid = cpu_to_le16(scmd_id(cmd));
1233                readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
1234                ret = aac_build_sgraw2(cmd, readcmd2,
1235                                dev->scsi_host_ptr->sg_tablesize);
1236                if (ret < 0)
1237                        return ret;
1238                command = ContainerRawIo2;
1239                fibsize = sizeof(struct aac_raw_io2) +
1240                        ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
1241        } else {
1242                struct aac_raw_io *readcmd;
1243                readcmd = (struct aac_raw_io *) fib_data(fib);
1244                readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1245                readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1246                readcmd->count = cpu_to_le32(count *
1247                        dev->fsa_dev[scmd_id(cmd)].block_size);
1248                readcmd->cid = cpu_to_le16(scmd_id(cmd));
1249                readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
1250                readcmd->bpTotal = 0;
1251                readcmd->bpComplete = 0;
1252                ret = aac_build_sgraw(cmd, &readcmd->sg);
1253                if (ret < 0)
1254                        return ret;
1255                command = ContainerRawIo;
1256                fibsize = sizeof(struct aac_raw_io) +
1257                        ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
1258        }
1259
1260        BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
1261        /*
1262         *      Now send the Fib to the adapter
1263         */
1264        return aac_fib_send(command,
1265                          fib,
1266                          fibsize,
1267                          FsaNormal,
1268                          0, 1,
1269                          (fib_callback) io_callback,
1270                          (void *) cmd);
1271}
1272
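    /*
     * aac_read_block64 - read using the VM_CtHostRead64 container command with
     * a 64-bit scatter-gather list; the sector count is limited to 16 bits.
     */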
1273static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1274{
1275        u16 fibsize;
1276        struct aac_read64 *readcmd;
1277        long ret;
1278
1279        aac_fib_init(fib);
1280        readcmd = (struct aac_read64 *) fib_data(fib);
1281        readcmd->command = cpu_to_le32(VM_CtHostRead64);
1282        readcmd->cid = cpu_to_le16(scmd_id(cmd));
1283        readcmd->sector_count = cpu_to_le16(count);
1284        readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1285        readcmd->pad   = 0;
1286        readcmd->flags = 0;
1287
1288        ret = aac_build_sg64(cmd, &readcmd->sg);
1289        if (ret < 0)
1290                return ret;
1291        fibsize = sizeof(struct aac_read64) +
1292                ((le32_to_cpu(readcmd->sg.count) - 1) *
1293                 sizeof (struct sgentry64));
1294        BUG_ON (fibsize > (fib->dev->max_fib_size -
1295                                sizeof(struct aac_fibhdr)));
1296        /*
1297         *      Now send the Fib to the adapter
1298         */
1299        return aac_fib_send(ContainerCommand64,
1300                          fib,
1301                          fibsize,
1302                          FsaNormal,
1303                          0, 1,
1304                          (fib_callback) io_callback,
1305                          (void *) cmd);
1306}
1307
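    /*
     * aac_read_block - read using the VM_CtBlockRead container command with
     * 32-bit scatter-gather entries.
     */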
1308static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1309{
1310        u16 fibsize;
1311        struct aac_read *readcmd;
1312        struct aac_dev *dev = fib->dev;
1313        long ret;
1314
1315        aac_fib_init(fib);
1316        readcmd = (struct aac_read *) fib_data(fib);
1317        readcmd->command = cpu_to_le32(VM_CtBlockRead);
1318        readcmd->cid = cpu_to_le32(scmd_id(cmd));
1319        readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1320        readcmd->count = cpu_to_le32(count *
1321                dev->fsa_dev[scmd_id(cmd)].block_size);
1322
1323        ret = aac_build_sg(cmd, &readcmd->sg);
1324        if (ret < 0)
1325                return ret;
1326        fibsize = sizeof(struct aac_read) +
1327                        ((le32_to_cpu(readcmd->sg.count) - 1) *
1328                         sizeof (struct sgentry));
1329        BUG_ON (fibsize > (fib->dev->max_fib_size -
1330                                sizeof(struct aac_fibhdr)));
1331        /*
1332         *      Now send the Fib to the adapter
1333         */
1334        return aac_fib_send(ContainerCommand,
1335                          fib,
1336                          fibsize,
1337                          FsaNormal,
1338                          0, 1,
1339                          (fib_callback) io_callback,
1340                          (void *) cmd);
1341}
1342
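    /*
     * aac_write_raw_io - raw-I/O counterpart of aac_read_raw_io for writes.
     * The SUREWRITE flag is set for FUA requests unless the aac_cache module
     * parameter (or a battery protected cache) disables it.
     */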
1343static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1344{
1345        struct aac_dev *dev = fib->dev;
1346        u16 fibsize, command;
1347        long ret;
1348
1349        aac_fib_init(fib);
1350        if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
1351                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
1352                !dev->sync_mode) {
1353                struct aac_raw_io2 *writecmd2;
1354                writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
1355                memset(writecmd2, 0, sizeof(struct aac_raw_io2));
1356                writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
1357                writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1358                writecmd2->byteCount = cpu_to_le32(count *
1359                        dev->fsa_dev[scmd_id(cmd)].block_size);
1360                writecmd2->cid = cpu_to_le16(scmd_id(cmd));
1361                writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
1362                                                   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
1363                        cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
1364                        cpu_to_le16(RIO2_IO_TYPE_WRITE);
1365                ret = aac_build_sgraw2(cmd, writecmd2,
1366                                dev->scsi_host_ptr->sg_tablesize);
1367                if (ret < 0)
1368                        return ret;
1369                command = ContainerRawIo2;
1370                fibsize = sizeof(struct aac_raw_io2) +
1371                        ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
1372        } else {
1373                struct aac_raw_io *writecmd;
1374                writecmd = (struct aac_raw_io *) fib_data(fib);
1375                writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1376                writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1377                writecmd->count = cpu_to_le32(count *
1378                        dev->fsa_dev[scmd_id(cmd)].block_size);
1379                writecmd->cid = cpu_to_le16(scmd_id(cmd));
1380                writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
1381                                                   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
1382                        cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
1383                        cpu_to_le16(RIO_TYPE_WRITE);
1384                writecmd->bpTotal = 0;
1385                writecmd->bpComplete = 0;
1386                ret = aac_build_sgraw(cmd, &writecmd->sg);
1387                if (ret < 0)
1388                        return ret;
1389                command = ContainerRawIo;
1390                fibsize = sizeof(struct aac_raw_io) +
1391                        ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
1392        }
1393
1394        BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
1395        /*
1396         *      Now send the Fib to the adapter
1397         */
1398        return aac_fib_send(command,
1399                          fib,
1400                          fibsize,
1401                          FsaNormal,
1402                          0, 1,
1403                          (fib_callback) io_callback,
1404                          (void *) cmd);
1405}
1406
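/**
 *      aac_write_block64()     Build and send a 64-bit s/g block write
 *      @fib:           fib to use for the request
 *      @cmd:           SCSI command being serviced
 *      @lba:           starting logical block address
 *      @count:         number of blocks to write
 *      @fua:           force unit access (ignored by this interface)
 *
 *      Fill in a VM_CtHostWrite64 request with a 64-bit scatter/gather
 *      list and queue it as a ContainerCommand64.
 */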
1407static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1408{
1409        u16 fibsize;
1410        struct aac_write64 *writecmd;
1411        long ret;
1412
1413        aac_fib_init(fib);
1414        writecmd = (struct aac_write64 *) fib_data(fib);
1415        writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1416        writecmd->cid = cpu_to_le16(scmd_id(cmd));
1417        writecmd->sector_count = cpu_to_le16(count);
1418        writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1419        writecmd->pad   = 0;
1420        writecmd->flags = 0;
1421
1422        ret = aac_build_sg64(cmd, &writecmd->sg);
1423        if (ret < 0)
1424                return ret;
1425        fibsize = sizeof(struct aac_write64) +
1426                ((le32_to_cpu(writecmd->sg.count) - 1) *
1427                 sizeof (struct sgentry64));
1428        BUG_ON (fibsize > (fib->dev->max_fib_size -
1429                                sizeof(struct aac_fibhdr)));
1430        /*
1431         *      Now send the Fib to the adapter
1432         */
1433        return aac_fib_send(ContainerCommand64,
1434                          fib,
1435                          fibsize,
1436                          FsaNormal,
1437                          0, 1,
1438                          (fib_callback) io_callback,
1439                          (void *) cmd);
1440}
1441
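/**
 *      aac_write_block()       Build and send a 32-bit s/g block write
 *      @fib:           fib to use for the request
 *      @cmd:           SCSI command being serviced
 *      @lba:           starting logical block address
 *      @count:         number of blocks to write
 *      @fua:           force unit access (ignored by this interface)
 *
 *      Fill in a VM_CtBlockWrite request and queue it as a
 *      ContainerCommand with io_callback() as the completion routine.
 */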
1442static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1443{
1444        u16 fibsize;
1445        struct aac_write *writecmd;
1446        struct aac_dev *dev = fib->dev;
1447        long ret;
1448
1449        aac_fib_init(fib);
1450        writecmd = (struct aac_write *) fib_data(fib);
1451        writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1452        writecmd->cid = cpu_to_le32(scmd_id(cmd));
1453        writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1454        writecmd->count = cpu_to_le32(count *
1455                dev->fsa_dev[scmd_id(cmd)].block_size);
1456        writecmd->sg.count = cpu_to_le32(1);
1457        /* ->stable is not used - it used to indicate the type of write */
1458
1459        ret = aac_build_sg(cmd, &writecmd->sg);
1460        if (ret < 0)
1461                return ret;
1462        fibsize = sizeof(struct aac_write) +
1463                ((le32_to_cpu(writecmd->sg.count) - 1) *
1464                 sizeof (struct sgentry));
1465        BUG_ON (fibsize > (fib->dev->max_fib_size -
1466                                sizeof(struct aac_fibhdr)));
1467        /*
1468         *      Now send the Fib to the adapter
1469         */
1470        return aac_fib_send(ContainerCommand,
1471                          fib,
1472                          fibsize,
1473                          FsaNormal,
1474                          0, 1,
1475                          (fib_callback) io_callback,
1476                          (void *) cmd);
1477}
1478
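/**
 *      aac_scsi_common()       Initialize the common part of a SRB
 *      @fib:           fib to hold the pass-through request
 *      @cmd:           SCSI command being translated
 *
 *      Initialize the fib and fill in the fields shared by all SCSI
 *      pass-through requests: data direction flags, physical channel,
 *      target id, lun, timeout and CDB size.  The caller completes the
 *      scatter/gather list and CDB before sending.
 */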
1479static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
1480{
1481        struct aac_srb * srbcmd;
1482        u32 flag;
1483        u32 timeout;
1484        struct aac_dev *dev = fib->dev;
1485
1486        aac_fib_init(fib);
1487        switch(cmd->sc_data_direction){
1488        case DMA_TO_DEVICE:
1489                flag = SRB_DataOut;
1490                break;
1491        case DMA_BIDIRECTIONAL:
1492                flag = SRB_DataIn | SRB_DataOut;
1493                break;
1494        case DMA_FROM_DEVICE:
1495                flag = SRB_DataIn;
1496                break;
1497        case DMA_NONE:
1498        default:        /* shuts up some versions of gcc */
1499                flag = SRB_NoDataXfer;
1500                break;
1501        }
1502
1503        srbcmd = (struct aac_srb*) fib_data(fib);
1504        srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1505        srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
1506        srbcmd->id       = cpu_to_le32(scmd_id(cmd));
1507        srbcmd->lun      = cpu_to_le32(cmd->device->lun);
1508        srbcmd->flags    = cpu_to_le32(flag);
1509        timeout = cmd->request->timeout/HZ;
1510        if (timeout == 0)
1511                timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
1512        srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
1513        srbcmd->retry_limit = 0; /* Obsolete parameter */
1514        srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
1515        return srbcmd;
1516}
1517
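/**
 *      aac_construct_hbacmd()  Build a native HBA command request
 *      @fib:           fib whose hardware fib area receives the request
 *      @cmd:           SCSI command being translated
 *
 *      Prepare an aac_hba_cmd_req for the native (non-SRB) path: encode
 *      the data direction, lun and I-T nexus from the hba_map, copy the
 *      CDB, and point the firmware at the per-fib error buffer.  iu_type,
 *      request_id and the embedded descriptor count are filled in later
 *      by aac_hba_send()/aac_build_sghba().
 */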
1518static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
1519                                                        struct scsi_cmnd *cmd)
1520{
1521        struct aac_hba_cmd_req *hbacmd;
1522        struct aac_dev *dev;
1523        int bus, target;
1524        u64 address;
1525
1526        dev = (struct aac_dev *)cmd->device->host->hostdata;
1527
1528        hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
1529        memset(hbacmd, 0, 96);  /* clearing the full sizeof(*hbacmd) is not necessary */
1530        /* iu_type is a parameter of aac_hba_send */
1531        switch (cmd->sc_data_direction) {
1532        case DMA_TO_DEVICE:
1533                hbacmd->byte1 = 2;
1534                break;
1535        case DMA_FROM_DEVICE:
1536        case DMA_BIDIRECTIONAL:
1537                hbacmd->byte1 = 1;
1538                break;
1539        case DMA_NONE:
1540        default:
1541                break;
1542        }
1543        hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
1544
1545        bus = aac_logical_to_phys(scmd_channel(cmd));
1546        target = scmd_id(cmd);
1547        hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
1548
1549        /* we fill in reply_qid later in aac_src_deliver_message */
1550        /* we fill in iu_type, request_id later in aac_hba_send */
1551        /* we fill in emb_data_desc_count later in aac_build_sghba */
1552
1553        memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
1554        hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
1555
1556        address = (u64)fib->hw_error_pa;
1557        hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
1558        hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
1559        hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
1560
1561        return hbacmd;
1562}
1563
1564static void aac_srb_callback(void *context, struct fib * fibptr);
1565
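/**
 *      aac_scsi_64()   Send a SCSI pass-through with 64-bit s/g entries
 *      @fib:           fib to use for the request
 *      @cmd:           SCSI command being serviced
 *
 *      Complete the SRB built by aac_scsi_common() with a 64-bit
 *      scatter/gather list and the CDB, then queue it as a
 *      ScsiPortCommand64 with aac_srb_callback() as the completion
 *      routine.
 */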
1566static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
1567{
1568        u16 fibsize;
1569        struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1570        long ret;
1571
1572        ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
1573        if (ret < 0)
1574                return ret;
1575        srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1576
1577        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1578        memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
1579        /*
1580         *      Build Scatter/Gather list
1581         */
1582        fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
1583                ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
1584                 sizeof (struct sgentry64));
1585        BUG_ON (fibsize > (fib->dev->max_fib_size -
1586                                sizeof(struct aac_fibhdr)));
1587
1588        /*
1589         *      Now send the Fib to the adapter
1590         */
1591        return aac_fib_send(ScsiPortCommand64, fib,
1592                                fibsize, FsaNormal, 0, 1,
1593                                  (fib_callback) aac_srb_callback,
1594                                  (void *) cmd);
1595}
1596
1597static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
1598{
1599        u16 fibsize;
1600        struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1601        long ret;
1602
1603        ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
1604        if (ret < 0)
1605                return ret;
1606        srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1607
1608        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1609        memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
1610        /*
1611         *      Build Scatter/Gather list
1612         */
1613        fibsize = sizeof (struct aac_srb) +
1614                (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
1615                 sizeof (struct sgentry));
1616        BUG_ON (fibsize > (fib->dev->max_fib_size -
1617                                sizeof(struct aac_fibhdr)));
1618
1619        /*
1620         *      Now send the Fib to the adapter
1621         */
1622        return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
1623                                  (fib_callback) aac_srb_callback, (void *) cmd);
1624}
1625
1626static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
1627{
1628        if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
1629            (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
1630                return FAILED;
1631        return aac_scsi_32(fib, cmd);
1632}
1633
1634static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
1635{
1636        struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
1637        struct aac_dev *dev;
1638        long ret;
1639
1640        dev = (struct aac_dev *)cmd->device->host->hostdata;
1641
1642        ret = aac_build_sghba(cmd, hbacmd,
1643                dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
1644        if (ret < 0)
1645                return ret;
1646
1647        /*
1648         *      Now send the HBA command to the adapter
1649         */
1650        fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
1651                sizeof(struct aac_hba_sgl);
1652
1653        return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
1654                                  (fib_callback) aac_hba_callback,
1655                                  (void *) cmd);
1656}
1657
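/**
 *      aac_send_safw_bmic_cmd()        Send a BMIC pass-through request
 *      @dev:           adapter to send the request to
 *      @srbu:          SRB unit holding the prepared CDB and returning the reply
 *      @xfer_buf:      kernel buffer to map for the data transfer
 *      @xfer_len:      length of @xfer_buf in bytes
 *
 *      Wrap the caller's CDB in a ScsiPortCommand64 FIB aimed at the
 *      firmware's virtual device bus/target, map @xfer_buf as a single
 *      64-bit scatter/gather element and wait for completion.  The SRB
 *      reply is copied back into @srbu.  Returns 0 immediately when the
 *      adapter does not run sa_firmware.
 */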
1658static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
1659        struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
1660{
1661        struct fib      *fibptr;
1662        dma_addr_t      addr;
1663        int             rcode;
1664        int             fibsize;
1665        struct aac_srb  *srb;
1666        struct aac_srb_reply *srb_reply;
1667        struct sgmap64  *sg64;
1668        u32 vbus;
1669        u32 vid;
1670
1671        if (!dev->sa_firmware)
1672                return 0;
1673
1674        /* allocate FIB */
1675        fibptr = aac_fib_alloc(dev);
1676        if (!fibptr)
1677                return -ENOMEM;
1678
1679        aac_fib_init(fibptr);
1680        fibptr->hw_fib_va->header.XferState &=
1681                ~cpu_to_le32(FastResponseCapable);
1682
1683        fibsize  = sizeof(struct aac_srb) - sizeof(struct sgentry) +
1684                                                sizeof(struct sgentry64);
1685
1686        /* allocate DMA buffer for response */
1687        addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
1688                                                        DMA_BIDIRECTIONAL);
1689        if (dma_mapping_error(&dev->pdev->dev, addr)) {
1690                rcode = -ENOMEM;
1691                goto fib_error;
1692        }
1693
1694        srb = fib_data(fibptr);
1695        memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
1696
1697        vbus = (u32)le16_to_cpu(
1698                        dev->supplement_adapter_info.virt_device_bus);
1699        vid  = (u32)le16_to_cpu(
1700                        dev->supplement_adapter_info.virt_device_target);
1701
1702        /* set the common request fields */
1703        srb->channel            = cpu_to_le32(vbus);
1704        srb->id                 = cpu_to_le32(vid);
1705        srb->lun                = 0;
1706        srb->function           = cpu_to_le32(SRBF_ExecuteScsi);
1707        srb->timeout            = 0;
1708        srb->retry_limit        = 0;
1709        srb->cdb_size           = cpu_to_le32(16);
1710        srb->count              = cpu_to_le32(xfer_len);
1711
1712        sg64 = (struct sgmap64 *)&srb->sg;
1713        sg64->count             = cpu_to_le32(1);
1714        sg64->sg[0].addr[1]     = cpu_to_le32(upper_32_bits(addr));
1715        sg64->sg[0].addr[0]     = cpu_to_le32(lower_32_bits(addr));
1716        sg64->sg[0].count       = cpu_to_le32(xfer_len);
1717
1718        /*
1719         * Copy the updated data back for dumping or other later use if needed
1720         */
1721        memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
1722
1723        /* issue request to the controller */
1724        rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
1725                                        1, 1, NULL, NULL);
1726
1727        if (rcode == -ERESTARTSYS)
1728                rcode = -ERESTART;
1729
1730        if (unlikely(rcode < 0))
1731                goto bmic_error;
1732
1733        srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
1734        memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
1735
1736bmic_error:
1737        dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
1738fib_error:
1739        aac_fib_complete(fibptr);
1740        aac_fib_free(fibptr);
1741        return rcode;
1742}
1743
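/**
 *      aac_set_safw_target_qd()        Set the queue depth for a raw device
 *      @dev:           aac_dev structure
 *      @bus:           mapped bus of the device
 *      @target:        mapped target of the device
 *
 *      For AAC_DEVTYPE_NATIVE_RAW devices, take the queue depth limit
 *      from the cached IDENTIFY PHYSICAL DEVICE response, falling back
 *      to 32 when no response is cached or the reported limit is out of
 *      range.
 */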
1744static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
1745{
1746
1747        struct aac_ciss_identify_pd *identify_resp;
1748
1749        if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
1750                return;
1751
1752        identify_resp = dev->hba_map[bus][target].safw_identify_resp;
1753        if (identify_resp == NULL) {
1754                dev->hba_map[bus][target].qd_limit = 32;
1755                return;
1756        }
1757
1758        if (identify_resp->current_queue_depth_limit <= 0 ||
1759                identify_resp->current_queue_depth_limit > 255)
1760                dev->hba_map[bus][target].qd_limit = 32;
1761        else
1762                dev->hba_map[bus][target].qd_limit =
1763                        identify_resp->current_queue_depth_limit;
1764}
1765
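/**
 *      aac_issue_safw_bmic_identify()  Identify one physical device
 *      @dev:           aac_dev structure
 *      @identify_resp: returns the allocated IDENTIFY response on success
 *      @bus:           mapped bus of the device
 *      @target:        mapped target of the device
 *
 *      Issue a BMIC IDENTIFY PHYSICAL DEVICE pass-through via
 *      aac_send_safw_bmic_cmd().  On success the caller owns the returned
 *      buffer (see aac_get_safw_attr_all_targets() for typical usage);
 *      on failure the buffer is freed here.
 */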
1766static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
1767        struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
1768{
1769        int rcode = -ENOMEM;
1770        int datasize;
1771        struct aac_srb_unit srbu;
1772        struct aac_srb *srbcmd;
1773        struct aac_ciss_identify_pd *identify_reply;
1774
1775        datasize = sizeof(struct aac_ciss_identify_pd);
1776        identify_reply = kmalloc(datasize, GFP_KERNEL);
1777        if (!identify_reply)
1778                goto out;
1779
1780        memset(&srbu, 0, sizeof(struct aac_srb_unit));
1781
1782        srbcmd = &srbu.srb;
1783        srbcmd->flags   = cpu_to_le32(SRB_DataIn);
1784        srbcmd->cdb[0]  = 0x26;
1785        srbcmd->cdb[2]  = (u8)((AAC_MAX_LUN + target) & 0x00FF);
1786        srbcmd->cdb[6]  = CISS_IDENTIFY_PHYSICAL_DEVICE;
1787
1788        rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
1789        if (unlikely(rcode < 0))
1790                goto mem_free_all;
1791
1792        *identify_resp = identify_reply;
1793
1794out:
1795        return rcode;
1796mem_free_all:
1797        kfree(identify_reply);
1798        goto out;
1799}
1800
1801static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
1802{
1803        kfree(dev->safw_phys_luns);
1804        dev->safw_phys_luns = NULL;
1805}
1806
1807/**
1808 *      aac_get_safw_ciss_luns()        Process topology change
1809 *      @dev:           aac_dev structure
1810 *
1811 *      Execute a CISS REPORT PHYS LUNS and process the results into
1812 *      the current hba_map.
1813 */
1814static int aac_get_safw_ciss_luns(struct aac_dev *dev)
1815{
1816        int rcode = -ENOMEM;
1817        int datasize;
1818        struct aac_srb *srbcmd;
1819        struct aac_srb_unit srbu;
1820        struct aac_ciss_phys_luns_resp *phys_luns;
1821
1822        datasize = sizeof(struct aac_ciss_phys_luns_resp) +
1823                (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
1824        phys_luns = kmalloc(datasize, GFP_KERNEL);
1825        if (phys_luns == NULL)
1826                goto out;
1827
1828        memset(&srbu, 0, sizeof(struct aac_srb_unit));
1829
1830        srbcmd = &srbu.srb;
1831        srbcmd->flags   = cpu_to_le32(SRB_DataIn);
1832        srbcmd->cdb[0]  = CISS_REPORT_PHYSICAL_LUNS;
1833        srbcmd->cdb[1]  = 2; /* extended reporting */
1834        srbcmd->cdb[8]  = (u8)(datasize >> 8);
1835        srbcmd->cdb[9]  = (u8)(datasize);
1836
1837        rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
1838        if (unlikely(rcode < 0))
1839                goto mem_free_all;
1840
1841        if (phys_luns->resp_flag != 2) {
1842                rcode = -ENOMSG;
1843                goto mem_free_all;
1844        }
1845
1846        dev->safw_phys_luns = phys_luns;
1847
1848out:
1849        return rcode;
1850mem_free_all:
1851        kfree(phys_luns);
1852        goto out;
1853}
1854
1855static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
1856{
1857        return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
1858}
1859
1860static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
1861{
1862        return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
1863}
1864
1865static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
1866{
1867        return dev->safw_phys_luns->lun[lun].level2[0];
1868}
1869
1870static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
1871{
1872        return dev->safw_phys_luns->lun[lun].bus >> 6;
1873}
1874
1875static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
1876{
1877        return dev->safw_phys_luns->lun[lun].node_ident[9];
1878}
1879
1880static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
1881{
1882        return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
1883}
1884
1885static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
1886{
1887        return dev->safw_phys_luns->lun[lun].node_ident[8];
1888}
1889
1890static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
1891                                                int bus, int target)
1892{
1893        kfree(dev->hba_map[bus][target].safw_identify_resp);
1894        dev->hba_map[bus][target].safw_identify_resp = NULL;
1895}
1896
1897static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
1898        int lun_count)
1899{
1900        int luns;
1901        int i;
1902        u32 bus;
1903        u32 target;
1904
1905        luns = aac_get_safw_phys_lun_count(dev);
1906
1907        if (luns < lun_count)
1908                lun_count = luns;
1909        else if (lun_count < 0)
1910                lun_count = luns;
1911
1912        for (i = 0; i < lun_count; i++) {
1913                bus = aac_get_safw_phys_bus(dev, i);
1914                target = aac_get_safw_phys_target(dev, i);
1915
1916                aac_free_safw_identify_resp(dev, bus, target);
1917        }
1918}
1919
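/**
 *      aac_get_safw_attr_all_targets() Identify every reported physical LUN
 *      @dev:           aac_dev structure
 *
 *      Walk the CISS physical LUN list and cache an IDENTIFY PHYSICAL
 *      DEVICE response for each bus/target in the hba_map.  On error the
 *      responses gathered so far are freed again.
 */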
1920static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
1921{
1922        int i;
1923        int rcode = 0;
1924        u32 lun_count;
1925        u32 bus;
1926        u32 target;
1927        struct aac_ciss_identify_pd *identify_resp = NULL;
1928
1929        lun_count = aac_get_safw_phys_lun_count(dev);
1930
1931        for (i = 0; i < lun_count; ++i) {
1932
1933                bus = aac_get_safw_phys_bus(dev, i);
1934                target = aac_get_safw_phys_target(dev, i);
1935
1936                rcode = aac_issue_safw_bmic_identify(dev,
1937                                                &identify_resp, bus, target);
1938
1939                if (unlikely(rcode < 0))
1940                        goto free_identify_resp;
1941
1942                dev->hba_map[bus][target].safw_identify_resp = identify_resp;
1943        }
1944
1945out:
1946        return rcode;
1947free_identify_resp:
1948        aac_free_safw_all_identify_resp(dev, i);
1949        goto out;
1950}
1951
1952/**
1953 *      aac_set_safw_attr_all_targets-  update current hba map with data from FW
1954 *      @dev:   aac_dev structure
1955 *
1956 *      Update our hba map with the information gathered from the FW
1957 */
1958static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
1959{
1960        /* ok and extended reporting */
1961        u32 lun_count, nexus;
1962        u32 i, bus, target;
1963        u8 expose_flag, attribs;
1964
1965        lun_count = aac_get_safw_phys_lun_count(dev);
1966
1967        dev->scan_counter++;
1968
1969        for (i = 0; i < lun_count; ++i) {
1970
1971                bus = aac_get_safw_phys_bus(dev, i);
1972                target = aac_get_safw_phys_target(dev, i);
1973                expose_flag = aac_get_safw_phys_expose_flag(dev, i);
1974                attribs = aac_get_safw_phys_attribs(dev, i);
1975                nexus = aac_get_safw_phys_nexus(dev, i);
1976
1977                if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
1978                        continue;
1979
1980                if (expose_flag != 0) {
1981                        dev->hba_map[bus][target].devtype =
1982                                AAC_DEVTYPE_RAID_MEMBER;
1983                        continue;
1984                }
1985
1986                if (nexus != 0 && (attribs & 8)) {
1987                        dev->hba_map[bus][target].devtype =
1988                                AAC_DEVTYPE_NATIVE_RAW;
1989                        dev->hba_map[bus][target].rmw_nexus =
1990                                        nexus;
1991                } else
1992                        dev->hba_map[bus][target].devtype =
1993                                AAC_DEVTYPE_ARC_RAW;
1994
1995                dev->hba_map[bus][target].scan_counter = dev->scan_counter;
1996
1997                aac_set_safw_target_qd(dev, bus, target);
1998        }
1999}
2000
2001static int aac_setup_safw_targets(struct aac_dev *dev)
2002{
2003        int rcode = 0;
2004
2005        rcode = aac_get_containers(dev);
2006        if (unlikely(rcode < 0))
2007                goto out;
2008
2009        rcode = aac_get_safw_ciss_luns(dev);
2010        if (unlikely(rcode < 0))
2011                goto out;
2012
2013        rcode = aac_get_safw_attr_all_targets(dev);
2014        if (unlikely(rcode < 0))
2015                goto free_ciss_luns;
2016
2017        aac_set_safw_attr_all_targets(dev);
2018
2019        aac_free_safw_all_identify_resp(dev, -1);
2020free_ciss_luns:
2021        aac_free_safw_ciss_luns(dev);
2022out:
2023        return rcode;
2024}
2025
2026int aac_setup_safw_adapter(struct aac_dev *dev)
2027{
2028        return aac_setup_safw_targets(dev);
2029}
2030
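/**
 *      aac_get_adapter_info()  Query and apply adapter capabilities
 *      @dev:           aac_dev structure
 *
 *      Fetch RequestAdapterInfo, the supplement adapter info and the bus
 *      info from the firmware, print the firmware/BIOS revisions, then
 *      configure the driver accordingly: non-DASD and JBOD support,
 *      64-bit DMA (DAC), the read/write and SCSI pass-through routines,
 *      and the host's sg_tablesize/max_sectors limits.
 */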
2031int aac_get_adapter_info(struct aac_dev* dev)
2032{
2033        struct fib* fibptr;
2034        int rcode;
2035        u32 tmp, bus, target;
2036        struct aac_adapter_info *info;
2037        struct aac_bus_info *command;
2038        struct aac_bus_info_response *bus_info;
2039
2040        if (!(fibptr = aac_fib_alloc(dev)))
2041                return -ENOMEM;
2042
2043        aac_fib_init(fibptr);
2044        info = (struct aac_adapter_info *) fib_data(fibptr);
2045        memset(info,0,sizeof(*info));
2046
2047        rcode = aac_fib_send(RequestAdapterInfo,
2048                         fibptr,
2049                         sizeof(*info),
2050                         FsaNormal,
2051                         -1, 1, /* First `interrupt' command uses special wait */
2052                         NULL,
2053                         NULL);
2054
2055        if (rcode < 0) {
2056                /* FIB should be freed only after
2057                 * getting the response from the F/W */
2058                if (rcode != -ERESTARTSYS) {
2059                        aac_fib_complete(fibptr);
2060                        aac_fib_free(fibptr);
2061                }
2062                return rcode;
2063        }
2064        memcpy(&dev->adapter_info, info, sizeof(*info));
2065
2066        dev->supplement_adapter_info.virt_device_bus = 0xffff;
2067        if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
2068                struct aac_supplement_adapter_info * sinfo;
2069
2070                aac_fib_init(fibptr);
2071
2072                sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
2073
2074                memset(sinfo,0,sizeof(*sinfo));
2075
2076                rcode = aac_fib_send(RequestSupplementAdapterInfo,
2077                                 fibptr,
2078                                 sizeof(*sinfo),
2079                                 FsaNormal,
2080                                 1, 1,
2081                                 NULL,
2082                                 NULL);
2083
2084                if (rcode >= 0)
2085                        memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
2086                if (rcode == -ERESTARTSYS) {
2087                        fibptr = aac_fib_alloc(dev);
2088                        if (!fibptr)
2089                                return -ENOMEM;
2090                }
2091
2092        }
2093
2094        /* reset all previously mapped devices (i.e. for init. after IOP_RESET) */
2095        for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
2096                for (target = 0; target < AAC_MAX_TARGETS; target++) {
2097                        dev->hba_map[bus][target].devtype = 0;
2098                        dev->hba_map[bus][target].qd_limit = 0;
2099                }
2100        }
2101
2102        /*
2103         * GetBusInfo
2104         */
2105
2106        aac_fib_init(fibptr);
2107
2108        bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
2109
2110        memset(bus_info, 0, sizeof(*bus_info));
2111
2112        command = (struct aac_bus_info *)bus_info;
2113
2114        command->Command = cpu_to_le32(VM_Ioctl);
2115        command->ObjType = cpu_to_le32(FT_DRIVE);
2116        command->MethodId = cpu_to_le32(1);
2117        command->CtlCmd = cpu_to_le32(GetBusInfo);
2118
2119        rcode = aac_fib_send(ContainerCommand,
2120                         fibptr,
2121                         sizeof (*bus_info),
2122                         FsaNormal,
2123                         1, 1,
2124                         NULL, NULL);
2125
2126        /* reasonable default */
2127        dev->maximum_num_physicals = 16;
2128        if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
2129                dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
2130                dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
2131        }
2132
2133        if (!dev->in_reset) {
2134                char buffer[16];
2135                tmp = le32_to_cpu(dev->adapter_info.kernelrev);
2136                printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
2137                        dev->name,
2138                        dev->id,
2139                        tmp>>24,
2140                        (tmp>>16)&0xff,
2141                        tmp&0xff,
2142                        le32_to_cpu(dev->adapter_info.kernelbuild),
2143                        (int)sizeof(dev->supplement_adapter_info.build_date),
2144                        dev->supplement_adapter_info.build_date);
2145                tmp = le32_to_cpu(dev->adapter_info.monitorrev);
2146                printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
2147                        dev->name, dev->id,
2148                        tmp>>24,(tmp>>16)&0xff,tmp&0xff,
2149                        le32_to_cpu(dev->adapter_info.monitorbuild));
2150                tmp = le32_to_cpu(dev->adapter_info.biosrev);
2151                printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
2152                        dev->name, dev->id,
2153                        tmp>>24,(tmp>>16)&0xff,tmp&0xff,
2154                        le32_to_cpu(dev->adapter_info.biosbuild));
2155                buffer[0] = '\0';
2156                if (aac_get_serial_number(
2157                  shost_to_class(dev->scsi_host_ptr), buffer))
2158                        printk(KERN_INFO "%s%d: serial %s",
2159                          dev->name, dev->id, buffer);
2160                if (dev->supplement_adapter_info.vpd_info.tsid[0]) {
2161                        printk(KERN_INFO "%s%d: TSID %.*s\n",
2162                          dev->name, dev->id,
2163                          (int)sizeof(dev->supplement_adapter_info
2164                                                        .vpd_info.tsid),
2165                                dev->supplement_adapter_info.vpd_info.tsid);
2166                }
2167                if (!aac_check_reset || ((aac_check_reset == 1) &&
2168                  (dev->supplement_adapter_info.supported_options2 &
2169                  AAC_OPTION_IGNORE_RESET))) {
2170                        printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
2171                          dev->name, dev->id);
2172                }
2173        }
2174
2175        dev->cache_protected = 0;
2176        dev->jbod = ((dev->supplement_adapter_info.feature_bits &
2177                AAC_FEATURE_JBOD) != 0);
2178        dev->nondasd_support = 0;
2179        dev->raid_scsi_mode = 0;
2180        if(dev->adapter_info.options & AAC_OPT_NONDASD)
2181                dev->nondasd_support = 1;
2182
2183        /*
2184         * If the firmware supports ROMB RAID/SCSI mode and we are currently
2185         * in RAID/SCSI mode, set the flag. For now if in this mode we will
2186         * force nondasd support on. If we decide to allow the non-dasd flag
2187         * additional changes will have to be made to support
2188         * RAID/SCSI.  The function aac_scsi_cmd in this module will have to be
2189         * changed to support the new dev->raid_scsi_mode flag instead of
2190         * leeching off of the dev->nondasd_support flag. Also in linit.c the
2191         * function aac_detect will have to be modified where it sets up the
2192         * max number of channels based on the aac->nondasd_support flag only.
2193         */
2194        if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
2195            (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
2196                dev->nondasd_support = 1;
2197                dev->raid_scsi_mode = 1;
2198        }
2199        if (dev->raid_scsi_mode != 0)
2200                printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
2201                                dev->name, dev->id);
2202
2203        if (nondasd != -1)
2204                dev->nondasd_support = (nondasd!=0);
2205        if (dev->nondasd_support && !dev->in_reset)
2206                printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
2207
2208        if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
2209                dev->needs_dac = 1;
2210        dev->dac_support = 0;
2211        if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
2212            (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
2213                if (!dev->in_reset)
2214                        printk(KERN_INFO "%s%d: 64bit support enabled.\n",
2215                                dev->name, dev->id);
2216                dev->dac_support = 1;
2217        }
2218
2219        if(dacmode != -1) {
2220                dev->dac_support = (dacmode!=0);
2221        }
2222
2223        /* avoid problems with AAC_QUIRK_SCSI_32 controllers */
2224        if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
2225                & AAC_QUIRK_SCSI_32)) {
2226                dev->nondasd_support = 0;
2227                dev->jbod = 0;
2228                expose_physicals = 0;
2229        }
2230
2231        if (dev->dac_support) {
2232                if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(64))) {
2233                        if (!dev->in_reset)
2234                                dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
2235                } else if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(32))) {
2236                        dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
2237                        dev->dac_support = 0;
2238                } else {
2239                        dev_info(&dev->pdev->dev, "No suitable DMA available\n");
2240                        rcode = -ENOMEM;
2241                }
2242        }
2243        /*
2244         * Deal with configuring for the individualized limits of each packet
2245         * interface.
2246         */
2247        dev->a_ops.adapter_scsi = (dev->dac_support)
2248          ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
2249                                ? aac_scsi_32_64
2250                                : aac_scsi_64)
2251                                : aac_scsi_32;
2252        if (dev->raw_io_interface) {
2253                dev->a_ops.adapter_bounds = (dev->raw_io_64)
2254                                        ? aac_bounds_64
2255                                        : aac_bounds_32;
2256                dev->a_ops.adapter_read = aac_read_raw_io;
2257                dev->a_ops.adapter_write = aac_write_raw_io;
2258        } else {
2259                dev->a_ops.adapter_bounds = aac_bounds_32;
2260                dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
2261                        sizeof(struct aac_fibhdr) -
2262                        sizeof(struct aac_write) + sizeof(struct sgentry)) /
2263                                sizeof(struct sgentry);
2264                if (dev->dac_support) {
2265                        dev->a_ops.adapter_read = aac_read_block64;
2266                        dev->a_ops.adapter_write = aac_write_block64;
2267                        /*
2268                         * 38 scatter gather elements
2269                         */
2270                        dev->scsi_host_ptr->sg_tablesize =
2271                                (dev->max_fib_size -
2272                                sizeof(struct aac_fibhdr) -
2273                                sizeof(struct aac_write64) +
2274                                sizeof(struct sgentry64)) /
2275                                        sizeof(struct sgentry64);
2276                } else {
2277                        dev->a_ops.adapter_read = aac_read_block;
2278                        dev->a_ops.adapter_write = aac_write_block;
2279                }
2280                dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
2281                if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
2282                        /*
2283                         * Worst case size that could cause sg overflow when
2284                         * we break up SG elements that are larger than 64KB.
2285                         * Would be nice if we could tell the SCSI layer what
2286                         * the maximum SG element size can be. Worst case is
2287                         * (sg_tablesize-1) 4KB elements with one 64KB
2288                         * element.
2289                         *      32bit -> 468 or 238KB   64bit -> 424 or 212KB
2290                         */
2291                        dev->scsi_host_ptr->max_sectors =
2292                          (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
2293                }
2294        }
2295        if (!dev->sync_mode && dev->sa_firmware &&
2296                dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
2297                dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
2298                        HBA_MAX_SG_SEPARATE;
2299
2300        /* FIB should be freed only after getting the response from the F/W */
2301        if (rcode != -ERESTARTSYS) {
2302                aac_fib_complete(fibptr);
2303                aac_fib_free(fibptr);
2304        }
2305
2306        return rcode;
2307}
2308
2309
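/**
 *      io_callback()   Completion handler for container reads and writes
 *      @context:       SCSI command that originated the I/O
 *      @fibptr:        fib that has completed
 *
 *      Unmap the data buffer, translate the firmware status into a SCSI
 *      result (building sense data for NOT_READY, MEDIUM and internal
 *      errors) and complete the command.
 */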
2310static void io_callback(void *context, struct fib * fibptr)
2311{
2312        struct aac_dev *dev;
2313        struct aac_read_reply *readreply;
2314        struct scsi_cmnd *scsicmd;
2315        u32 cid;
2316
2317        scsicmd = (struct scsi_cmnd *) context;
2318
2319        if (!aac_valid_context(scsicmd, fibptr))
2320                return;
2321
2322        dev = fibptr->dev;
2323        cid = scmd_id(scsicmd);
2324
2325        if (nblank(dprintk(x))) {
2326                u64 lba;
2327                switch (scsicmd->cmnd[0]) {
2328                case WRITE_6:
2329                case READ_6:
2330                        lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2331                            (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2332                        break;
2333                case WRITE_16:
2334                case READ_16:
2335                        lba = ((u64)scsicmd->cmnd[2] << 56) |
2336                              ((u64)scsicmd->cmnd[3] << 48) |
2337                              ((u64)scsicmd->cmnd[4] << 40) |
2338                              ((u64)scsicmd->cmnd[5] << 32) |
2339                              ((u64)scsicmd->cmnd[6] << 24) |
2340                              (scsicmd->cmnd[7] << 16) |
2341                              (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2342                        break;
2343                case WRITE_12:
2344                case READ_12:
2345                        lba = ((u64)scsicmd->cmnd[2] << 24) |
2346                              (scsicmd->cmnd[3] << 16) |
2347                              (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2348                        break;
2349                default:
2350                        lba = ((u64)scsicmd->cmnd[2] << 24) |
2351                               (scsicmd->cmnd[3] << 16) |
2352                               (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2353                        break;
2354                }
2355                printk(KERN_DEBUG
2356                  "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
2357                  smp_processor_id(), (unsigned long long)lba, jiffies);
2358        }
2359
2360        BUG_ON(fibptr == NULL);
2361
2362        scsi_dma_unmap(scsicmd);
2363
2364        readreply = (struct aac_read_reply *)fib_data(fibptr);
2365        switch (le32_to_cpu(readreply->status)) {
2366        case ST_OK:
2367                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2368                        SAM_STAT_GOOD;
2369                dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
2370                break;
2371        case ST_NOT_READY:
2372                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2373                        SAM_STAT_CHECK_CONDITION;
2374                set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
2375                  SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
2376                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2377                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2378                             SCSI_SENSE_BUFFERSIZE));
2379                break;
2380        case ST_MEDERR:
2381                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2382                        SAM_STAT_CHECK_CONDITION;
2383                set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
2384                  SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
2385                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2386                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2387                             SCSI_SENSE_BUFFERSIZE));
2388                break;
2389        default:
2390#ifdef AAC_DETAILED_STATUS_INFO
2391                printk(KERN_WARNING "io_callback: io failed, status = %d\n",
2392                  le32_to_cpu(readreply->status));
2393#endif
2394                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2395                        SAM_STAT_CHECK_CONDITION;
2396                set_sense(&dev->fsa_dev[cid].sense_data,
2397                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2398                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2399                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2400                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2401                             SCSI_SENSE_BUFFERSIZE));
2402                break;
2403        }
2404        aac_fib_complete(fibptr);
2405
2406        scsicmd->scsi_done(scsicmd);
2407}
2408
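/**
 *      aac_read()      Service a READ command for a container
 *      @scsicmd:       SCSI command block
 *
 *      Decode the LBA and transfer length from the READ(6/10/12/16) CDB,
 *      reject requests beyond the end of the container with
 *      LBA_OUT_OF_RANGE sense, then allocate a fib and hand the request
 *      to the adapter-specific read routine.
 */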
2409static int aac_read(struct scsi_cmnd * scsicmd)
2410{
2411        u64 lba;
2412        u32 count;
2413        int status;
2414        struct aac_dev *dev;
2415        struct fib * cmd_fibcontext;
2416        int cid;
2417
2418        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2419        /*
2420         *      Get block address and transfer length
2421         */
2422        switch (scsicmd->cmnd[0]) {
2423        case READ_6:
2424                dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
2425
2426                lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2427                        (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2428                count = scsicmd->cmnd[4];
2429
2430                if (count == 0)
2431                        count = 256;
2432                break;
2433        case READ_16:
2434                dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
2435
2436                lba =   ((u64)scsicmd->cmnd[2] << 56) |
2437                        ((u64)scsicmd->cmnd[3] << 48) |
2438                        ((u64)scsicmd->cmnd[4] << 40) |
2439                        ((u64)scsicmd->cmnd[5] << 32) |
2440                        ((u64)scsicmd->cmnd[6] << 24) |
2441                        (scsicmd->cmnd[7] << 16) |
2442                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2443                count = (scsicmd->cmnd[10] << 24) |
2444                        (scsicmd->cmnd[11] << 16) |
2445                        (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2446                break;
2447        case READ_12:
2448                dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
2449
2450                lba = ((u64)scsicmd->cmnd[2] << 24) |
2451                        (scsicmd->cmnd[3] << 16) |
2452                        (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2453                count = (scsicmd->cmnd[6] << 24) |
2454                        (scsicmd->cmnd[7] << 16) |
2455                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2456                break;
2457        default:
2458                dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
2459
2460                lba = ((u64)scsicmd->cmnd[2] << 24) |
2461                        (scsicmd->cmnd[3] << 16) |
2462                        (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2463                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2464                break;
2465        }
2466
2467        if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2468                cid = scmd_id(scsicmd);
2469                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2470                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2471                        SAM_STAT_CHECK_CONDITION;
2472                set_sense(&dev->fsa_dev[cid].sense_data,
2473                          ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
2474                          ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2475                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2476                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2477                             SCSI_SENSE_BUFFERSIZE));
2478                scsicmd->scsi_done(scsicmd);
2479                return 0;
2480        }
2481
2482        dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
2483          smp_processor_id(), (unsigned long long)lba, jiffies));
2484        if (aac_adapter_bounds(dev,scsicmd,lba))
2485                return 0;
2486        /*
2487         *      Allocate and initialize a Fib
2488         */
2489        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2490        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2491        status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
2492
2493        /*
2494         *      Check that the command queued to the controller
2495         */
2496        if (status == -EINPROGRESS)
2497                return 0;
2498
2499        printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
2500        /*
2501         *      For some reason the Fib didn't queue; return QUEUE_FULL
2502         */
2503        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2504        scsicmd->scsi_done(scsicmd);
2505        aac_fib_complete(cmd_fibcontext);
2506        aac_fib_free(cmd_fibcontext);
2507        return 0;
2508}
2509
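/**
 *      aac_write()     Service a WRITE command for a container
 *      @scsicmd:       SCSI command block
 *
 *      Decode the LBA, transfer length and FUA bit from the
 *      WRITE(6/10/12/16) CDB, range-check the request against the
 *      container size, then allocate a fib and hand it to the
 *      adapter-specific write routine.
 */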
2510static int aac_write(struct scsi_cmnd * scsicmd)
2511{
2512        u64 lba;
2513        u32 count;
2514        int fua;
2515        int status;
2516        struct aac_dev *dev;
2517        struct fib * cmd_fibcontext;
2518        int cid;
2519
2520        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2521        /*
2522         *      Get block address and transfer length
2523         */
2524        if (scsicmd->cmnd[0] == WRITE_6)        /* 6 byte command */
2525        {
2526                lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2527                count = scsicmd->cmnd[4];
2528                if (count == 0)
2529                        count = 256;
2530                fua = 0;
2531        } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
2532                dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
2533
2534                lba =   ((u64)scsicmd->cmnd[2] << 56) |
2535                        ((u64)scsicmd->cmnd[3] << 48) |
2536                        ((u64)scsicmd->cmnd[4] << 40) |
2537                        ((u64)scsicmd->cmnd[5] << 32) |
2538                        ((u64)scsicmd->cmnd[6] << 24) |
2539                        (scsicmd->cmnd[7] << 16) |
2540                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2541                count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
2542                        (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2543                fua = scsicmd->cmnd[1] & 0x8;
2544        } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
2545                dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
2546
2547                lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
2548                    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2549                count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
2550                      | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2551                fua = scsicmd->cmnd[1] & 0x8;
2552        } else {
2553                dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
2554                lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2555                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2556                fua = scsicmd->cmnd[1] & 0x8;
2557        }
2558
2559        if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2560                cid = scmd_id(scsicmd);
2561                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2562                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2563                        SAM_STAT_CHECK_CONDITION;
2564                set_sense(&dev->fsa_dev[cid].sense_data,
2565                          ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
2566                          ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2567                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2568                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2569                             SCSI_SENSE_BUFFERSIZE));
2570                scsicmd->scsi_done(scsicmd);
2571                return 0;
2572        }
2573
2574        dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
2575          smp_processor_id(), (unsigned long long)lba, jiffies));
2576        if (aac_adapter_bounds(dev,scsicmd,lba))
2577                return 0;
2578        /*
2579         *      Allocate and initialize a Fib then setup a BlockWrite command
2580         */
2581        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2582        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2583        status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
2584
2585        /*
2586         *      Check that the command queued to the controller
2587         */
2588        if (status == -EINPROGRESS)
2589                return 0;
2590
2591        printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
2592        /*
2593         *      For some reason the Fib didn't queue; return QUEUE_FULL
2594         */
2595        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2596        scsicmd->scsi_done(scsicmd);
2597
2598        aac_fib_complete(cmd_fibcontext);
2599        aac_fib_free(cmd_fibcontext);
2600        return 0;
2601}
2602
2603static void synchronize_callback(void *context, struct fib *fibptr)
2604{
2605        struct aac_synchronize_reply *synchronizereply;
2606        struct scsi_cmnd *cmd = context;
2607
2608        if (!aac_valid_context(cmd, fibptr))
2609                return;
2610
2611        dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
2612                                smp_processor_id(), jiffies));
2613        BUG_ON(fibptr == NULL);
2614
2615
2616        synchronizereply = fib_data(fibptr);
2617        if (le32_to_cpu(synchronizereply->status) == CT_OK)
2618                cmd->result = DID_OK << 16 |
2619                        COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2620        else {
2621                struct scsi_device *sdev = cmd->device;
2622                struct aac_dev *dev = fibptr->dev;
2623                u32 cid = sdev_id(sdev);
2624                printk(KERN_WARNING
2625                     "synchronize_callback: synchronize failed, status = %d\n",
2626                     le32_to_cpu(synchronizereply->status));
2627                cmd->result = DID_OK << 16 |
2628                        COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2629                set_sense(&dev->fsa_dev[cid].sense_data,
2630                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2631                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2632                memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2633                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2634                             SCSI_SENSE_BUFFERSIZE));
2635        }
2636
2637        aac_fib_complete(fibptr);
2638        aac_fib_free(fibptr);
2639        cmd->scsi_done(cmd);
2640}
2641
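/**
 *      aac_synchronize()       Flush a container's cache
 *      @scsicmd:       SCSI command block (SYNCHRONIZE CACHE)
 *
 *      Issue a VM_ContainerConfig/CT_FLUSH_CACHE request for the
 *      addressed container; synchronize_callback() completes the SCSI
 *      command when the firmware answers.
 */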
2642static int aac_synchronize(struct scsi_cmnd *scsicmd)
2643{
2644        int status;
2645        struct fib *cmd_fibcontext;
2646        struct aac_synchronize *synchronizecmd;
2647        struct scsi_device *sdev = scsicmd->device;
2648        struct aac_dev *aac;
2649
2650        aac = (struct aac_dev *)sdev->host->hostdata;
2651        if (aac->in_reset)
2652                return SCSI_MLQUEUE_HOST_BUSY;
2653
2654        /*
2655         *      Allocate and initialize a Fib
2656         */
2657        cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
2658
2659        aac_fib_init(cmd_fibcontext);
2660
2661        synchronizecmd = fib_data(cmd_fibcontext);
2662        synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
2663        synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
2664        synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
2665        synchronizecmd->count =
2666             cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
2667        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2668
2669        /*
2670         *      Now send the Fib to the adapter
2671         */
2672        status = aac_fib_send(ContainerCommand,
2673                  cmd_fibcontext,
2674                  sizeof(struct aac_synchronize),
2675                  FsaNormal,
2676                  0, 1,
2677                  (fib_callback)synchronize_callback,
2678                  (void *)scsicmd);
2679
2680        /*
2681         *      Check that the command queued to the controller
2682         */
2683        if (status == -EINPROGRESS)
2684                return 0;
2685
2686        printk(KERN_WARNING
2687                "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
2688        aac_fib_complete(cmd_fibcontext);
2689        aac_fib_free(cmd_fibcontext);
2690        return SCSI_MLQUEUE_HOST_BUSY;
2691}
2692
2693static void aac_start_stop_callback(void *context, struct fib *fibptr)
2694{
2695        struct scsi_cmnd *scsicmd = context;
2696
2697        if (!aac_valid_context(scsicmd, fibptr))
2698                return;
2699
2700        BUG_ON(fibptr == NULL);
2701
2702        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2703
2704        aac_fib_complete(fibptr);
2705        aac_fib_free(fibptr);
2706        scsicmd->scsi_done(scsicmd);
2707}
2708
2709static int aac_start_stop(struct scsi_cmnd *scsicmd)
2710{
2711        int status;
2712        struct fib *cmd_fibcontext;
2713        struct aac_power_management *pmcmd;
2714        struct scsi_device *sdev = scsicmd->device;
2715        struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
2716
2717        if (!(aac->supplement_adapter_info.supported_options2 &
2718              AAC_OPTION_POWER_MANAGEMENT)) {
2719                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2720                                  SAM_STAT_GOOD;
2721                scsicmd->scsi_done(scsicmd);
2722                return 0;
2723        }
2724
2725        if (aac->in_reset)
2726                return SCSI_MLQUEUE_HOST_BUSY;
2727
2728        /*
2729         *      Allocate and initialize a Fib
2730         */
2731        cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
2732
2733        aac_fib_init(cmd_fibcontext);
2734
2735        pmcmd = fib_data(cmd_fibcontext);
2736        pmcmd->command = cpu_to_le32(VM_ContainerConfig);
2737        pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
2738        /* Eject bit ignored, not relevant */
2739        pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
2740                cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
2741        pmcmd->cid = cpu_to_le32(sdev_id(sdev));
2742        pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
2743                cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
2744        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2745
2746        /*
2747         *      Now send the Fib to the adapter
2748         */
2749        status = aac_fib_send(ContainerCommand,
2750                  cmd_fibcontext,
2751                  sizeof(struct aac_power_management),
2752                  FsaNormal,
2753                  0, 1,
2754                  (fib_callback)aac_start_stop_callback,
2755                  (void *)scsicmd);
2756
2757        /*
2758         *      Check that the command was queued to the controller
2759         */
2760        if (status == -EINPROGRESS)
2761                return 0;
2762
2763        aac_fib_complete(cmd_fibcontext);
2764        aac_fib_free(cmd_fibcontext);
2765        return SCSI_MLQUEUE_HOST_BUSY;
2766}
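/*
 * For reference, the CDB bits consumed above follow START STOP UNIT:
 * byte 4 bit 0 (START) selects CT_PM_START_UNIT vs. CT_PM_STOP_UNIT and
 * byte 1 bit 0 (IMMED) maps to CT_PM_UNIT_IMMEDIATE, so a CDB of
 * 1b 01 00 00 00 00 is an immediate stop request.
 */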
2767
2768/**
2769 *      aac_scsi_cmd()          -       Process SCSI command
2770 *      @scsicmd:               SCSI command block
2771 *
2772 *      Emulate a SCSI command and queue the required request for the
2773 *      aacraid firmware.
2774 */
2775
2776int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2777{
2778        u32 cid, bus;
2779        struct Scsi_Host *host = scsicmd->device->host;
2780        struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2781        struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
2782
2783        if (fsa_dev_ptr == NULL)
2784                return -1;
2785        /*
2786         *      If the bus, id or lun is out of range, fail the command
2787         *      Test does not apply to ID 16, the pseudo id for the controller
2788         *      itself.
2789         */
2790        cid = scmd_id(scsicmd);
2791        if (cid != host->this_id) {
2792                if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
2793                        if((cid >= dev->maximum_num_containers) ||
2794                                        (scsicmd->device->lun != 0)) {
2795                                scsicmd->result = DID_NO_CONNECT << 16;
2796                                goto scsi_done_ret;
2797                        }
2798
2799                        /*
2800                         *      If the target container doesn't exist, it may have
2801                         *      been newly created
2802                         */
2803                        if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
2804                          (fsa_dev_ptr[cid].sense_data.sense_key ==
2805                           NOT_READY)) {
2806                                switch (scsicmd->cmnd[0]) {
2807                                case SERVICE_ACTION_IN_16:
2808                                        if (!(dev->raw_io_interface) ||
2809                                            !(dev->raw_io_64) ||
2810                                            ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
2811                                                break;
2812                                        fallthrough;
2813                                case INQUIRY:
2814                                case READ_CAPACITY:
2815                                case TEST_UNIT_READY:
2816                                        if (dev->in_reset)
2817                                                return -1;
2818                                        return _aac_probe_container(scsicmd,
2819                                                        aac_probe_container_callback2);
2820                                default:
2821                                        break;
2822                                }
2823                        }
2824                } else {  /* check for physical non-dasd devices */
2825                        bus = aac_logical_to_phys(scmd_channel(scsicmd));
2826
2827                        if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
2828                                dev->hba_map[bus][cid].devtype
2829                                        == AAC_DEVTYPE_NATIVE_RAW) {
2830                                if (dev->in_reset)
2831                                        return -1;
2832                                return aac_send_hba_fib(scsicmd);
2833                        } else if (dev->nondasd_support || expose_physicals ||
2834                                dev->jbod) {
2835                                if (dev->in_reset)
2836                                        return -1;
2837                                return aac_send_srb_fib(scsicmd);
2838                        } else {
2839                                scsicmd->result = DID_NO_CONNECT << 16;
2840                                goto scsi_done_ret;
2841                        }
2842                }
2843        }
2844        /*
2845         * else Command for the controller itself
2846         */
2847        else if ((scsicmd->cmnd[0] != INQUIRY) &&       /* only INQUIRY & TUR cmnd supported for controller */
2848                (scsicmd->cmnd[0] != TEST_UNIT_READY))
2849        {
2850                dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
2851                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2852                set_sense(&dev->fsa_dev[cid].sense_data,
2853                  ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
2854                  ASENCODE_INVALID_COMMAND, 0, 0);
2855                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2856                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2857                             SCSI_SENSE_BUFFERSIZE));
2858                goto scsi_done_ret;
2859        }
2860
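        /*
         * Everything from here on is the container (logical drive)
         * command set: reads and writes are turned into FIBs, while
         * INQUIRY, READ CAPACITY, MODE SENSE and the other housekeeping
         * commands are emulated entirely in the driver from
         * fsa_dev_ptr[cid].
         */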
2861        switch (scsicmd->cmnd[0]) {
2862        case READ_6:
2863        case READ_10:
2864        case READ_12:
2865        case READ_16:
2866                if (dev->in_reset)
2867                        return -1;
2868                return aac_read(scsicmd);
2869
2870        case WRITE_6:
2871        case WRITE_10:
2872        case WRITE_12:
2873        case WRITE_16:
2874                if (dev->in_reset)
2875                        return -1;
2876                return aac_write(scsicmd);
2877
2878        case SYNCHRONIZE_CACHE:
2879                if (((aac_cache & 6) == 6) && dev->cache_protected) {
2880                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2881                                          SAM_STAT_GOOD;
2882                        break;
2883                }
2884                /* Issue FIB to tell Firmware to flush its cache */
2885                if ((aac_cache & 6) != 2)
2886                        return aac_synchronize(scsicmd);
2887                fallthrough;
2888        case INQUIRY:
2889        {
2890                struct inquiry_data inq_data;
2891
2892                dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
2893                memset(&inq_data, 0, sizeof (struct inquiry_data));
2894
2895                if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
2896                        char *arr = (char *)&inq_data;
2897
2898                        /* EVPD bit set */
2899                        arr[0] = (scmd_id(scsicmd) == host->this_id) ?
2900                          INQD_PDT_PROC : INQD_PDT_DA;
2901                        if (scsicmd->cmnd[2] == 0) {
2902                                /* supported vital product data pages */
2903                                arr[3] = 3;
2904                                arr[4] = 0x0;
2905                                arr[5] = 0x80;
2906                                arr[6] = 0x83;
2907                                arr[1] = scsicmd->cmnd[2];
2908                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2909                                                         sizeof(inq_data));
2910                                scsicmd->result = DID_OK << 16 |
2911                                                  COMMAND_COMPLETE << 8 |
2912                                                  SAM_STAT_GOOD;
2913                        } else if (scsicmd->cmnd[2] == 0x80) {
2914                                /* unit serial number page */
2915                                arr[3] = setinqserial(dev, &arr[4],
2916                                  scmd_id(scsicmd));
2917                                arr[1] = scsicmd->cmnd[2];
2918                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2919                                                         sizeof(inq_data));
2920                                if (aac_wwn != 2)
2921                                        return aac_get_container_serial(
2922                                                scsicmd);
2923                                scsicmd->result = DID_OK << 16 |
2924                                                  COMMAND_COMPLETE << 8 |
2925                                                  SAM_STAT_GOOD;
2926                        } else if (scsicmd->cmnd[2] == 0x83) {
2927                                /* vpd page 0x83 - Device Identification Page */
2928                                char *sno = (char *)&inq_data;
2929                                sno[3] = setinqserial(dev, &sno[4],
2930                                                      scmd_id(scsicmd));
2931                                if (aac_wwn != 2)
2932                                        return aac_get_container_serial(
2933                                                scsicmd);
2934                                scsicmd->result = DID_OK << 16 |
2935                                                  COMMAND_COMPLETE << 8 |
2936                                                  SAM_STAT_GOOD;
2937                        } else {
2938                                /* vpd page not implemented */
2939                                scsicmd->result = DID_OK << 16 |
2940                                  COMMAND_COMPLETE << 8 |
2941                                  SAM_STAT_CHECK_CONDITION;
2942                                set_sense(&dev->fsa_dev[cid].sense_data,
2943                                  ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
2944                                  ASENCODE_NO_SENSE, 7, 2);
2945                                memcpy(scsicmd->sense_buffer,
2946                                  &dev->fsa_dev[cid].sense_data,
2947                                  min_t(size_t,
2948                                        sizeof(dev->fsa_dev[cid].sense_data),
2949                                        SCSI_SENSE_BUFFERSIZE));
2950                        }
2951                        break;
2952                }
2953                inq_data.inqd_ver = 2;  /* claim compliance to SCSI-2 */
2954                inq_data.inqd_rdf = 2;  /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
2955                inq_data.inqd_len = 31;
2956                /* Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked | Reserved | CmdQue | SftRe */
2957                inq_data.inqd_pad2 = 0x32;      /* WBus16 | Sync | CmdQue */
2958                /*
2959                 *      Set the Vendor, Product, and Revision Level
2960                 *      see: <vendor>.c i.e. aac.c
2961                 */
2962                if (cid == host->this_id) {
2963                        setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
2964                        inq_data.inqd_pdt = INQD_PDT_PROC;      /* Processor device */
2965                        scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2966                                                 sizeof(inq_data));
2967                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2968                                          SAM_STAT_GOOD;
2969                        break;
2970                }
2971                if (dev->in_reset)
2972                        return -1;
2973                setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
2974                inq_data.inqd_pdt = INQD_PDT_DA;        /* Direct/random access device */
2975                scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
2976                return aac_get_container_name(scsicmd);
2977        }
2978        case SERVICE_ACTION_IN_16:
2979                if (!(dev->raw_io_interface) ||
2980                    !(dev->raw_io_64) ||
2981                    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
2982                        break;
2983        {
2984                u64 capacity;
2985                char cp[13];
2986                unsigned int alloc_len;
2987
2988                dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
2989                capacity = fsa_dev_ptr[cid].size - 1;
2990                cp[0] = (capacity >> 56) & 0xff;
2991                cp[1] = (capacity >> 48) & 0xff;
2992                cp[2] = (capacity >> 40) & 0xff;
2993                cp[3] = (capacity >> 32) & 0xff;
2994                cp[4] = (capacity >> 24) & 0xff;
2995                cp[5] = (capacity >> 16) & 0xff;
2996                cp[6] = (capacity >> 8) & 0xff;
2997                cp[7] = (capacity >> 0) & 0xff;
2998                cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
2999                cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3000                cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3001                cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
3002                cp[12] = 0;
3003
3004                alloc_len = ((scsicmd->cmnd[10] << 24)
3005                             + (scsicmd->cmnd[11] << 16)
3006                             + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
3007
3008                alloc_len = min_t(size_t, alloc_len, sizeof(cp));
3009                scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
3010                if (alloc_len < scsi_bufflen(scsicmd))
3011                        scsi_set_resid(scsicmd,
3012                                       scsi_bufflen(scsicmd) - alloc_len);
3013
3014                /* Do not cache partition table for arrays */
3015                scsicmd->device->removable = 1;
3016
3017                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3018                                  SAM_STAT_GOOD;
3019                break;
3020        }
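        /*
         * Layout of the parameter data built in cp[] above, per
         * READ CAPACITY(16): bytes 0-7 hold the last LBA (size - 1) in
         * big-endian order and bytes 8-11 the block length, so a 2 TiB
         * container with 512-byte blocks reports
         * 00 00 00 00 ff ff ff ff 00 00 02 00.
         */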
3021
3022        case READ_CAPACITY:
3023        {
3024                u32 capacity;
3025                char cp[8];
3026
3027                dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
3028                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3029                        capacity = fsa_dev_ptr[cid].size - 1;
3030                else
3031                        capacity = (u32)-1;
3032
3033                cp[0] = (capacity >> 24) & 0xff;
3034                cp[1] = (capacity >> 16) & 0xff;
3035                cp[2] = (capacity >> 8) & 0xff;
3036                cp[3] = (capacity >> 0) & 0xff;
3037                cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
3038                cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3039                cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3040                cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
3041                scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
3042                /* Do not cache partition table for arrays */
3043                scsicmd->device->removable = 1;
3044                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3045                                  SAM_STAT_GOOD;
3046                break;
3047        }
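        /*
         * READ CAPACITY(10) can only express a 32-bit last LBA, so
         * containers with 2^32 or more blocks are clamped to 0xffffffff
         * above; the midlayer then retries with READ CAPACITY(16), which
         * the SERVICE_ACTION_IN_16 case handles with the full 64-bit size.
         */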
3048
3049        case MODE_SENSE:
3050        {
3051                int mode_buf_length = 4;
3052                u32 capacity;
3053                aac_modep_data mpd;
3054
3055                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3056                        capacity = fsa_dev_ptr[cid].size - 1;
3057                else
3058                        capacity = (u32)-1;
3059
3060                dprintk((KERN_DEBUG "MODE SENSE command.\n"));
3061                memset((char *)&mpd, 0, sizeof(aac_modep_data));
3062
3063                /* Mode data length */
3064                mpd.hd.data_length = sizeof(mpd.hd) - 1;
3065                /* Medium type - default */
3066                mpd.hd.med_type = 0;
3067                /* Device-specific param,
3068                   bit 7: 0/1 = write enabled/protected (WP)
3069                   bit 4: 0/1 = FUA supported (DPOFUA) */
3070                mpd.hd.dev_par = 0;
3071
3072                if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3073                        mpd.hd.dev_par = 0x10;
3074                if (scsicmd->cmnd[1] & 0x8)
3075                        mpd.hd.bd_length = 0;   /* Block descriptor length */
3076                else {
3077                        mpd.hd.bd_length = sizeof(mpd.bd);
3078                        mpd.hd.data_length += mpd.hd.bd_length;
3079                        mpd.bd.block_length[0] =
3080                                (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3081                        mpd.bd.block_length[1] =
3082                                (fsa_dev_ptr[cid].block_size >> 8) &  0xff;
3083                        mpd.bd.block_length[2] =
3084                                fsa_dev_ptr[cid].block_size  & 0xff;
3085
3086                        mpd.mpc_buf[0] = scsicmd->cmnd[2];
3087                        if (scsicmd->cmnd[2] == 0x1C) {
3088                                /* page length */
3089                                mpd.mpc_buf[1] = 0xa;
3090                                /* Mode data length */
3091                                mpd.hd.data_length = 23;
3092                        } else {
3093                                /* Mode data length */
3094                                mpd.hd.data_length = 15;
3095                        }
3096
3097                        if (capacity > 0xffffff) {
3098                                mpd.bd.block_count[0] = 0xff;
3099                                mpd.bd.block_count[1] = 0xff;
3100                                mpd.bd.block_count[2] = 0xff;
3101                        } else {
3102                                mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
3103                                mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
3104                                mpd.bd.block_count[2] = capacity  & 0xff;
3105                        }
3106                }
3107                if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3108                  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3109                        mpd.hd.data_length += 3;
3110                        mpd.mpc_buf[0] = 8;
3111                        mpd.mpc_buf[1] = 1;
3112                        mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
3113                                ? 0 : 0x04; /* WCE */
3114                        mode_buf_length = sizeof(mpd);
3115                }
3116
3117                if (mode_buf_length > scsicmd->cmnd[4])
3118                        mode_buf_length = scsicmd->cmnd[4];
3119                else
3120                        mode_buf_length = sizeof(mpd);
3121                scsi_sg_copy_from_buffer(scsicmd,
3122                                         (char *)&mpd,
3123                                         mode_buf_length);
3124                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3125                                  SAM_STAT_GOOD;
3126                break;
3127        }
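        /*
         * The page 8 handling above advertises the caching mode page with
         * only WCE meaningful: mpc_buf[2] bit 2 (0x04) is set unless the
         * aac_cache module parameter claims write-through, which is what
         * sd uses to decide whether SYNCHRONIZE CACHE must be issued.
         */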
3128        case MODE_SENSE_10:
3129        {
3130                u32 capacity;
3131                int mode_buf_length = 8;
3132                aac_modep10_data mpd10;
3133
3134                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3135                        capacity = fsa_dev_ptr[cid].size - 1;
3136                else
3137                        capacity = (u32)-1;
3138
3139                dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
3140                memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
3141                /* Mode data length (MSB) */
3142                mpd10.hd.data_length[0] = 0;
3143                /* Mode data length (LSB) */
3144                mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
3145                /* Medium type - default */
3146                mpd10.hd.med_type = 0;
3147                /* Device-specific param,
3148                   bit 7: 0/1 = write enabled/protected (WP)
3149                   bit 4: 0/1 = FUA supported (DPOFUA) */
3150                mpd10.hd.dev_par = 0;
3151
3152                if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3153                        mpd10.hd.dev_par = 0x10;
3154                mpd10.hd.rsrvd[0] = 0;  /* reserved */
3155                mpd10.hd.rsrvd[1] = 0;  /* reserved */
3156                if (scsicmd->cmnd[1] & 0x8) {
3157                        /* Block descriptor length (MSB) */
3158                        mpd10.hd.bd_length[0] = 0;
3159                        /* Block descriptor length (LSB) */
3160                        mpd10.hd.bd_length[1] = 0;
3161                } else {
3162                        mpd10.hd.bd_length[0] = 0;
3163                        mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
3164
3165                        mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
3166
3167                        mpd10.bd.block_length[0] =
3168                                (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3169                        mpd10.bd.block_length[1] =
3170                                (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3171                        mpd10.bd.block_length[2] =
3172                                fsa_dev_ptr[cid].block_size  & 0xff;
3173
3174                        if (capacity > 0xffffff) {
3175                                mpd10.bd.block_count[0] = 0xff;
3176                                mpd10.bd.block_count[1] = 0xff;
3177                                mpd10.bd.block_count[2] = 0xff;
3178                        } else {
3179                                mpd10.bd.block_count[0] =
3180                                        (capacity >> 16) & 0xff;
3181                                mpd10.bd.block_count[1] =
3182                                        (capacity >> 8) & 0xff;
3183                                mpd10.bd.block_count[2] =
3184                                        capacity  & 0xff;
3185                        }
3186                }
3187                if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3188                  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3189                        mpd10.hd.data_length[1] += 3;
3190                        mpd10.mpc_buf[0] = 8;
3191                        mpd10.mpc_buf[1] = 1;
3192                        mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
3193                                ? 0 : 0x04; /* WCE */
3194                        mode_buf_length = sizeof(mpd10);
3195                        if (mode_buf_length > scsicmd->cmnd[8])
3196                                mode_buf_length = scsicmd->cmnd[8];
3197                }
3198                scsi_sg_copy_from_buffer(scsicmd,
3199                                         (char *)&mpd10,
3200                                         mode_buf_length);
3201
3202                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3203                                  SAM_STAT_GOOD;
3204                break;
3205        }
3206        case REQUEST_SENSE:
3207                dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
3208                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3209                                sizeof(struct sense_data));
3210                memset(&dev->fsa_dev[cid].sense_data, 0,
3211                                sizeof(struct sense_data));
3212                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3213                                  SAM_STAT_GOOD;
3214                break;
3215
3216        case ALLOW_MEDIUM_REMOVAL:
3217                dprintk((KERN_DEBUG "LOCK command.\n"));
3218                if (scsicmd->cmnd[4])
3219                        fsa_dev_ptr[cid].locked = 1;
3220                else
3221                        fsa_dev_ptr[cid].locked = 0;
3222
3223                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3224                                  SAM_STAT_GOOD;
3225                break;
3226        /*
3227         *      These commands are all No-Ops
3228         */
3229        case TEST_UNIT_READY:
3230                if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
3231                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3232                                SAM_STAT_CHECK_CONDITION;
3233                        set_sense(&dev->fsa_dev[cid].sense_data,
3234                                  NOT_READY, SENCODE_BECOMING_READY,
3235                                  ASENCODE_BECOMING_READY, 0, 0);
3236                        memcpy(scsicmd->sense_buffer,
3237                               &dev->fsa_dev[cid].sense_data,
3238                               min_t(size_t,
3239                                     sizeof(dev->fsa_dev[cid].sense_data),
3240                                     SCSI_SENSE_BUFFERSIZE));
3241                        break;
3242                }
3243                fallthrough;
3244        case RESERVE:
3245        case RELEASE:
3246        case REZERO_UNIT:
3247        case REASSIGN_BLOCKS:
3248        case SEEK_10:
3249                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3250                                  SAM_STAT_GOOD;
3251                break;
3252
3253        case START_STOP:
3254                return aac_start_stop(scsicmd);
3255
3256        default:
3257        /*
3258         *      Unhandled commands
3259         */
3260                dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
3261                                scsicmd->cmnd[0]));
3262                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3263                                SAM_STAT_CHECK_CONDITION;
3264                set_sense(&dev->fsa_dev[cid].sense_data,
3265                          ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
3266                          ASENCODE_INVALID_COMMAND, 0, 0);
3267                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3268                                min_t(size_t,
3269                                      sizeof(dev->fsa_dev[cid].sense_data),
3270                                      SCSI_SENSE_BUFFERSIZE));
3271        }
3272
3273scsi_done_ret:
3274
3275        scsicmd->scsi_done(scsicmd);
3276        return 0;
3277}
3278
3279static int query_disk(struct aac_dev *dev, void __user *arg)
3280{
3281        struct aac_query_disk qd;
3282        struct fsa_dev_info *fsa_dev_ptr;
3283
3284        fsa_dev_ptr = dev->fsa_dev;
3285        if (!fsa_dev_ptr)
3286                return -EBUSY;
3287        if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
3288                return -EFAULT;
3289        if (qd.cnum == -1) {
3290                if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
3291                        return -EINVAL;
3292                qd.cnum = qd.id;
3293        } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
3294                if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
3295                        return -EINVAL;
3296                qd.instance = dev->scsi_host_ptr->host_no;
3297                qd.bus = 0;
3298                qd.id = CONTAINER_TO_ID(qd.cnum);
3299                qd.lun = CONTAINER_TO_LUN(qd.cnum);
3300        } else
3301                return -EINVAL;
3302
3303        qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
3304        qd.locked = fsa_dev_ptr[qd.cnum].locked;
3305        qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
3306
3307        if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
3308                qd.unmapped = 1;
3309        else
3310                qd.unmapped = 0;
3311
3312        strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
3313          min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
3314
3315        if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
3316                return -EFAULT;
3317        return 0;
3318}
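/*
 * query_disk() accepts two addressing forms from user space: either
 * qd.cnum == -1 with a container index in qd.id, or qd.bus/qd.id/qd.lun
 * all -1 with the container number in qd.cnum, in which case the
 * bus/id/lun and host instance are filled in from the container number.
 * Anything else is rejected with -EINVAL.
 */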
3319
3320static int force_delete_disk(struct aac_dev *dev, void __user *arg)
3321{
3322        struct aac_delete_disk dd;
3323        struct fsa_dev_info *fsa_dev_ptr;
3324
3325        fsa_dev_ptr = dev->fsa_dev;
3326        if (!fsa_dev_ptr)
3327                return -EBUSY;
3328
3329        if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3330                return -EFAULT;
3331
3332        if (dd.cnum >= dev->maximum_num_containers)
3333                return -EINVAL;
3334        /*
3335         *      Mark this container as being deleted.
3336         */
3337        fsa_dev_ptr[dd.cnum].deleted = 1;
3338        /*
3339         *      Mark the container as no longer valid
3340         */
3341        fsa_dev_ptr[dd.cnum].valid = 0;
3342        return 0;
3343}
3344
3345static int delete_disk(struct aac_dev *dev, void __user *arg)
3346{
3347        struct aac_delete_disk dd;
3348        struct fsa_dev_info *fsa_dev_ptr;
3349
3350        fsa_dev_ptr = dev->fsa_dev;
3351        if (!fsa_dev_ptr)
3352                return -EBUSY;
3353
3354        if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3355                return -EFAULT;
3356
3357        if (dd.cnum >= dev->maximum_num_containers)
3358                return -EINVAL;
3359        /*
3360         *      If the container is locked, it cannot be deleted by the API.
3361         */
3362        if (fsa_dev_ptr[dd.cnum].locked)
3363                return -EBUSY;
3364        else {
3365                /*
3366                 *      Mark the container as no longer being valid.
3367                 */
3368                fsa_dev_ptr[dd.cnum].valid = 0;
3369                fsa_dev_ptr[dd.cnum].devname[0] = '\0';
3370                return 0;
3371        }
3372}
3373
3374int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
3375{
3376        switch (cmd) {
3377        case FSACTL_QUERY_DISK:
3378                return query_disk(dev, arg);
3379        case FSACTL_DELETE_DISK:
3380                return delete_disk(dev, arg);
3381        case FSACTL_FORCE_DELETE_DISK:
3382                return force_delete_disk(dev, arg);
3383        case FSACTL_GET_CONTAINERS:
3384                return aac_get_containers(dev);
3385        default:
3386                return -ENOTTY;
3387        }
3388}
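/*
 * A minimal user-space sketch of the FSACTL_QUERY_DISK path handled
 * above, assuming the ioctl is issued against the adapter's aacraid
 * management character device (the device node name below is
 * illustrative only, not taken from this file):
 *
 *	struct aac_query_disk qd = { .cnum = 0,
 *				     .bus = -1, .id = -1, .lun = -1 };
 *	int fd = open("/dev/aac0", O_RDONLY);	// hypothetical device node
 *	if (fd >= 0 && ioctl(fd, FSACTL_QUERY_DISK, &qd) == 0)
 *		printf("container 0: valid=%d name=%s\n", qd.valid, qd.name);
 */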
3389
3390/**
3391 * aac_srb_callback
3392 * @context: the context set in the fib - here it is scsi cmd
3393 * @fibptr: pointer to the fib
3394 *
3395 * Handles the completion of a scsi command to a non-dasd device
3396 */
3397static void aac_srb_callback(void *context, struct fib * fibptr)
3398{
3399        struct aac_srb_reply *srbreply;
3400        struct scsi_cmnd *scsicmd;
3401
3402        scsicmd = (struct scsi_cmnd *) context;
3403
3404        if (!aac_valid_context(scsicmd, fibptr))
3405                return;
3406
3407        BUG_ON(fibptr == NULL);
3408
3409        srbreply = (struct aac_srb_reply *) fib_data(fibptr);
3410
3411        scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
3412
3413        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3414                /* fast response */
3415                srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
3416                srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
3417        } else {
3418                /*
3419                 *      Calculate resid for sg
3420                 */
3421                scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
3422                                   - le32_to_cpu(srbreply->data_xfer_length));
3423        }
3424
3425
3426        scsi_dma_unmap(scsicmd);
3427
3428        /* expose physical device if expose_physicals flag is on */
3429        if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
3430          && expose_physicals > 0)
3431                aac_expose_phy_device(scsicmd);
3432
3433        /*
3434         * First check the fib status
3435         */
3436
3437        if (le32_to_cpu(srbreply->status) != ST_OK) {
3438                int len;
3439
3440                pr_warn("aac_srb_callback: srb failed, status = %d\n",
3441                                le32_to_cpu(srbreply->status));
3442                len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3443                            SCSI_SENSE_BUFFERSIZE);
3444                scsicmd->result = DID_ERROR << 16
3445                                | COMMAND_COMPLETE << 8
3446                                | SAM_STAT_CHECK_CONDITION;
3447                memcpy(scsicmd->sense_buffer,
3448                                srbreply->sense_data, len);
3449        }
3450
3451        /*
3452         * Next check the srb status
3453         */
3454        switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
3455        case SRB_STATUS_ERROR_RECOVERY:
3456        case SRB_STATUS_PENDING:
3457        case SRB_STATUS_SUCCESS:
3458                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3459                break;
3460        case SRB_STATUS_DATA_OVERRUN:
3461                switch (scsicmd->cmnd[0]) {
3462                case  READ_6:
3463                case  WRITE_6:
3464                case  READ_10:
3465                case  WRITE_10:
3466                case  READ_12:
3467                case  WRITE_12:
3468                case  READ_16:
3469                case  WRITE_16:
3470                        if (le32_to_cpu(srbreply->data_xfer_length)
3471                                                < scsicmd->underflow)
3472                                pr_warn("aacraid: SCSI CMD underflow\n");
3473                        else
3474                                pr_warn("aacraid: SCSI CMD Data Overrun\n");
3475                        scsicmd->result = DID_ERROR << 16
3476                                        | COMMAND_COMPLETE << 8;
3477                        break;
3478                case INQUIRY:
3479                        scsicmd->result = DID_OK << 16
3480                                        | COMMAND_COMPLETE << 8;
3481                        break;
3482                default:
3483                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3484                        break;
3485                }
3486                break;
3487        case SRB_STATUS_ABORTED:
3488                scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3489                break;
3490        case SRB_STATUS_ABORT_FAILED:
3491                /*
3492                 * Not sure about this one - but assuming the
3493                 * hba was trying to abort for some reason
3494                 */
3495                scsicmd->result = DID_ERROR << 16 | ABORT << 8;
3496                break;
3497        case SRB_STATUS_PARITY_ERROR:
3498                scsicmd->result = DID_PARITY << 16
3499                                | MSG_PARITY_ERROR << 8;
3500                break;
3501        case SRB_STATUS_NO_DEVICE:
3502        case SRB_STATUS_INVALID_PATH_ID:
3503        case SRB_STATUS_INVALID_TARGET_ID:
3504        case SRB_STATUS_INVALID_LUN:
3505        case SRB_STATUS_SELECTION_TIMEOUT:
3506                scsicmd->result = DID_NO_CONNECT << 16
3507                                | COMMAND_COMPLETE << 8;
3508                break;
3509
3510        case SRB_STATUS_COMMAND_TIMEOUT:
3511        case SRB_STATUS_TIMEOUT:
3512                scsicmd->result = DID_TIME_OUT << 16
3513                                | COMMAND_COMPLETE << 8;
3514                break;
3515
3516        case SRB_STATUS_BUSY:
3517                scsicmd->result = DID_BUS_BUSY << 16
3518                                | COMMAND_COMPLETE << 8;
3519                break;
3520
3521        case SRB_STATUS_BUS_RESET:
3522                scsicmd->result = DID_RESET << 16
3523                                | COMMAND_COMPLETE << 8;
3524                break;
3525
3526        case SRB_STATUS_MESSAGE_REJECTED:
3527                scsicmd->result = DID_ERROR << 16
3528                                | MESSAGE_REJECT << 8;
3529                break;
3530        case SRB_STATUS_REQUEST_FLUSHED:
3531        case SRB_STATUS_ERROR:
3532        case SRB_STATUS_INVALID_REQUEST:
3533        case SRB_STATUS_REQUEST_SENSE_FAILED:
3534        case SRB_STATUS_NO_HBA:
3535        case SRB_STATUS_UNEXPECTED_BUS_FREE:
3536        case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
3537        case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
3538        case SRB_STATUS_DELAYED_RETRY:
3539        case SRB_STATUS_BAD_FUNCTION:
3540        case SRB_STATUS_NOT_STARTED:
3541        case SRB_STATUS_NOT_IN_USE:
3542        case SRB_STATUS_FORCE_ABORT:
3543        case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
3544        default:
3545#ifdef AAC_DETAILED_STATUS_INFO
3546                pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
3547                        le32_to_cpu(srbreply->srb_status) & 0x3F,
3548                        aac_get_status_string(
3549                                le32_to_cpu(srbreply->srb_status) & 0x3F),
3550                        scsicmd->cmnd[0],
3551                        le32_to_cpu(srbreply->scsi_status));
3552#endif
3553                /*
3554                 * When the CC bit is SET by the host in an ATA pass-through CDB,
3555                 * the driver is supposed to return DID_OK.
3556                 *
3557                 * When the CC bit is RESET by the host, the driver should
3558                 * return DID_ERROR.
3559                 */
3560                if ((scsicmd->cmnd[0] == ATA_12)
3561                        || (scsicmd->cmnd[0] == ATA_16)) {
3562
3563                        if (scsicmd->cmnd[2] & (0x01 << 5)) {
3564                                scsicmd->result = DID_OK << 16
3565                                        | COMMAND_COMPLETE << 8;
3566                                break;
3567                        } else {
3568                                scsicmd->result = DID_ERROR << 16
3569                                        | COMMAND_COMPLETE << 8;
3570                                break;
3571                        }
3572                } else {
3573                        scsicmd->result = DID_ERROR << 16
3574                                | COMMAND_COMPLETE << 8;
3575                        break;
3576                }
3577        }
3578        if (le32_to_cpu(srbreply->scsi_status)
3579                        == SAM_STAT_CHECK_CONDITION) {
3580                int len;
3581
3582                scsicmd->result |= SAM_STAT_CHECK_CONDITION;
3583                len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3584                            SCSI_SENSE_BUFFERSIZE);
3585#ifdef AAC_DETAILED_STATUS_INFO
3586                pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
3587                                        le32_to_cpu(srbreply->status), len);
3588#endif
3589                memcpy(scsicmd->sense_buffer,
3590                                srbreply->sense_data, len);
3591        }
3592
3593        /*
3594         * OR in the scsi status (already shifted up a bit)
3595         */
3596        scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
3597
3598        aac_fib_complete(fibptr);
3599        scsicmd->scsi_done(scsicmd);
3600}
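/*
 * To summarize the translation above: the FIB-level status is checked
 * first (anything other than ST_OK forces CHECK CONDITION with the
 * returned sense data), then the low six bits of the SRB status select
 * the host byte (DID_*), and then the target's SCSI status is OR-ed
 * into the result, with the sense data copied out when it indicates
 * CHECK CONDITION.
 */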
3601
3602static void hba_resp_task_complete(struct aac_dev *dev,
3603                                        struct scsi_cmnd *scsicmd,
3604                                        struct aac_hba_resp *err)
3605{
3606        scsicmd->result = err->status;
3607        /* set residual count */
3608        scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
3609
3610        switch (err->status) {
3611        case SAM_STAT_GOOD:
3612                scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3613                break;
3614        case SAM_STAT_CHECK_CONDITION:
3615        {
3616                int len;
3617
3618                len = min_t(u8, err->sense_response_data_len,
3619                        SCSI_SENSE_BUFFERSIZE);
3620                if (len)
3621                        memcpy(scsicmd->sense_buffer,
3622                                err->sense_response_buf, len);
3623                scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3624                break;
3625        }
3626        case SAM_STAT_BUSY:
3627                scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
3628                break;
3629        case SAM_STAT_TASK_ABORTED:
3630                scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
3631                break;
3632        case SAM_STAT_RESERVATION_CONFLICT:
3633        case SAM_STAT_TASK_SET_FULL:
3634        default:
3635                scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3636                break;
3637        }
3638}
3639
3640static void hba_resp_task_failure(struct aac_dev *dev,
3641                                        struct scsi_cmnd *scsicmd,
3642                                        struct aac_hba_resp *err)
3643{
3644        switch (err->status) {
3645        case HBA_RESP_STAT_HBAMODE_DISABLED:
3646        {
3647                u32 bus, cid;
3648
3649                bus = aac_logical_to_phys(scmd_channel(scsicmd));
3650                cid = scmd_id(scsicmd);
3651                if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
3652                        dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
3653                        dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
3654                }
3655                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3656                break;
3657        }
3658        case HBA_RESP_STAT_IO_ERROR:
3659        case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
3660                scsicmd->result = DID_OK << 16 |
3661                        COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
3662                break;
3663        case HBA_RESP_STAT_IO_ABORTED:
3664                scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3665                break;
3666        case HBA_RESP_STAT_INVALID_DEVICE:
3667                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3668                break;
3669        case HBA_RESP_STAT_UNDERRUN:
3670                /* UNDERRUN is OK */
3671                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3672                break;
3673        case HBA_RESP_STAT_OVERRUN:
3674        default:
3675                scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3676                break;
3677        }
3678}
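/*
 * Note the HBA_RESP_STAT_HBAMODE_DISABLED case above: once the firmware
 * reports that passthrough (HBA) mode is gone for a device, its hba_map
 * entry is downgraded to AAC_DEVTYPE_ARC_RAW so that subsequent commands
 * from aac_scsi_cmd() take the SRB path (when physical devices are
 * exposed) instead of aac_send_hba_fib().
 */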
3679
3680/**
3681 * aac_hba_callback
3682 * @context: the context set in the fib - here it is scsi cmd
3683 * @fibptr: pointer to the fib
3684 *
3685 * Handles the completion of a native HBA scsi command
3686 */
3687void aac_hba_callback(void *context, struct fib *fibptr)
3688{
3689        struct aac_dev *dev;
3690        struct scsi_cmnd *scsicmd;
3691
3692        struct aac_hba_resp *err =
3693                        &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
3694
3695        scsicmd = (struct scsi_cmnd *) context;
3696
3697        if (!aac_valid_context(scsicmd, fibptr))
3698                return;
3699
3700        WARN_ON(fibptr == NULL);
3701        dev = fibptr->dev;
3702
3703        if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
3704                scsi_dma_unmap(scsicmd);
3705
3706        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3707                /* fast response */
3708                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3709                goto out;
3710        }
3711
3712        switch (err->service_response) {
3713        case HBA_RESP_SVCRES_TASK_COMPLETE:
3714                hba_resp_task_complete(dev, scsicmd, err);
3715                break;
3716        case HBA_RESP_SVCRES_FAILURE:
3717                hba_resp_task_failure(dev, scsicmd, err);
3718                break;
3719        case HBA_RESP_SVCRES_TMF_REJECTED:
3720                scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
3721                break;
3722        case HBA_RESP_SVCRES_TMF_LUN_INVALID:
3723                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3724                break;
3725        case HBA_RESP_SVCRES_TMF_COMPLETE:
3726        case HBA_RESP_SVCRES_TMF_SUCCEEDED:
3727                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3728                break;
3729        default:
3730                scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3731                break;
3732        }
3733
3734out:
3735        aac_fib_complete(fibptr);
3736
3737        if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
3738                scsicmd->SCp.sent_command = 1;
3739        else
3740                scsicmd->scsi_done(scsicmd);
3741}
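/*
 * When the fib carries FIB_CONTEXT_FLAG_NATIVE_HBA_TMF the command is not
 * completed through scsi_done(); SCp.sent_command is set instead, which
 * the task-management issuer elsewhere in the driver presumably polls to
 * learn that the TMF has finished.
 */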
3742
3743/**
3744 * aac_send_srb_fib
3745 * @scsicmd: the scsi command block
3746 *
3747 * This routine will form a FIB and fill in the aac_srb from the
3748 * scsicmd passed in.
3749 */
3750static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
3751{
3752        struct fib* cmd_fibcontext;
3753        struct aac_dev* dev;
3754        int status;
3755
3756        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3757        if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3758                        scsicmd->device->lun > 7) {
3759                scsicmd->result = DID_NO_CONNECT << 16;
3760                scsicmd->scsi_done(scsicmd);
3761                return 0;
3762        }
3763
3764        /*
3765         *      Allocate and initialize a Fib then setup a BlockWrite command
3766         */
3767        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3768        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3769        status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
3770
3771        /*
3772         *      Check that the command was queued to the controller
3773         */
3774        if (status == -EINPROGRESS)
3775                return 0;
3776
3777        printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
3778        aac_fib_complete(cmd_fibcontext);
3779        aac_fib_free(cmd_fibcontext);
3780
3781        return -1;
3782}
3783
3784/**
3785 * aac_send_hba_fib
3786 * @scsicmd: the scsi command block
3787 *
3788 * This routine will form a FIB and fill in the aac_hba_cmd_req from the
3789 * scsicmd passed in.
3790 */
3791static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
3792{
3793        struct fib *cmd_fibcontext;
3794        struct aac_dev *dev;
3795        int status;
3796
3797        dev = shost_priv(scsicmd->device->host);
3798        if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3799                        scsicmd->device->lun > AAC_MAX_LUN - 1) {
3800                scsicmd->result = DID_NO_CONNECT << 16;
3801                scsicmd->scsi_done(scsicmd);
3802                return 0;
3803        }
3804
3805        /*
3806         *      Allocate and initialize a Fib then setup a BlockWrite command
3807         */
3808        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3809        if (!cmd_fibcontext)
3810                return -1;
3811
3812        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3813        status = aac_adapter_hba(cmd_fibcontext, scsicmd);
3814
3815        /*
3816         *      Check that the command was queued to the controller
3817         */
3818        if (status == -EINPROGRESS)
3819                return 0;
3820
3821        pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
3822                status);
3823        aac_fib_complete(cmd_fibcontext);
3824        aac_fib_free(cmd_fibcontext);
3825
3826        return -1;
3827}
3828
3829
3830static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
3831{
3832        unsigned long byte_count = 0;
3833        int nseg;
3834        struct scatterlist *sg;
3835        int i;
3836
3837        /* Get rid of old data */
3838        psg->count = 0;
3839        psg->sg[0].addr = 0;
3840        psg->sg[0].count = 0;
3841
3842        nseg = scsi_dma_map(scsicmd);
3843        if (nseg <= 0)
3844                return nseg;
3845
3846        psg->count = cpu_to_le32(nseg);
3847
3848        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3849                psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
3850                psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
3851                byte_count += sg_dma_len(sg);
3852        }
3853        /* hba wants the size to be exact */
3854        if (byte_count > scsi_bufflen(scsicmd)) {
3855                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3856                        (byte_count - scsi_bufflen(scsicmd));
3857                psg->sg[i-1].count = cpu_to_le32(temp);
3858                byte_count = scsi_bufflen(scsicmd);
3859        }
3860        /* Check for command underflow */
3861        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3862                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3863                       byte_count, scsicmd->underflow);
3864        }
3865
3866        return byte_count;
3867}
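/*
 * Worked example for the trimming above: if scsi_dma_map() returned three
 * 4 KiB segments (12288 bytes) but scsi_bufflen() is only 10240, the last
 * element is shortened by 2048 to 2048 bytes so the firmware sees exactly
 * the transfer length of the command.  The same adjustment is repeated in
 * the 64-bit and raw scatter-gather builders below.
 */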
3868
3869
3870static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
3871{
3872        unsigned long byte_count = 0;
3873        u64 addr;
3874        int nseg;
3875        struct scatterlist *sg;
3876        int i;
3877
3878        /* Get rid of old data */
3879        psg->count = 0;
3880        psg->sg[0].addr[0] = 0;
3881        psg->sg[0].addr[1] = 0;
3882        psg->sg[0].count = 0;
3883
3884        nseg = scsi_dma_map(scsicmd);
3885        if (nseg <= 0)
3886                return nseg;
3887
3888        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3889                int count = sg_dma_len(sg);
3890                addr = sg_dma_address(sg);
3891                psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
3892                psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
3893                psg->sg[i].count = cpu_to_le32(count);
3894                byte_count += count;
3895        }
3896        psg->count = cpu_to_le32(nseg);
3897        /* hba wants the size to be exact */
3898        if (byte_count > scsi_bufflen(scsicmd)) {
3899                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3900                        (byte_count - scsi_bufflen(scsicmd));
3901                psg->sg[i-1].count = cpu_to_le32(temp);
3902                byte_count = scsi_bufflen(scsicmd);
3903        }
3904        /* Check for command underflow */
3905        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3906                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3907                       byte_count, scsicmd->underflow);
3908        }
3909
3910        return byte_count;
3911}
3912
3913static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
3914{
3915        unsigned long byte_count = 0;
3916        int nseg;
3917        struct scatterlist *sg;
3918        int i;
3919
3920        /* Get rid of old data */
3921        psg->count = 0;
3922        psg->sg[0].next = 0;
3923        psg->sg[0].prev = 0;
3924        psg->sg[0].addr[0] = 0;
3925        psg->sg[0].addr[1] = 0;
3926        psg->sg[0].count = 0;
3927        psg->sg[0].flags = 0;
3928
3929        nseg = scsi_dma_map(scsicmd);
3930        if (nseg <= 0)
3931                return nseg;
3932
3933        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3934                int count = sg_dma_len(sg);
3935                u64 addr = sg_dma_address(sg);
3936                psg->sg[i].next = 0;
3937                psg->sg[i].prev = 0;
3938                psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
3939                psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
3940                psg->sg[i].count = cpu_to_le32(count);
3941                psg->sg[i].flags = 0;
3942                byte_count += count;
3943        }
3944        psg->count = cpu_to_le32(nseg);
3945        /* hba wants the size to be exact */
3946        if (byte_count > scsi_bufflen(scsicmd)) {
3947                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3948                        (byte_count - scsi_bufflen(scsicmd));
3949                psg->sg[i-1].count = cpu_to_le32(temp);
3950                byte_count = scsi_bufflen(scsicmd);
3951        }
3952        /* Check for command underflow */
3953        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3954                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3955                       byte_count, scsicmd->underflow);
3956        }
3957
3958        return byte_count;
3959}
3960
3961static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
3962                                struct aac_raw_io2 *rio2, int sg_max)
3963{
3964        unsigned long byte_count = 0;
3965        int nseg;
3966        struct scatterlist *sg;
3967        int i, conformable = 0;
3968        u32 min_size = PAGE_SIZE, cur_size;
3969
3970        nseg = scsi_dma_map(scsicmd);
3971        if (nseg <= 0)
3972                return nseg;
3973
3974        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3975                int count = sg_dma_len(sg);
3976                u64 addr = sg_dma_address(sg);
3977
3978                BUG_ON(i >= sg_max);
3979                rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
3980                rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
3981                cur_size = cpu_to_le32(count);
3982                rio2->sge[i].length = cur_size;
3983                rio2->sge[i].flags = 0;
3984                if (i == 0) {
3985                        conformable = 1;
3986                        rio2->sgeFirstSize = cur_size;
3987                } else if (i == 1) {
3988                        rio2->sgeNominalSize = cur_size;
3989                        min_size = cur_size;
3990                } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
3991                        conformable = 0;
3992                        if (cur_size < min_size)
3993                                min_size = cur_size;
3994                }
3995                byte_count += count;
3996        }
3997
3998        /* hba wants the size to be exact */
3999        if (byte_count > scsi_bufflen(scsicmd)) {
4000                u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
4001                        (byte_count - scsi_bufflen(scsicmd));
4002                rio2->sge[i-1].length = cpu_to_le32(temp);
4003                byte_count = scsi_bufflen(scsicmd);
4004        }
4005
4006        rio2->sgeCnt = cpu_to_le32(nseg);
4007        rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
4008        /* not conformable: evaluate required sg elements */
4009        if (!conformable) {
4010                int j, nseg_new = nseg, err_found;
4011                for (i = min_size / PAGE_SIZE; i >= 1; --i) {
4012                        err_found = 0;
4013                        nseg_new = 2;
4014                        for (j = 1; j < nseg - 1; ++j) {
4015                                if (rio2->sge[j].length % (i*PAGE_SIZE)) {
4016                                        err_found = 1;
4017                                        break;
4018                                }
4019                                nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
4020                        }
4021                        if (!err_found)
4022                                break;
4023                }
4024                if (i > 0 && nseg_new <= sg_max) {
4025                        int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new);
4026
4027                        if (ret < 0)
4028                                return ret;
4029                }
4030        } else
4031                rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4032
4033        /* Check for command underflow */
4034        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4035                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
4036                       byte_count, scsicmd->underflow);
4037        }
4038
4039        return byte_count;
4040}
4041
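    /*
     * Repack a non-conformable RAW IO2 scatter/gather list: each middle
     * element (everything but the first and the last) is split into
     * chunks of pages * PAGE_SIZE bytes, which the caller has already
     * verified divides every such element evenly, yielding nseg_new
     * conformable elements.  The first and last elements are kept
     * as-is.  Does nothing when aac_convert_sgl is clear.
     */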
4042static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
4043{
4044        struct sge_ieee1212 *sge;
4045        int i, j, pos;
4046        u32 addr_low;
4047
4048        if (aac_convert_sgl == 0)
4049                return 0;
4050
4051        sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
4052        if (sge == NULL)
4053                return -ENOMEM;
4054
4055        for (i = 1, pos = 1; i < nseg-1; ++i) {
4056                for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
4057                        addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
4058                        sge[pos].addrLow = addr_low;
4059                        sge[pos].addrHigh = rio2->sge[i].addrHigh;
4060                        if (addr_low < rio2->sge[i].addrLow)
4061                                sge[pos].addrHigh++;
4062                        sge[pos].length = pages * PAGE_SIZE;
4063                        sge[pos].flags = 0;
4064                        pos++;
4065                }
4066        }
4067        sge[pos] = rio2->sge[nseg-1];
4068        memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
4069
4070        kfree(sge);
4071        rio2->sgeCnt = cpu_to_le32(nseg_new);
4072        rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4073        rio2->sgeNominalSize = pages * PAGE_SIZE;
4074        return 0;
4075}
4076
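    /*
     * Build the scatter/gather list for a native HBA command.  Lists of
     * up to HBA_MAX_SG_EMBEDDED elements are embedded directly in the
     * command; longer lists are written starting at hbacmd->sge[2] and
     * sge[0] is set up to reference the external SGL at the caller-
     * supplied sg_address.  As in the other builders, the final element
     * is trimmed so the total matches scsi_bufflen().
     */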
4077static long aac_build_sghba(struct scsi_cmnd *scsicmd,
4078                        struct aac_hba_cmd_req *hbacmd,
4079                        int sg_max,
4080                        u64 sg_address)
4081{
4082        unsigned long byte_count = 0;
4083        int nseg;
4084        struct scatterlist *sg;
4085        int i;
4086        u32 cur_size;
4087        struct aac_hba_sgl *sge;
4088
4089        nseg = scsi_dma_map(scsicmd);
4090        if (nseg <= 0) {
4091                byte_count = nseg;
4092                goto out;
4093        }
4094
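        /*
         * When the list will not fit embedded, the data elements are
         * written from sge[2] onwards; sge[0] is filled in further
         * down to describe the external SGL.
         */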
4095        if (nseg > HBA_MAX_SG_EMBEDDED)
4096                sge = &hbacmd->sge[2];
4097        else
4098                sge = &hbacmd->sge[0];
4099
4100        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4101                int count = sg_dma_len(sg);
4102                u64 addr = sg_dma_address(sg);
4103
4104                WARN_ON(i >= sg_max);
4105                sge->addr_hi = cpu_to_le32((u32)(addr>>32));
4106                sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
4107                cur_size = cpu_to_le32(count);
4108                sge->len = cur_size;
4109                sge->flags = 0;
4110                byte_count += count;
4111                sge++;
4112        }
4113
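        /* Step back to the last element written so it can be trimmed and flagged */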
4114        sge--;
4115        /* hba wants the size to be exact */
4116        if (byte_count > scsi_bufflen(scsicmd)) {
4117                u32 temp;
4118
4119                temp = le32_to_cpu(sge->len) -
4120                        (byte_count - scsi_bufflen(scsicmd));
4121                sge->len = cpu_to_le32(temp);
4122                byte_count = scsi_bufflen(scsicmd);
4123        }
4124
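        /*
         * Complete the descriptor: an embedded list reports its element
         * count and flags the final SGE; otherwise sge[0] is pointed at
         * the external SGL at sg_address and a single embedded
         * descriptor is reported.
         */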
4125        if (nseg <= HBA_MAX_SG_EMBEDDED) {
4126                hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
4127                sge->flags = cpu_to_le32(0x40000000);
4128        } else {
4129                /* not embedded */
4130                hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
4131                hbacmd->emb_data_desc_count = cpu_to_le32(1);
4132                hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
4133                hbacmd->sge[0].addr_lo =
4134                        cpu_to_le32((u32)(sg_address & 0xffffffff));
4135        }
4136
4137        /* Check for command underflow */
4138        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4139                pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
4140                                byte_count, scsicmd->underflow);
4141        }
4142out:
4143        return byte_count;
4144}
4145
4146#ifdef AAC_DETAILED_STATUS_INFO
4147
4148struct aac_srb_status_info {
4149        u32     status;
4150        char    *str;
4151};
4152
4153
4154static struct aac_srb_status_info srb_status_info[] = {
4155        { SRB_STATUS_PENDING,           "Pending Status"},
4156        { SRB_STATUS_SUCCESS,           "Success"},
4157        { SRB_STATUS_ABORTED,           "Aborted Command"},
4158        { SRB_STATUS_ABORT_FAILED,      "Abort Failed"},
4159        { SRB_STATUS_ERROR,             "Error Event"},
4160        { SRB_STATUS_BUSY,              "Device Busy"},
4161        { SRB_STATUS_INVALID_REQUEST,   "Invalid Request"},
4162        { SRB_STATUS_INVALID_PATH_ID,   "Invalid Path ID"},
4163        { SRB_STATUS_NO_DEVICE,         "No Device"},
4164        { SRB_STATUS_TIMEOUT,           "Timeout"},
4165        { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
4166        { SRB_STATUS_COMMAND_TIMEOUT,   "Command Timeout"},
4167        { SRB_STATUS_MESSAGE_REJECTED,  "Message Rejected"},
4168        { SRB_STATUS_BUS_RESET,         "Bus Reset"},
4169        { SRB_STATUS_PARITY_ERROR,      "Parity Error"},
4170        { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
4171        { SRB_STATUS_NO_HBA,            "No HBA"},
4172        { SRB_STATUS_DATA_OVERRUN,      "Data Overrun/Data Underrun"},
4173        { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
4174        { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
4175        { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
4176        { SRB_STATUS_REQUEST_FLUSHED,   "Request Flushed"},
4177        { SRB_STATUS_DELAYED_RETRY,     "Delayed Retry"},
4178        { SRB_STATUS_INVALID_LUN,       "Invalid LUN"},
4179        { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
4180        { SRB_STATUS_BAD_FUNCTION,      "Bad Function"},
4181        { SRB_STATUS_ERROR_RECOVERY,    "Error Recovery"},
4182        { SRB_STATUS_NOT_STARTED,       "Not Started"},
4183        { SRB_STATUS_NOT_IN_USE,        "Not In Use"},
4184        { SRB_STATUS_FORCE_ABORT,       "Force Abort"},
4185        { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
4186        { 0xff,                         "Unknown Error"}
4187};
4188
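    /* Translate an SRB status code into a printable description */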
4189char *aac_get_status_string(u32 status)
4190{
4191        int i;
4192
4193        for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
4194                if (srb_status_info[i].status == status)
4195                        return srb_status_info[i].str;
4196
4197        return "Bad Status Code";
4198}
4199
4200#endif
4201