linux/drivers/scsi/aacraid/aachba.c
   1/*
   2 *      Adaptec AAC series RAID controller driver
   3 *      (c) Copyright 2001 Red Hat Inc.
   4 *
   5 * based on the old aacraid driver that is..
   6 * Adaptec aacraid device driver for Linux.
   7 *
   8 * Copyright (c) 2000-2010 Adaptec, Inc.
   9 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  10 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2, or (at your option)
  15 * any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; see the file COPYING.  If not, write to
  24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  25 *
  26 * Module Name:
  27 *  aachba.c
  28 *
  29 * Abstract: Contains Interfaces to manage IOs.
  30 *
  31 */
  32
  33#include <linux/kernel.h>
  34#include <linux/init.h>
  35#include <linux/types.h>
  36#include <linux/pci.h>
  37#include <linux/spinlock.h>
  38#include <linux/slab.h>
  39#include <linux/completion.h>
  40#include <linux/blkdev.h>
  41#include <linux/uaccess.h>
  42#include <linux/highmem.h> /* For flush_kernel_dcache_page */
  43#include <linux/module.h>
  44
  45#include <asm/unaligned.h>
  46
  47#include <scsi/scsi.h>
  48#include <scsi/scsi_cmnd.h>
  49#include <scsi/scsi_device.h>
  50#include <scsi/scsi_host.h>
  51
  52#include "aacraid.h"
  53
  54/* values for inqd_pdt: Peripheral device type in plain English */
  55#define INQD_PDT_DA     0x00    /* Direct-access (DISK) device */
  56#define INQD_PDT_PROC   0x03    /* Processor device */
  57#define INQD_PDT_CHNGR  0x08    /* Changer (jukebox, scsi2) */
  58#define INQD_PDT_COMM   0x09    /* Communication device (scsi2) */
  59#define INQD_PDT_NOLUN2 0x1f    /* Unknown Device (scsi2) */
  60#define INQD_PDT_NOLUN  0x7f    /* Logical Unit Not Present */
  61
  62#define INQD_PDT_DMASK  0x1F    /* Peripheral Device Type Mask */
   63#define INQD_PDT_QMASK  0xE0    /* Peripheral Device Qualifier Mask */
  64
  65/*
  66 *      Sense codes
  67 */
  68
  69#define SENCODE_NO_SENSE                        0x00
  70#define SENCODE_END_OF_DATA                     0x00
  71#define SENCODE_BECOMING_READY                  0x04
  72#define SENCODE_INIT_CMD_REQUIRED               0x04
  73#define SENCODE_UNRECOVERED_READ_ERROR          0x11
  74#define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
  75#define SENCODE_INVALID_COMMAND                 0x20
  76#define SENCODE_LBA_OUT_OF_RANGE                0x21
  77#define SENCODE_INVALID_CDB_FIELD               0x24
  78#define SENCODE_LUN_NOT_SUPPORTED               0x25
  79#define SENCODE_INVALID_PARAM_FIELD             0x26
  80#define SENCODE_PARAM_NOT_SUPPORTED             0x26
  81#define SENCODE_PARAM_VALUE_INVALID             0x26
  82#define SENCODE_RESET_OCCURRED                  0x29
  83#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
  84#define SENCODE_INQUIRY_DATA_CHANGED            0x3F
  85#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
  86#define SENCODE_DIAGNOSTIC_FAILURE              0x40
  87#define SENCODE_INTERNAL_TARGET_FAILURE         0x44
  88#define SENCODE_INVALID_MESSAGE_ERROR           0x49
  89#define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
  90#define SENCODE_OVERLAPPED_COMMAND              0x4E
  91
  92/*
  93 *      Additional sense codes
  94 */
  95
  96#define ASENCODE_NO_SENSE                       0x00
  97#define ASENCODE_END_OF_DATA                    0x05
  98#define ASENCODE_BECOMING_READY                 0x01
  99#define ASENCODE_INIT_CMD_REQUIRED              0x02
 100#define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
 101#define ASENCODE_INVALID_COMMAND                0x00
 102#define ASENCODE_LBA_OUT_OF_RANGE               0x00
 103#define ASENCODE_INVALID_CDB_FIELD              0x00
 104#define ASENCODE_LUN_NOT_SUPPORTED              0x00
 105#define ASENCODE_INVALID_PARAM_FIELD            0x00
 106#define ASENCODE_PARAM_NOT_SUPPORTED            0x01
 107#define ASENCODE_PARAM_VALUE_INVALID            0x02
 108#define ASENCODE_RESET_OCCURRED                 0x00
 109#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
 110#define ASENCODE_INQUIRY_DATA_CHANGED           0x03
 111#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
 112#define ASENCODE_DIAGNOSTIC_FAILURE             0x80
 113#define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
 114#define ASENCODE_INVALID_MESSAGE_ERROR          0x00
 115#define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
 116#define ASENCODE_OVERLAPPED_COMMAND             0x00
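/*
 * The SENCODE_* and ASENCODE_* values above pair up by name: set_sense()
 * further down stores them in bytes 12 (additional sense code) and 13
 * (additional sense code qualifier) of the fixed-format sense buffer
 * returned with a CHECK CONDITION.
 */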
 117
 118#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
 119
 120#define BYTE0(x) (unsigned char)(x)
 121#define BYTE1(x) (unsigned char)((x) >> 8)
 122#define BYTE2(x) (unsigned char)((x) >> 16)
 123#define BYTE3(x) (unsigned char)((x) >> 24)
 124
 125/* MODE_SENSE data format */
 126typedef struct {
 127        struct {
 128                u8      data_length;
 129                u8      med_type;
 130                u8      dev_par;
 131                u8      bd_length;
 132        } __attribute__((packed)) hd;
 133        struct {
 134                u8      dens_code;
 135                u8      block_count[3];
 136                u8      reserved;
 137                u8      block_length[3];
 138        } __attribute__((packed)) bd;
 139                u8      mpc_buf[3];
 140} __attribute__((packed)) aac_modep_data;
 141
 142/* MODE_SENSE_10 data format */
 143typedef struct {
 144        struct {
 145                u8      data_length[2];
 146                u8      med_type;
 147                u8      dev_par;
 148                u8      rsrvd[2];
 149                u8      bd_length[2];
 150        } __attribute__((packed)) hd;
 151        struct {
 152                u8      dens_code;
 153                u8      block_count[3];
 154                u8      reserved;
 155                u8      block_length[3];
 156        } __attribute__((packed)) bd;
 157                u8      mpc_buf[3];
 158} __attribute__((packed)) aac_modep10_data;
 159
 160/*------------------------------------------------------------------------------
 161 *              S T R U C T S / T Y P E D E F S
 162 *----------------------------------------------------------------------------*/
 163/* SCSI inquiry data */
 164struct inquiry_data {
 165        u8 inqd_pdt;    /* Peripheral qualifier | Peripheral Device Type */
 166        u8 inqd_dtq;    /* RMB | Device Type Qualifier */
 167        u8 inqd_ver;    /* ISO version | ECMA version | ANSI-approved version */
 168        u8 inqd_rdf;    /* AENC | TrmIOP | Response data format */
 169        u8 inqd_len;    /* Additional length (n-4) */
 170        u8 inqd_pad1[2];/* Reserved - must be zero */
 171        u8 inqd_pad2;   /* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
 172        u8 inqd_vid[8]; /* Vendor ID */
 173        u8 inqd_pid[16];/* Product ID */
 174        u8 inqd_prl[4]; /* Product Revision Level */
 175};
 176
 177/* Added for VPD 0x83 */
 178struct  tvpd_id_descriptor_type_1 {
 179        u8 codeset:4;           /* VPD_CODE_SET */
 180        u8 reserved:4;
 181        u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
 182        u8 reserved2:4;
 183        u8 reserved3;
 184        u8 identifierlength;
 185        u8 venid[8];
 186        u8 productid[16];
 187        u8 serialnumber[8];     /* SN in ASCII */
 188
 189};
 190
 191struct tvpd_id_descriptor_type_2 {
 192        u8 codeset:4;           /* VPD_CODE_SET */
 193        u8 reserved:4;
 194        u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
 195        u8 reserved2:4;
 196        u8 reserved3;
 197        u8 identifierlength;
 198        struct teu64id {
 199                u32 Serial;
  200                 /* The serial number is supposed to be 40 bits,
  201                  * but we only support 32, so make the last byte zero. */
 202                u8 reserved;
 203                u8 venid[3];
 204        } eu64id;
 205
 206};
 207
 208struct tvpd_id_descriptor_type_3 {
 209        u8 codeset : 4;          /* VPD_CODE_SET */
 210        u8 reserved : 4;
 211        u8 identifiertype : 4;   /* VPD_IDENTIFIER_TYPE */
 212        u8 reserved2 : 4;
 213        u8 reserved3;
 214        u8 identifierlength;
 215        u8 Identifier[16];
 216};
 217
 218struct tvpd_page83 {
 219        u8 DeviceType:5;
 220        u8 DeviceTypeQualifier:3;
 221        u8 PageCode;
 222        u8 reserved;
 223        u8 PageLength;
 224        struct tvpd_id_descriptor_type_1 type1;
 225        struct tvpd_id_descriptor_type_2 type2;
 226        struct tvpd_id_descriptor_type_3 type3;
 227};
 228
 229/*
 230 *              M O D U L E   G L O B A L S
 231 */
 232
 233static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
 234static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
 235static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
 236static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
 237                                struct aac_raw_io2 *rio2, int sg_max);
 238static long aac_build_sghba(struct scsi_cmnd *scsicmd,
 239                                struct aac_hba_cmd_req *hbacmd,
 240                                int sg_max, u64 sg_address);
 241static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
 242                                int pages, int nseg, int nseg_new);
 243static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
 244static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
 245#ifdef AAC_DETAILED_STATUS_INFO
 246static char *aac_get_status_string(u32 status);
 247#endif
 248
 249/*
  250 *      Non-DASD selection is handled entirely in aachba now
 251 */
 252
 253static int nondasd = -1;
 254static int aac_cache = 2;       /* WCE=0 to avoid performance problems */
 255static int dacmode = -1;
 256int aac_msi;
 257int aac_commit = -1;
 258int startup_timeout = 180;
 259int aif_timeout = 120;
 260int aac_sync_mode;  /* Only Sync. transfer - disabled */
 261int aac_convert_sgl = 1;        /* convert non-conformable s/g list - enabled */
 262
 263module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
 264MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
 265        " 0=off, 1=on");
 266module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
 267MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
 268        " 0=off, 1=on");
 269module_param(nondasd, int, S_IRUGO|S_IWUSR);
 270MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
 271        " 0=off, 1=on");
 272module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
 273MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
 274        "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
 275        "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
 276        "\tbit 2 - Disable only if Battery is protecting Cache");
 277module_param(dacmode, int, S_IRUGO|S_IWUSR);
 278MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
 279        " 0=off, 1=on");
 280module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
 281MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
 282        " adapter for foreign arrays.\n"
 283        "This is typically needed in systems that do not have a BIOS."
 284        " 0=off, 1=on");
 285module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
 286MODULE_PARM_DESC(msi, "IRQ handling."
  287        " 0=PIC(default), 1=MSI, 2=MSI-X");
 288module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
 289MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
  290        " adapter to have its kernel up and\n"
 291        "running. This is typically adjusted for large systems that do not"
 292        " have a BIOS.");
 293module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
 294MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
 295        " applications to pick up AIFs before\n"
 296        "deregistering them. This is typically adjusted for heavily burdened"
 297        " systems.");
 298
 299int aac_fib_dump;
 300module_param(aac_fib_dump, int, 0644);
 301MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");
 302
 303int numacb = -1;
 304module_param(numacb, int, S_IRUGO|S_IWUSR);
 305MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
 306        " blocks (FIB) allocated. Valid values are 512 and down. Default is"
 307        " to use suggestion from Firmware.");
 308
 309int acbsize = -1;
 310module_param(acbsize, int, S_IRUGO|S_IWUSR);
 311MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
 312        " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
 313        " suggestion from Firmware.");
 314
 315int update_interval = 30 * 60;
 316module_param(update_interval, int, S_IRUGO|S_IWUSR);
 317MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
 318        " updates issued to adapter.");
 319
 320int check_interval = 60;
 321module_param(check_interval, int, S_IRUGO|S_IWUSR);
 322MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
 323        " checks.");
 324
 325int aac_check_reset = 1;
 326module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
 327MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
  328        " adapter. A value of -1 forces the reset even on adapters programmed to"
 329        " ignore it.");
 330
 331int expose_physicals = -1;
 332module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
 333MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
  334        " -1=protect, 0=off, 1=on");
 335
 336int aac_reset_devices;
 337module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
 338MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
 339
 340int aac_wwn = 1;
 341module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
 342MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
 343        "\t0 - Disable\n"
 344        "\t1 - Array Meta Data Signature (default)\n"
 345        "\t2 - Adapter Serial Number");
 346
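/*
 * Example usage (values are illustrative): all of the above are ordinary
 * module parameters, so they can be set at load time, e.g.
 *
 *      modprobe aacraid msi=2 cache=2 expose_physicals=0
 *
 * and the ones declared with S_IWUSR can also be changed at runtime via
 * /sys/module/aacraid/parameters/.
 */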
 347
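/*
 * aac_valid_context - sanity check a completing command/FIB pair.  Returns
 * 1 if the scsi_cmnd and its scsi_device are still intact; otherwise the
 * FIB is completed here and 0 is returned so the callback can bail out.
 */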
 348static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
 349                struct fib *fibptr) {
 350        struct scsi_device *device;
 351
 352        if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
 353                dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
 354                aac_fib_complete(fibptr);
 355                return 0;
 356        }
 357        scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
 358        device = scsicmd->device;
 359        if (unlikely(!device)) {
 360                dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
 361                aac_fib_complete(fibptr);
 362                return 0;
 363        }
 364        return 1;
 365}
 366
 367/**
 368 *      aac_get_config_status   -       check the adapter configuration
  369 *      @dev: adapter to query
  370 *      @commit_flag: force sending CT_COMMIT_CONFIG
 371 *      Query config status, and commit the configuration if needed.
 372 */
 373int aac_get_config_status(struct aac_dev *dev, int commit_flag)
 374{
 375        int status = 0;
 376        struct fib * fibptr;
 377
 378        if (!(fibptr = aac_fib_alloc(dev)))
 379                return -ENOMEM;
 380
 381        aac_fib_init(fibptr);
 382        {
 383                struct aac_get_config_status *dinfo;
 384                dinfo = (struct aac_get_config_status *) fib_data(fibptr);
 385
 386                dinfo->command = cpu_to_le32(VM_ContainerConfig);
 387                dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
 388                dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
 389        }
 390
 391        status = aac_fib_send(ContainerCommand,
 392                            fibptr,
 393                            sizeof (struct aac_get_config_status),
 394                            FsaNormal,
 395                            1, 1,
 396                            NULL, NULL);
 397        if (status < 0) {
 398                printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
 399        } else {
 400                struct aac_get_config_status_resp *reply
 401                  = (struct aac_get_config_status_resp *) fib_data(fibptr);
 402                dprintk((KERN_WARNING
 403                  "aac_get_config_status: response=%d status=%d action=%d\n",
 404                  le32_to_cpu(reply->response),
 405                  le32_to_cpu(reply->status),
 406                  le32_to_cpu(reply->data.action)));
 407                if ((le32_to_cpu(reply->response) != ST_OK) ||
 408                     (le32_to_cpu(reply->status) != CT_OK) ||
 409                     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
 410                        printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
 411                        status = -EINVAL;
 412                }
 413        }
  414        /* Do not set XferState to zero unless we receive a response from F/W */
 415        if (status >= 0)
 416                aac_fib_complete(fibptr);
 417
 418        /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
 419        if (status >= 0) {
 420                if ((aac_commit == 1) || commit_flag) {
 421                        struct aac_commit_config * dinfo;
 422                        aac_fib_init(fibptr);
 423                        dinfo = (struct aac_commit_config *) fib_data(fibptr);
 424
 425                        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 426                        dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
 427
 428                        status = aac_fib_send(ContainerCommand,
 429                                    fibptr,
 430                                    sizeof (struct aac_commit_config),
 431                                    FsaNormal,
 432                                    1, 1,
 433                                    NULL, NULL);
  434                        /* Do not set XferState to zero unless
  435                         * we receive a response from F/W */
 436                        if (status >= 0)
 437                                aac_fib_complete(fibptr);
 438                } else if (aac_commit == 0) {
 439                        printk(KERN_WARNING
 440                          "aac_get_config_status: Foreign device configurations are being ignored\n");
 441                }
 442        }
 443        /* FIB should be freed only after getting the response from the F/W */
 444        if (status != -ERESTARTSYS)
 445                aac_fib_free(fibptr);
 446        return status;
 447}
 448
 449static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
 450{
 451        char inq_data;
 452        scsi_sg_copy_to_buffer(scsicmd,  &inq_data, sizeof(inq_data));
 453        if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
 454                inq_data &= 0xdf;
 455                scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
 456        }
 457}
 458
 459/**
 460 *      aac_get_containers      -       list containers
  461 *      @dev: adapter to probe
 462 *
 463 *      Make a list of all containers on this controller
 464 */
 465int aac_get_containers(struct aac_dev *dev)
 466{
 467        struct fsa_dev_info *fsa_dev_ptr;
 468        u32 index;
 469        int status = 0;
 470        struct fib * fibptr;
 471        struct aac_get_container_count *dinfo;
 472        struct aac_get_container_count_resp *dresp;
 473        int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 474
 475        if (!(fibptr = aac_fib_alloc(dev)))
 476                return -ENOMEM;
 477
 478        aac_fib_init(fibptr);
 479        dinfo = (struct aac_get_container_count *) fib_data(fibptr);
 480        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 481        dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
 482
 483        status = aac_fib_send(ContainerCommand,
 484                    fibptr,
 485                    sizeof (struct aac_get_container_count),
 486                    FsaNormal,
 487                    1, 1,
 488                    NULL, NULL);
 489        if (status >= 0) {
 490                dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
 491                maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
 492                if (fibptr->dev->supplement_adapter_info.supported_options2 &
 493                    AAC_OPTION_SUPPORTED_240_VOLUMES) {
 494                        maximum_num_containers =
 495                                le32_to_cpu(dresp->MaxSimpleVolumes);
 496                }
 497                aac_fib_complete(fibptr);
 498        }
 499        /* FIB should be freed only after getting the response from the F/W */
 500        if (status != -ERESTARTSYS)
 501                aac_fib_free(fibptr);
 502
 503        if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
 504                maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 505        if (dev->fsa_dev == NULL ||
 506                dev->maximum_num_containers != maximum_num_containers) {
 507
 508                fsa_dev_ptr = dev->fsa_dev;
 509
 510                dev->fsa_dev = kcalloc(maximum_num_containers,
 511                                        sizeof(*fsa_dev_ptr), GFP_KERNEL);
 512
 513                kfree(fsa_dev_ptr);
 514                fsa_dev_ptr = NULL;
 515
 516
 517                if (!dev->fsa_dev)
 518                        return -ENOMEM;
 519
 520                dev->maximum_num_containers = maximum_num_containers;
 521        }
 522        for (index = 0; index < dev->maximum_num_containers; index++) {
 523                dev->fsa_dev[index].devname[0] = '\0';
 524                dev->fsa_dev[index].valid = 0;
 525
 526                status = aac_probe_container(dev, index);
 527
 528                if (status < 0) {
 529                        printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
 530                        break;
 531                }
 532        }
 533        return status;
 534}
 535
 536static void get_container_name_callback(void *context, struct fib * fibptr)
 537{
 538        struct aac_get_name_resp * get_name_reply;
 539        struct scsi_cmnd * scsicmd;
 540
 541        scsicmd = (struct scsi_cmnd *) context;
 542
 543        if (!aac_valid_context(scsicmd, fibptr))
 544                return;
 545
 546        dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
 547        BUG_ON(fibptr == NULL);
 548
 549        get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
 550        /* Failure is irrelevant, using default value instead */
 551        if ((le32_to_cpu(get_name_reply->status) == CT_OK)
 552         && (get_name_reply->data[0] != '\0')) {
 553                char *sp = get_name_reply->data;
 554                int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
 555
 556                sp[data_size - 1] = '\0';
 557                while (*sp == ' ')
 558                        ++sp;
 559                if (*sp) {
 560                        struct inquiry_data inq;
 561                        char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
 562                        int count = sizeof(d);
 563                        char *dp = d;
 564                        do {
 565                                *dp++ = (*sp) ? *sp++ : ' ';
 566                        } while (--count > 0);
 567
 568                        scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
 569                        memcpy(inq.inqd_pid, d, sizeof(d));
 570                        scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
 571                }
 572        }
 573
 574        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 575
 576        aac_fib_complete(fibptr);
 577        scsicmd->scsi_done(scsicmd);
 578}
 579
 580/**
  581 *      aac_get_container_name  -       get container name, non-blocking.
 582 */
 583static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 584{
 585        int status;
 586        int data_size;
 587        struct aac_get_name *dinfo;
 588        struct fib * cmd_fibcontext;
 589        struct aac_dev * dev;
 590
 591        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 592
 593        data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
 594
 595        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
 596
 597        aac_fib_init(cmd_fibcontext);
 598        dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
 599        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 600
 601        dinfo->command = cpu_to_le32(VM_ContainerConfig);
 602        dinfo->type = cpu_to_le32(CT_READ_NAME);
 603        dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
 604        dinfo->count = cpu_to_le32(data_size - 1);
 605
 606        status = aac_fib_send(ContainerCommand,
 607                  cmd_fibcontext,
 608                  sizeof(struct aac_get_name_resp),
 609                  FsaNormal,
 610                  0, 1,
 611                  (fib_callback)get_container_name_callback,
 612                  (void *) scsicmd);
 613
 614        /*
  615         *      Check that the command was queued to the controller
 616         */
 617        if (status == -EINPROGRESS)
 618                return 0;
 619
 620        printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 621        aac_fib_complete(cmd_fibcontext);
 622        return -1;
 623}
 624
 625static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
 626{
 627        struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
 628
 629        if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
 630                return aac_scsi_cmd(scsicmd);
 631
 632        scsicmd->result = DID_NO_CONNECT << 16;
 633        scsicmd->scsi_done(scsicmd);
 634        return 0;
 635}
 636
 637static void _aac_probe_container2(void * context, struct fib * fibptr)
 638{
 639        struct fsa_dev_info *fsa_dev_ptr;
 640        int (*callback)(struct scsi_cmnd *);
 641        struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
 642        int i;
 643
 644
 645        if (!aac_valid_context(scsicmd, fibptr))
 646                return;
 647
 648        scsicmd->SCp.Status = 0;
 649        fsa_dev_ptr = fibptr->dev->fsa_dev;
 650        if (fsa_dev_ptr) {
 651                struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
 652                __le32 sup_options2;
 653
 654                fsa_dev_ptr += scmd_id(scsicmd);
 655                sup_options2 =
 656                        fibptr->dev->supplement_adapter_info.supported_options2;
 657
 658                if ((le32_to_cpu(dresp->status) == ST_OK) &&
 659                    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
 660                    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
 661                        if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
 662                                dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
 663                                fsa_dev_ptr->block_size = 0x200;
 664                        } else {
 665                                fsa_dev_ptr->block_size =
 666                                        le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
 667                        }
 668                        for (i = 0; i < 16; i++)
 669                                fsa_dev_ptr->identifier[i] =
 670                                        dresp->mnt[0].fileinfo.bdevinfo
 671                                                                .identifier[i];
 672                        fsa_dev_ptr->valid = 1;
 673                        /* sense_key holds the current state of the spin-up */
 674                        if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
 675                                fsa_dev_ptr->sense_data.sense_key = NOT_READY;
 676                        else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
 677                                fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
 678                        fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
 679                        fsa_dev_ptr->size
 680                          = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
 681                            (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
 682                        fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
 683                }
 684                if ((fsa_dev_ptr->valid & 1) == 0)
 685                        fsa_dev_ptr->valid = 0;
 686                scsicmd->SCp.Status = le32_to_cpu(dresp->count);
 687        }
 688        aac_fib_complete(fibptr);
 689        aac_fib_free(fibptr);
 690        callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
 691        scsicmd->SCp.ptr = NULL;
 692        (*callback)(scsicmd);
 693        return;
 694}
 695
 696static void _aac_probe_container1(void * context, struct fib * fibptr)
 697{
 698        struct scsi_cmnd * scsicmd;
 699        struct aac_mount * dresp;
 700        struct aac_query_mount *dinfo;
 701        int status;
 702
 703        dresp = (struct aac_mount *) fib_data(fibptr);
 704        if (!aac_supports_2T(fibptr->dev)) {
 705                dresp->mnt[0].capacityhigh = 0;
 706                if ((le32_to_cpu(dresp->status) == ST_OK) &&
 707                        (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
 708                        _aac_probe_container2(context, fibptr);
 709                        return;
 710                }
 711        }
 712        scsicmd = (struct scsi_cmnd *) context;
 713
 714        if (!aac_valid_context(scsicmd, fibptr))
 715                return;
 716
 717        aac_fib_init(fibptr);
 718
 719        dinfo = (struct aac_query_mount *)fib_data(fibptr);
 720
 721        if (fibptr->dev->supplement_adapter_info.supported_options2 &
 722            AAC_OPTION_VARIABLE_BLOCK_SIZE)
 723                dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
 724        else
 725                dinfo->command = cpu_to_le32(VM_NameServe64);
 726
 727        dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 728        dinfo->type = cpu_to_le32(FT_FILESYS);
 729        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 730
 731        status = aac_fib_send(ContainerCommand,
 732                          fibptr,
 733                          sizeof(struct aac_query_mount),
 734                          FsaNormal,
 735                          0, 1,
 736                          _aac_probe_container2,
 737                          (void *) scsicmd);
 738        /*
  739         *      Check that the command was queued to the controller
 740         */
 741        if (status < 0 && status != -EINPROGRESS) {
 742                /* Inherit results from VM_NameServe, if any */
 743                dresp->status = cpu_to_le32(ST_OK);
 744                _aac_probe_container2(context, fibptr);
 745        }
 746}
 747
 748static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
 749{
 750        struct fib * fibptr;
 751        int status = -ENOMEM;
 752
 753        if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
 754                struct aac_query_mount *dinfo;
 755
 756                aac_fib_init(fibptr);
 757
 758                dinfo = (struct aac_query_mount *)fib_data(fibptr);
 759
 760                if (fibptr->dev->supplement_adapter_info.supported_options2 &
 761                    AAC_OPTION_VARIABLE_BLOCK_SIZE)
 762                        dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
 763                else
 764                        dinfo->command = cpu_to_le32(VM_NameServe);
 765
 766                dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 767                dinfo->type = cpu_to_le32(FT_FILESYS);
 768                scsicmd->SCp.ptr = (char *)callback;
 769                scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 770
 771                status = aac_fib_send(ContainerCommand,
 772                          fibptr,
 773                          sizeof(struct aac_query_mount),
 774                          FsaNormal,
 775                          0, 1,
 776                          _aac_probe_container1,
 777                          (void *) scsicmd);
 778                /*
  779                 *      Check that the command was queued to the controller
 780                 */
 781                if (status == -EINPROGRESS)
 782                        return 0;
 783
 784                if (status < 0) {
 785                        scsicmd->SCp.ptr = NULL;
 786                        aac_fib_complete(fibptr);
 787                        aac_fib_free(fibptr);
 788                }
 789        }
 790        if (status < 0) {
 791                struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
 792                if (fsa_dev_ptr) {
 793                        fsa_dev_ptr += scmd_id(scsicmd);
 794                        if ((fsa_dev_ptr->valid & 1) == 0) {
 795                                fsa_dev_ptr->valid = 0;
 796                                return (*callback)(scsicmd);
 797                        }
 798                }
 799        }
 800        return status;
 801}
 802
 803/**
 804 *      aac_probe_container             -       query a logical volume
 805 *      @dev: device to query
 806 *      @cid: container identifier
 807 *
 808 *      Queries the controller about the given volume. The volume information
 809 *      is updated in the struct fsa_dev_info structure rather than returned.
 810 */
 811static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
 812{
 813        scsicmd->device = NULL;
 814        return 0;
 815}
 816
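/*
 * Synchronous wrapper around _aac_probe_container(): it fakes up a
 * scsi_cmnd/scsi_device pair, fires off the query and then schedules until
 * the completion callback clears scsicmd->device, finally returning the
 * status left in SCp.Status.
 */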
 817int aac_probe_container(struct aac_dev *dev, int cid)
 818{
 819        struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
 820        struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
 821        int status;
 822
 823        if (!scsicmd || !scsidev) {
 824                kfree(scsicmd);
 825                kfree(scsidev);
 826                return -ENOMEM;
 827        }
 828        scsicmd->list.next = NULL;
 829        scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
 830
 831        scsicmd->device = scsidev;
 832        scsidev->sdev_state = 0;
 833        scsidev->id = cid;
 834        scsidev->host = dev->scsi_host_ptr;
 835
 836        if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
 837                while (scsicmd->device == scsidev)
 838                        schedule();
 839        kfree(scsidev);
 840        status = scsicmd->SCp.Status;
 841        kfree(scsicmd);
 842        return status;
 843}
 844
 845/* Local Structure to set SCSI inquiry data strings */
 846struct scsi_inq {
 847        char vid[8];         /* Vendor ID */
 848        char pid[16];        /* Product ID */
 849        char prl[4];         /* Product Revision Level */
 850};
 851
 852/**
  853 *      inqstrcpy       -       string copy
 854 *      @a:     string to copy from
 855 *      @b:     string to copy to
 856 *
 857 *      Copy a String from one location to another
 858 *      without copying \0
 859 */
 860
 861static void inqstrcpy(char *a, char *b)
 862{
 863
 864        while (*a != (char)0)
 865                *b++ = *a++;
 866}
 867
 868static char *container_types[] = {
 869        "None",
 870        "Volume",
 871        "Mirror",
 872        "Stripe",
 873        "RAID5",
 874        "SSRW",
 875        "SSRO",
 876        "Morph",
 877        "Legacy",
 878        "RAID4",
 879        "RAID10",
 880        "RAID00",
 881        "V-MIRRORS",
 882        "PSEUDO R4",
 883        "RAID50",
 884        "RAID5D",
 885        "RAID5D0",
 886        "RAID1E",
 887        "RAID6",
 888        "RAID60",
 889        "Unknown"
 890};
 891
 892char * get_container_type(unsigned tindex)
 893{
 894        if (tindex >= ARRAY_SIZE(container_types))
 895                tindex = ARRAY_SIZE(container_types) - 1;
 896        return container_types[tindex];
 897}
 898
 899/* Function: setinqstr
 900 *
  901 * Arguments: [1] pointer to struct aac_dev [2] pointer to void [3] int (container type index)
 902 *
 903 * Purpose: Sets SCSI inquiry data strings for vendor, product
 904 * and revision level. Allows strings to be set in platform dependent
 905 * files instead of in OS dependent driver source.
 906 */
 907
 908static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 909{
 910        struct scsi_inq *str;
 911        struct aac_supplement_adapter_info *sup_adap_info;
 912
 913        sup_adap_info = &dev->supplement_adapter_info;
 914        str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
 915        memset(str, ' ', sizeof(*str));
 916
 917        if (sup_adap_info->adapter_type_text[0]) {
 918                int c;
 919                char *cp;
 920                char *cname = kmemdup(sup_adap_info->adapter_type_text,
 921                                sizeof(sup_adap_info->adapter_type_text),
 922                                                                GFP_ATOMIC);
 923                if (!cname)
 924                        return;
 925
 926                cp = cname;
 927                if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
 928                        inqstrcpy("SMC", str->vid);
 929                else {
 930                        c = sizeof(str->vid);
 931                        while (*cp && *cp != ' ' && --c)
 932                                ++cp;
 933                        c = *cp;
 934                        *cp = '\0';
 935                        inqstrcpy(cname, str->vid);
 936                        *cp = c;
 937                        while (*cp && *cp != ' ')
 938                                ++cp;
 939                }
 940                while (*cp == ' ')
 941                        ++cp;
 942                /* last six chars reserved for vol type */
 943                if (strlen(cp) > sizeof(str->pid))
 944                        cp[sizeof(str->pid)] = '\0';
 945                inqstrcpy (cp, str->pid);
 946
 947                kfree(cname);
 948        } else {
 949                struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
 950
 951                inqstrcpy (mp->vname, str->vid);
 952                /* last six chars reserved for vol type */
 953                inqstrcpy (mp->model, str->pid);
 954        }
 955
 956        if (tindex < ARRAY_SIZE(container_types)){
 957                char *findit = str->pid;
 958
 959                for ( ; *findit != ' '; findit++); /* walk till we find a space */
 960                /* RAID is superfluous in the context of a RAID device */
 961                if (memcmp(findit-4, "RAID", 4) == 0)
 962                        *(findit -= 4) = ' ';
 963                if (((findit - str->pid) + strlen(container_types[tindex]))
 964                 < (sizeof(str->pid) + sizeof(str->prl)))
 965                        inqstrcpy (container_types[tindex], findit + 1);
 966        }
 967        inqstrcpy ("V1.0", str->prl);
 968}
 969
 970static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
 971                struct aac_dev *dev, struct scsi_cmnd *scsicmd)
 972{
 973        int container;
 974
 975        vpdpage83data->type3.codeset = 1;
 976        vpdpage83data->type3.identifiertype = 3;
 977        vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
 978                        - 4;
 979
 980        for (container = 0; container < dev->maximum_num_containers;
 981                        container++) {
 982
 983                if (scmd_id(scsicmd) == container) {
 984                        memcpy(vpdpage83data->type3.Identifier,
 985                                        dev->fsa_dev[container].identifier,
 986                                        16);
 987                        break;
 988                }
 989        }
 990}
 991
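/*
 * Completion handler for CT_CID_TO_32BITS_UID.  For an INQUIRY of VPD page
 * 0x83 it builds a Device Identification page containing a T10 vendor-ID
 * descriptor (type 1), an EUI-64 descriptor (type 2) and, on sa_firmware
 * adapters only, a type 3 descriptor; for VPD page 0x80 the 32-bit UID is
 * returned as an 8-character hex serial number.
 */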
 992static void get_container_serial_callback(void *context, struct fib * fibptr)
 993{
 994        struct aac_get_serial_resp * get_serial_reply;
 995        struct scsi_cmnd * scsicmd;
 996
 997        BUG_ON(fibptr == NULL);
 998
 999        scsicmd = (struct scsi_cmnd *) context;
1000        if (!aac_valid_context(scsicmd, fibptr))
1001                return;
1002
1003        get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
1004        /* Failure is irrelevant, using default value instead */
1005        if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
 1006                /* Check to see if it's for VPD 0x83 or 0x80 */
1007                if (scsicmd->cmnd[2] == 0x83) {
1008                        /* vpd page 0x83 - Device Identification Page */
1009                        struct aac_dev *dev;
1010                        int i;
1011                        struct tvpd_page83 vpdpage83data;
1012
1013                        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1014
1015                        memset(((u8 *)&vpdpage83data), 0,
1016                               sizeof(vpdpage83data));
1017
 1018                        /* DIRECT_ACCESS_DEVICE */
1019                        vpdpage83data.DeviceType = 0;
1020                        /* DEVICE_CONNECTED */
1021                        vpdpage83data.DeviceTypeQualifier = 0;
1022                        /* VPD_DEVICE_IDENTIFIERS */
1023                        vpdpage83data.PageCode = 0x83;
1024                        vpdpage83data.reserved = 0;
1025                        vpdpage83data.PageLength =
1026                                sizeof(vpdpage83data.type1) +
1027                                sizeof(vpdpage83data.type2);
1028
1029                        /* VPD 83 Type 3 is not supported for ARC */
1030                        if (dev->sa_firmware)
1031                                vpdpage83data.PageLength +=
1032                                sizeof(vpdpage83data.type3);
1033
1034                        /* T10 Vendor Identifier Field Format */
1035                        /* VpdcodesetAscii */
1036                        vpdpage83data.type1.codeset = 2;
1037                        /* VpdIdentifierTypeVendorId */
1038                        vpdpage83data.type1.identifiertype = 1;
1039                        vpdpage83data.type1.identifierlength =
1040                                sizeof(vpdpage83data.type1) - 4;
1041
1042                        /* "ADAPTEC " for adaptec */
1043                        memcpy(vpdpage83data.type1.venid,
1044                                "ADAPTEC ",
1045                                sizeof(vpdpage83data.type1.venid));
1046                        memcpy(vpdpage83data.type1.productid,
1047                                "ARRAY           ",
1048                                sizeof(
1049                                vpdpage83data.type1.productid));
1050
 1051                        /* Convert to an ASCII-based serial number.
 1052                         * The LSB is at the end.
1053                         */
1054                        for (i = 0; i < 8; i++) {
1055                                u8 temp =
1056                                        (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
1057                                if (temp  > 0x9) {
1058                                        vpdpage83data.type1.serialnumber[i] =
1059                                                        'A' + (temp - 0xA);
1060                                } else {
1061                                        vpdpage83data.type1.serialnumber[i] =
1062                                                        '0' + temp;
1063                                }
1064                        }
1065
1066                        /* VpdCodeSetBinary */
1067                        vpdpage83data.type2.codeset = 1;
1068                        /* VpdidentifiertypeEUI64 */
1069                        vpdpage83data.type2.identifiertype = 2;
1070                        vpdpage83data.type2.identifierlength =
1071                                sizeof(vpdpage83data.type2) - 4;
1072
1073                        vpdpage83data.type2.eu64id.venid[0] = 0xD0;
1074                        vpdpage83data.type2.eu64id.venid[1] = 0;
1075                        vpdpage83data.type2.eu64id.venid[2] = 0;
1076
1077                        vpdpage83data.type2.eu64id.Serial =
1078                                                        get_serial_reply->uid;
1079                        vpdpage83data.type2.eu64id.reserved = 0;
1080
1081                        /*
1082                         * VpdIdentifierTypeFCPHName
1083                         * VPD 0x83 Type 3 not supported for ARC
1084                         */
1085                        if (dev->sa_firmware) {
1086                                build_vpd83_type3(&vpdpage83data,
1087                                                dev, scsicmd);
1088                        }
1089
1090                        /* Move the inquiry data to the response buffer. */
1091                        scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
1092                                                 sizeof(vpdpage83data));
1093                } else {
1094                        /* It must be for VPD 0x80 */
1095                        char sp[13];
1096                        /* EVPD bit set */
1097                        sp[0] = INQD_PDT_DA;
1098                        sp[1] = scsicmd->cmnd[2];
1099                        sp[2] = 0;
1100                        sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
1101                                le32_to_cpu(get_serial_reply->uid));
1102                        scsi_sg_copy_from_buffer(scsicmd, sp,
1103                                                 sizeof(sp));
1104                }
1105        }
1106
1107        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1108
1109        aac_fib_complete(fibptr);
1110        scsicmd->scsi_done(scsicmd);
1111}
1112
1113/**
 1114 *      aac_get_container_serial - get container serial, non-blocking.
1115 */
1116static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
1117{
1118        int status;
1119        struct aac_get_serial *dinfo;
1120        struct fib * cmd_fibcontext;
1121        struct aac_dev * dev;
1122
1123        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1124
1125        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
1126
1127        aac_fib_init(cmd_fibcontext);
1128        dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
1129
1130        dinfo->command = cpu_to_le32(VM_ContainerConfig);
1131        dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
1132        dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
1133        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1134
1135        status = aac_fib_send(ContainerCommand,
1136                  cmd_fibcontext,
1137                  sizeof(struct aac_get_serial_resp),
1138                  FsaNormal,
1139                  0, 1,
1140                  (fib_callback) get_container_serial_callback,
1141                  (void *) scsicmd);
1142
1143        /*
 1144         *      Check that the command was queued to the controller
1145         */
1146        if (status == -EINPROGRESS)
1147                return 0;
1148
1149        printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
1150        aac_fib_complete(cmd_fibcontext);
1151        return -1;
1152}
1153
1154/* Function: setinqserial
1155 *
 1156 * Arguments: [1] pointer to struct aac_dev [2] pointer to void [3] int (container id)
1157 *
1158 * Purpose: Sets SCSI Unit Serial number.
1159 *          This is a fake. We should read a proper
1160 *          serial number from the container. <SuSE>But
1161 *          without docs it's quite hard to do it :-)
1162 *          So this will have to do in the meantime.</SuSE>
1163 */
1164
1165static int setinqserial(struct aac_dev *dev, void *data, int cid)
1166{
1167        /*
1168         *      This breaks array migration.
1169         */
1170        return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
1171                        le32_to_cpu(dev->adapter_info.serial[0]), cid);
1172}
1173
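/*
 * set_sense - build fixed-format (0x70) sense data: byte 2 holds the sense
 * key, bytes 12 and 13 the ASC/ASCQ pair, and for ILLEGAL_REQUEST bytes
 * 15-17 carry the sense-key-specific bit and field pointers.
 */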
1174static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
1175        u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
1176{
1177        u8 *sense_buf = (u8 *)sense_data;
1178        /* Sense data valid, err code 70h */
1179        sense_buf[0] = 0x70; /* No info field */
1180        sense_buf[1] = 0;       /* Segment number, always zero */
1181
1182        sense_buf[2] = sense_key;       /* Sense key */
1183
1184        sense_buf[12] = sense_code;     /* Additional sense code */
1185        sense_buf[13] = a_sense_code;   /* Additional sense code qualifier */
1186
1187        if (sense_key == ILLEGAL_REQUEST) {
1188                sense_buf[7] = 10;      /* Additional sense length */
1189
1190                sense_buf[15] = bit_pointer;
1191                /* Illegal parameter is in the parameter block */
1192                if (sense_code == SENCODE_INVALID_CDB_FIELD)
1193                        sense_buf[15] |= 0xc0;/* Std sense key specific field */
1194                /* Illegal parameter is in the CDB block */
1195                sense_buf[16] = field_pointer >> 8;     /* MSB */
1196                sense_buf[17] = field_pointer;          /* LSB */
1197        } else
1198                sense_buf[7] = 6;       /* Additional sense length */
1199}
1200
1201static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
1202{
1203        if (lba & 0xffffffff00000000LL) {
1204                int cid = scmd_id(cmd);
1205                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
1206                cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1207                        SAM_STAT_CHECK_CONDITION;
1208                set_sense(&dev->fsa_dev[cid].sense_data,
1209                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1210                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1211                memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1212                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1213                             SCSI_SENSE_BUFFERSIZE));
1214                cmd->scsi_done(cmd);
1215                return 1;
1216        }
1217        return 0;
1218}
1219
1220static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
1221{
1222        return 0;
1223}
1224
1225static void io_callback(void *context, struct fib * fibptr);
1226
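/*
 * Raw container read.  On the newer message interfaces (TYPE2/TYPE3, when
 * not in sync mode) an aac_raw_io2 request with IEEE-1212 SG elements is
 * built; otherwise the legacy aac_raw_io layout is used.  Either way the
 * FIB size is the base structure plus (SG count - 1) additional elements.
 */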
1227static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1228{
1229        struct aac_dev *dev = fib->dev;
1230        u16 fibsize, command;
1231        long ret;
1232
1233        aac_fib_init(fib);
1234        if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
1235                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
1236                !dev->sync_mode) {
1237                struct aac_raw_io2 *readcmd2;
1238                readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
1239                memset(readcmd2, 0, sizeof(struct aac_raw_io2));
1240                readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
1241                readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1242                readcmd2->byteCount = cpu_to_le32(count *
1243                        dev->fsa_dev[scmd_id(cmd)].block_size);
1244                readcmd2->cid = cpu_to_le16(scmd_id(cmd));
1245                readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
1246                ret = aac_build_sgraw2(cmd, readcmd2,
1247                                dev->scsi_host_ptr->sg_tablesize);
1248                if (ret < 0)
1249                        return ret;
1250                command = ContainerRawIo2;
1251                fibsize = sizeof(struct aac_raw_io2) +
1252                        ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
1253        } else {
1254                struct aac_raw_io *readcmd;
1255                readcmd = (struct aac_raw_io *) fib_data(fib);
1256                readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1257                readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1258                readcmd->count = cpu_to_le32(count *
1259                        dev->fsa_dev[scmd_id(cmd)].block_size);
1260                readcmd->cid = cpu_to_le16(scmd_id(cmd));
1261                readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
1262                readcmd->bpTotal = 0;
1263                readcmd->bpComplete = 0;
1264                ret = aac_build_sgraw(cmd, &readcmd->sg);
1265                if (ret < 0)
1266                        return ret;
1267                command = ContainerRawIo;
1268                fibsize = sizeof(struct aac_raw_io) +
1269                        ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
1270        }
1271
1272        BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
1273        /*
1274         *      Now send the Fib to the adapter
1275         */
1276        return aac_fib_send(command,
1277                          fib,
1278                          fibsize,
1279                          FsaNormal,
1280                          0, 1,
1281                          (fib_callback) io_callback,
1282                          (void *) cmd);
1283}
1284
1285static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1286{
1287        u16 fibsize;
1288        struct aac_read64 *readcmd;
1289        long ret;
1290
1291        aac_fib_init(fib);
1292        readcmd = (struct aac_read64 *) fib_data(fib);
1293        readcmd->command = cpu_to_le32(VM_CtHostRead64);
1294        readcmd->cid = cpu_to_le16(scmd_id(cmd));
1295        readcmd->sector_count = cpu_to_le16(count);
1296        readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1297        readcmd->pad   = 0;
1298        readcmd->flags = 0;
1299
1300        ret = aac_build_sg64(cmd, &readcmd->sg);
1301        if (ret < 0)
1302                return ret;
1303        fibsize = sizeof(struct aac_read64) +
1304                ((le32_to_cpu(readcmd->sg.count) - 1) *
1305                 sizeof (struct sgentry64));
1306        BUG_ON (fibsize > (fib->dev->max_fib_size -
1307                                sizeof(struct aac_fibhdr)));
1308        /*
1309         *      Now send the Fib to the adapter
1310         */
1311        return aac_fib_send(ContainerCommand64,
1312                          fib,
1313                          fibsize,
1314                          FsaNormal,
1315                          0, 1,
1316                          (fib_callback) io_callback,
1317                          (void *) cmd);
1318}
1319
1320static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
1321{
1322        u16 fibsize;
1323        struct aac_read *readcmd;
1324        struct aac_dev *dev = fib->dev;
1325        long ret;
1326
1327        aac_fib_init(fib);
1328        readcmd = (struct aac_read *) fib_data(fib);
1329        readcmd->command = cpu_to_le32(VM_CtBlockRead);
1330        readcmd->cid = cpu_to_le32(scmd_id(cmd));
1331        readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1332        readcmd->count = cpu_to_le32(count *
1333                dev->fsa_dev[scmd_id(cmd)].block_size);
1334
1335        ret = aac_build_sg(cmd, &readcmd->sg);
1336        if (ret < 0)
1337                return ret;
1338        fibsize = sizeof(struct aac_read) +
1339                        ((le32_to_cpu(readcmd->sg.count) - 1) *
1340                         sizeof (struct sgentry));
1341        BUG_ON (fibsize > (fib->dev->max_fib_size -
1342                                sizeof(struct aac_fibhdr)));
1343        /*
1344         *      Now send the Fib to the adapter
1345         */
1346        return aac_fib_send(ContainerCommand,
1347                          fib,
1348                          fibsize,
1349                          FsaNormal,
1350                          0, 1,
1351                          (fib_callback) io_callback,
1352                          (void *) cmd);
1353}
1354
1355static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1356{
1357        struct aac_dev *dev = fib->dev;
1358        u16 fibsize, command;
1359        long ret;
1360
1361        aac_fib_init(fib);
1362        if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
1363                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
1364                !dev->sync_mode) {
1365                struct aac_raw_io2 *writecmd2;
1366                writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
1367                memset(writecmd2, 0, sizeof(struct aac_raw_io2));
1368                writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
1369                writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1370                writecmd2->byteCount = cpu_to_le32(count *
1371                        dev->fsa_dev[scmd_id(cmd)].block_size);
1372                writecmd2->cid = cpu_to_le16(scmd_id(cmd));
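                /*
                 * Pass the FUA request through as a SUREWRITE unless the
                 * aac_cache module parameter masks it out; the same policy
                 * is applied on the legacy RawIo path below.
                 */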
1373                writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
1374                                                   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
1375                        cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
1376                        cpu_to_le16(RIO2_IO_TYPE_WRITE);
1377                ret = aac_build_sgraw2(cmd, writecmd2,
1378                                dev->scsi_host_ptr->sg_tablesize);
1379                if (ret < 0)
1380                        return ret;
1381                command = ContainerRawIo2;
1382                fibsize = sizeof(struct aac_raw_io2) +
1383                        ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
1384        } else {
1385                struct aac_raw_io *writecmd;
1386                writecmd = (struct aac_raw_io *) fib_data(fib);
1387                writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1388                writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1389                writecmd->count = cpu_to_le32(count *
1390                        dev->fsa_dev[scmd_id(cmd)].block_size);
1391                writecmd->cid = cpu_to_le16(scmd_id(cmd));
1392                writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
1393                                                   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
1394                        cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
1395                        cpu_to_le16(RIO_TYPE_WRITE);
1396                writecmd->bpTotal = 0;
1397                writecmd->bpComplete = 0;
1398                ret = aac_build_sgraw(cmd, &writecmd->sg);
1399                if (ret < 0)
1400                        return ret;
1401                command = ContainerRawIo;
1402                fibsize = sizeof(struct aac_raw_io) +
1403                        ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
1404        }
1405
1406        BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
1407        /*
1408         *      Now send the Fib to the adapter
1409         */
1410        return aac_fib_send(command,
1411                          fib,
1412                          fibsize,
1413                          FsaNormal,
1414                          0, 1,
1415                          (fib_callback) io_callback,
1416                          (void *) cmd);
1417}
1418
1419static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1420{
1421        u16 fibsize;
1422        struct aac_write64 *writecmd;
1423        long ret;
1424
1425        aac_fib_init(fib);
1426        writecmd = (struct aac_write64 *) fib_data(fib);
1427        writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1428        writecmd->cid = cpu_to_le16(scmd_id(cmd));
1429        writecmd->sector_count = cpu_to_le16(count);
1430        writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1431        writecmd->pad   = 0;
1432        writecmd->flags = 0;
1433
1434        ret = aac_build_sg64(cmd, &writecmd->sg);
1435        if (ret < 0)
1436                return ret;
1437        fibsize = sizeof(struct aac_write64) +
1438                ((le32_to_cpu(writecmd->sg.count) - 1) *
1439                 sizeof (struct sgentry64));
1440        BUG_ON (fibsize > (fib->dev->max_fib_size -
1441                                sizeof(struct aac_fibhdr)));
1442        /*
1443         *      Now send the Fib to the adapter
1444         */
1445        return aac_fib_send(ContainerCommand64,
1446                          fib,
1447                          fibsize,
1448                          FsaNormal,
1449                          0, 1,
1450                          (fib_callback) io_callback,
1451                          (void *) cmd);
1452}
1453
1454static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
1455{
1456        u16 fibsize;
1457        struct aac_write *writecmd;
1458        struct aac_dev *dev = fib->dev;
1459        long ret;
1460
1461        aac_fib_init(fib);
1462        writecmd = (struct aac_write *) fib_data(fib);
1463        writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1464        writecmd->cid = cpu_to_le32(scmd_id(cmd));
1465        writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1466        writecmd->count = cpu_to_le32(count *
1467                dev->fsa_dev[scmd_id(cmd)].block_size);
1468        writecmd->sg.count = cpu_to_le32(1);
1469        /* ->stable is not used - it used to indicate the type of write */
1470
1471        ret = aac_build_sg(cmd, &writecmd->sg);
1472        if (ret < 0)
1473                return ret;
1474        fibsize = sizeof(struct aac_write) +
1475                ((le32_to_cpu(writecmd->sg.count) - 1) *
1476                 sizeof (struct sgentry));
1477        BUG_ON (fibsize > (fib->dev->max_fib_size -
1478                                sizeof(struct aac_fibhdr)));
1479        /*
1480         *      Now send the Fib to the adapter
1481         */
1482        return aac_fib_send(ContainerCommand,
1483                          fib,
1484                          fibsize,
1485                          FsaNormal,
1486                          0, 1,
1487                          (fib_callback) io_callback,
1488                          (void *) cmd);
1489}
1490
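/*
 * aac_scsi_common - fill in the SRB fields shared by the SCSI pass-through
 * paths: data direction flags, physical channel/id/lun, timeout in seconds
 * (minimum 1) and the CDB size.  The caller supplies the SG list and CDB.
 */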
1491static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
1492{
1493        struct aac_srb * srbcmd;
1494        u32 flag;
1495        u32 timeout;
1496
1497        aac_fib_init(fib);
1498        switch(cmd->sc_data_direction){
1499        case DMA_TO_DEVICE:
1500                flag = SRB_DataOut;
1501                break;
1502        case DMA_BIDIRECTIONAL:
1503                flag = SRB_DataIn | SRB_DataOut;
1504                break;
1505        case DMA_FROM_DEVICE:
1506                flag = SRB_DataIn;
1507                break;
1508        case DMA_NONE:
1509        default:        /* shuts up some versions of gcc */
1510                flag = SRB_NoDataXfer;
1511                break;
1512        }
1513
1514        srbcmd = (struct aac_srb*) fib_data(fib);
1515        srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1516        srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
1517        srbcmd->id       = cpu_to_le32(scmd_id(cmd));
1518        srbcmd->lun      = cpu_to_le32(cmd->device->lun);
1519        srbcmd->flags    = cpu_to_le32(flag);
1520        timeout = cmd->request->timeout/HZ;
1521        if (timeout == 0)
1522                timeout = 1;
1523        srbcmd->timeout  = cpu_to_le32(timeout);  /* timeout in seconds */
1524        srbcmd->retry_limit = 0; /* Obsolete parameter */
1525        srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
1526        return srbcmd;
1527}
1528
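/*
 * aac_construct_hbacmd - build the native HBA command request for a bypass
 * device: data direction, LUN, the IT nexus looked up in the hba_map, the
 * CDB and the firmware error buffer address/length.  iu_type, request_id
 * and reply_qid are filled in later on the send path.
 */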
1529static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
1530                                                        struct scsi_cmnd *cmd)
1531{
1532        struct aac_hba_cmd_req *hbacmd;
1533        struct aac_dev *dev;
1534        int bus, target;
1535        u64 address;
1536
1537        dev = (struct aac_dev *)cmd->device->host->hostdata;
1538
1539        hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
1540        memset(hbacmd, 0, 96);  /* clearing the full sizeof(*hbacmd) is not necessary */
1541        /* iu_type is a parameter of aac_hba_send */
1542        switch (cmd->sc_data_direction) {
1543        case DMA_TO_DEVICE:
1544                hbacmd->byte1 = 2;
1545                break;
1546        case DMA_FROM_DEVICE:
1547        case DMA_BIDIRECTIONAL:
1548                hbacmd->byte1 = 1;
1549                break;
1550        case DMA_NONE:
1551        default:
1552                break;
1553        }
1554        hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
1555
1556        bus = aac_logical_to_phys(scmd_channel(cmd));
1557        target = scmd_id(cmd);
1558        hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
1559
1560        /* we fill in reply_qid later in aac_src_deliver_message */
1561        /* we fill in iu_type, request_id later in aac_hba_send */
1562        /* we fill in emb_data_desc_count later in aac_build_sghba */
1563
1564        memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
1565        hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
1566
1567        address = (u64)fib->hw_error_pa;
1568        hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
1569        hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
1570        hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
1571
1572        return hbacmd;
1573}
1574
1575static void aac_srb_callback(void *context, struct fib * fibptr);
1576
1577static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
1578{
1579        u16 fibsize;
1580        struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1581        long ret;
1582
1583        ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
1584        if (ret < 0)
1585                return ret;
1586        srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1587
1588        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1589        memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
1590        /*
1591         *      Calculate the FIB size: SRB header plus the scatter/gather list
1592         */
1593        fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
1594                ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
1595                 sizeof (struct sgentry64));
1596        BUG_ON (fibsize > (fib->dev->max_fib_size -
1597                                sizeof(struct aac_fibhdr)));
1598
1599        /*
1600         *      Now send the Fib to the adapter
1601         */
1602        return aac_fib_send(ScsiPortCommand64, fib,
1603                                fibsize, FsaNormal, 0, 1,
1604                                  (fib_callback) aac_srb_callback,
1605                                  (void *) cmd);
1606}
1607
1608static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
1609{
1610        u16 fibsize;
1611        struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1612        long ret;
1613
1614        ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
1615        if (ret < 0)
1616                return ret;
1617        srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1618
1619        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1620        memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
1621        /*
1622         *      Calculate the FIB size: SRB header plus the scatter/gather list
1623         */
1624        fibsize = sizeof (struct aac_srb) +
1625                (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
1626                 sizeof (struct sgentry));
1627        BUG_ON (fibsize > (fib->dev->max_fib_size -
1628                                sizeof(struct aac_fibhdr)));
1629
1630        /*
1631         *      Now send the Fib to the adapter
1632         */
1633        return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
1634                                  (fib_callback) aac_srb_callback, (void *) cmd);
1635}
1636
1637static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
1638{
1639        if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
1640            (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
1641                return FAILED;
1642        return aac_scsi_32(fib, cmd);
1643}
1644
1645static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
1646{
1647        struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
1648        struct aac_dev *dev;
1649        long ret;
1650
1651        dev = (struct aac_dev *)cmd->device->host->hostdata;
1652
1653        ret = aac_build_sghba(cmd, hbacmd,
1654                dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
1655        if (ret < 0)
1656                return ret;
1657
1658        /*
1659         *      Now send the HBA command to the adapter
1660         */
1661        fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
1662                sizeof(struct aac_hba_sgl);
1663
1664        return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
1665                                  (fib_callback) aac_hba_callback,
1666                                  (void *) cmd);
1667}
1668
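/*
 * aac_send_safw_bmic_cmd - issue the BMIC request prepared in srbu to the
 * controller's virtual device bus/target as a synchronous ScsiPortCommand64
 * FIB, with xfer_buf DMA-mapped bidirectionally for the data phase.  Only
 * meaningful when dev->sa_firmware is set; returns 0 otherwise.
 */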
1669static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
1670        struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
1671{
1672        struct fib      *fibptr;
1673        dma_addr_t      addr;
1674        int             rcode;
1675        int             fibsize;
1676        struct aac_srb  *srb;
1677        struct aac_srb_reply *srb_reply;
1678        struct sgmap64  *sg64;
1679        u32 vbus;
1680        u32 vid;
1681
1682        if (!dev->sa_firmware)
1683                return 0;
1684
1685        /* allocate FIB */
1686        fibptr = aac_fib_alloc(dev);
1687        if (!fibptr)
1688                return -ENOMEM;
1689
1690        aac_fib_init(fibptr);
1691        fibptr->hw_fib_va->header.XferState &=
1692                ~cpu_to_le32(FastResponseCapable);
1693
1694        fibsize  = sizeof(struct aac_srb) - sizeof(struct sgentry) +
1695                                                sizeof(struct sgentry64);
1696
1697        /* map the caller's transfer buffer for the data/response phase */
1698        addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
1699                                                        DMA_BIDIRECTIONAL);
1700        if (dma_mapping_error(&dev->pdev->dev, addr)) {
1701                rcode = -ENOMEM;
1702                goto fib_error;
1703        }
1704
1705        srb = fib_data(fibptr);
1706        memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
1707
1708        vbus = (u32)le16_to_cpu(
1709                        dev->supplement_adapter_info.virt_device_bus);
1710        vid  = (u32)le16_to_cpu(
1711                        dev->supplement_adapter_info.virt_device_target);
1712
1713        /* set the common request fields */
1714        srb->channel            = cpu_to_le32(vbus);
1715        srb->id                 = cpu_to_le32(vid);
1716        srb->lun                = 0;
1717        srb->function           = cpu_to_le32(SRBF_ExecuteScsi);
1718        srb->timeout            = 0;
1719        srb->retry_limit        = 0;
1720        srb->cdb_size           = cpu_to_le32(16);
1721        srb->count              = cpu_to_le32(xfer_len);
1722
1723        sg64 = (struct sgmap64 *)&srb->sg;
1724        sg64->count             = cpu_to_le32(1);
1725        sg64->sg[0].addr[1]     = cpu_to_le32(upper_32_bits(addr));
1726        sg64->sg[0].addr[0]     = cpu_to_le32(lower_32_bits(addr));
1727        sg64->sg[0].count       = cpu_to_le32(xfer_len);
1728
1729        /*
1730         * Copy the updated request back for dumping or other usage if needed
1731         */
1732        memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
1733
1734        /* issue request to the controller */
1735        rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
1736                                        1, 1, NULL, NULL);
1737
1738        if (rcode == -ERESTARTSYS)
1739                rcode = -ERESTART;
1740
1741        if (unlikely(rcode < 0))
1742                goto bmic_error;
1743
1744        srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
1745        memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
1746
1747bmic_error:
1748        dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
1749fib_error:
1750        aac_fib_complete(fibptr);
1751        aac_fib_free(fibptr);
1752        return rcode;
1753}
1754
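/*
 * aac_set_safw_target_qd - derive the queue depth limit for a native raw
 * target from its BMIC identify data, falling back to 32 when no identify
 * response is available or the reported limit is out of range.
 */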
1755static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
1756{
1757
1758        struct aac_ciss_identify_pd *identify_resp;
1759
1760        if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
1761                return;
1762
1763        identify_resp = dev->hba_map[bus][target].safw_identify_resp;
1764        if (identify_resp == NULL) {
1765                dev->hba_map[bus][target].qd_limit = 32;
1766                return;
1767        }
1768
1769        if (identify_resp->current_queue_depth_limit <= 0 ||
1770                identify_resp->current_queue_depth_limit > 255)
1771                dev->hba_map[bus][target].qd_limit = 32;
1772        else
1773                dev->hba_map[bus][target].qd_limit =
1774                        identify_resp->current_queue_depth_limit;
1775}
1776
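/*
 * aac_issue_safw_bmic_identify - send a CISS_IDENTIFY_PHYSICAL_DEVICE BMIC
 * command (CDB opcode 0x26) for the given target and return the kmalloc'd
 * response in *identify_resp; the caller owns (and must free) the buffer.
 */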
1777static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
1778        struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
1779{
1780        int rcode = -ENOMEM;
1781        int datasize;
1782        struct aac_srb_unit srbu;
1783        struct aac_srb *srbcmd;
1784        struct aac_ciss_identify_pd *identify_reply;
1785
1786        datasize = sizeof(struct aac_ciss_identify_pd);
1787        identify_reply = kmalloc(datasize, GFP_KERNEL);
1788        if (!identify_reply)
1789                goto out;
1790
1791        memset(&srbu, 0, sizeof(struct aac_srb_unit));
1792
1793        srbcmd = &srbu.srb;
1794        srbcmd->flags   = cpu_to_le32(SRB_DataIn);
1795        srbcmd->cdb[0]  = 0x26;
1796        srbcmd->cdb[2]  = (u8)((AAC_MAX_LUN + target) & 0x00FF);
1797        srbcmd->cdb[6]  = CISS_IDENTIFY_PHYSICAL_DEVICE;
1798
1799        rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
1800        if (unlikely(rcode < 0))
1801                goto mem_free_all;
1802
1803        *identify_resp = identify_reply;
1804
1805out:
1806        return rcode;
1807mem_free_all:
1808        kfree(identify_reply);
1809        goto out;
1810}
1811
1812static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
1813{
1814        kfree(dev->safw_phys_luns);
1815        dev->safw_phys_luns = NULL;
1816}
1817
1818/**
1819 *      aac_get_safw_ciss_luns - Process topology change
1820 *      @dev:           aac_dev structure
1821 *
1822 *      Execute a CISS REPORT PHYSICAL LUNS and keep the validated response
1823 *      for later processing into the current hba_map.
1824 */
1825static int aac_get_safw_ciss_luns(struct aac_dev *dev)
1826{
1827        int rcode = -ENOMEM;
1828        int datasize;
1829        struct aac_srb *srbcmd;
1830        struct aac_srb_unit srbu;
1831        struct aac_ciss_phys_luns_resp *phys_luns;
1832
1833        datasize = sizeof(struct aac_ciss_phys_luns_resp) +
1834                (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
1835        phys_luns = kmalloc(datasize, GFP_KERNEL);
1836        if (phys_luns == NULL)
1837                goto out;
1838
1839        memset(&srbu, 0, sizeof(struct aac_srb_unit));
1840
1841        srbcmd = &srbu.srb;
1842        srbcmd->flags   = cpu_to_le32(SRB_DataIn);
1843        srbcmd->cdb[0]  = CISS_REPORT_PHYSICAL_LUNS;
1844        srbcmd->cdb[1]  = 2; /* extended reporting */
1845        srbcmd->cdb[8]  = (u8)(datasize >> 8);
1846        srbcmd->cdb[9]  = (u8)(datasize);
1847
1848        rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
1849        if (unlikely(rcode < 0))
1850                goto mem_free_all;
1851
1852        if (phys_luns->resp_flag != 2) {
1853                rcode = -ENOMSG;
1854                goto mem_free_all;
1855        }
1856
1857        dev->safw_phys_luns = phys_luns;
1858
1859out:
1860        return rcode;
1861mem_free_all:
1862        kfree(phys_luns);
1863        goto out;
1864}
1865
1866static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
1867{
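        /* list_length is a big-endian byte count; each LUN entry is 24 bytes */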
1868        return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
1869}
1870
1871static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
1872{
1873        return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
1874}
1875
1876static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
1877{
1878        return dev->safw_phys_luns->lun[lun].level2[0];
1879}
1880
1881static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
1882{
1883        return dev->safw_phys_luns->lun[lun].bus >> 6;
1884}
1885
1886static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
1887{
1888        return dev->safw_phys_luns->lun[lun].node_ident[9];
1889}
1890
1891static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
1892{
1893        return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
1894}
1895
1896static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
1897{
1898        return dev->safw_phys_luns->lun[lun].node_ident[8];
1899}
1900
1901static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
1902                                                int bus, int target)
1903{
1904        kfree(dev->hba_map[bus][target].safw_identify_resp);
1905        dev->hba_map[bus][target].safw_identify_resp = NULL;
1906}
1907
1908static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
1909        int lun_count)
1910{
1911        int luns;
1912        int i;
1913        u32 bus;
1914        u32 target;
1915
1916        luns = aac_get_safw_phys_lun_count(dev);
1917
1918        if (luns < lun_count)
1919                lun_count = luns;
1920        else if (lun_count < 0)
1921                lun_count = luns;
1922
1923        for (i = 0; i < lun_count; i++) {
1924                bus = aac_get_safw_phys_bus(dev, i);
1925                target = aac_get_safw_phys_target(dev, i);
1926
1927                aac_free_safw_identify_resp(dev, bus, target);
1928        }
1929}
1930
1931static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
1932{
1933        int i;
1934        int rcode = 0;
1935        u32 lun_count;
1936        u32 bus;
1937        u32 target;
1938        struct aac_ciss_identify_pd *identify_resp = NULL;
1939
1940        lun_count = aac_get_safw_phys_lun_count(dev);
1941
1942        for (i = 0; i < lun_count; ++i) {
1943
1944                bus = aac_get_safw_phys_bus(dev, i);
1945                target = aac_get_safw_phys_target(dev, i);
1946
1947                rcode = aac_issue_safw_bmic_identify(dev,
1948                                                &identify_resp, bus, target);
1949
1950                if (unlikely(rcode < 0))
1951                        goto free_identify_resp;
1952
1953                dev->hba_map[bus][target].safw_identify_resp = identify_resp;
1954        }
1955
1956out:
1957        return rcode;
1958free_identify_resp:
1959        aac_free_safw_all_identify_resp(dev, i);
1960        goto out;
1961}
1962
1963/**
1964 *      aac_set_safw_attr_all_targets - update current hba map with data from FW
1965 *      @dev:   aac_dev structure
1968 *
1969 *      Update our hba map with the information gathered from the FW
1970 */
1971static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
1972{
1973        /* walk the extended-report LUN list fetched by aac_get_safw_ciss_luns */
1974        u32 lun_count, nexus;
1975        u32 i, bus, target;
1976        u8 expose_flag, attribs;
1977        u8 devtype;
1978
1979        lun_count = aac_get_safw_phys_lun_count(dev);
1980
1981        dev->scan_counter++;
1982
1983        for (i = 0; i < lun_count; ++i) {
1984
1985                bus = aac_get_safw_phys_bus(dev, i);
1986                target = aac_get_safw_phys_target(dev, i);
1987                expose_flag = aac_get_safw_phys_expose_flag(dev, i);
1988                attribs = aac_get_safw_phys_attribs(dev, i);
1989                nexus = aac_get_safw_phys_nexus(dev, i);
1990
1991                if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
1992                        continue;
1993
1994                if (expose_flag != 0) {
1995                        devtype = AAC_DEVTYPE_RAID_MEMBER;
1996                        goto update_devtype;
1997                }
1998
1999                if (nexus != 0 && (attribs & 8)) {
2000                        devtype = AAC_DEVTYPE_NATIVE_RAW;
2001                        dev->hba_map[bus][target].rmw_nexus =
2002                                        nexus;
2003                } else
2004                        devtype = AAC_DEVTYPE_ARC_RAW;
2005
2006                dev->hba_map[bus][target].scan_counter = dev->scan_counter;
2007
2008                aac_set_safw_target_qd(dev, bus, target);
2009
2010update_devtype:
2011                dev->hba_map[bus][target].devtype = devtype;
2012        }
2013}
2014
2015static int aac_setup_safw_targets(struct aac_dev *dev)
2016{
2017        int rcode = 0;
2018
2019        rcode = aac_get_containers(dev);
2020        if (unlikely(rcode < 0))
2021                goto out;
2022
2023        rcode = aac_get_safw_ciss_luns(dev);
2024        if (unlikely(rcode < 0))
2025                goto out;
2026
2027        rcode = aac_get_safw_attr_all_targets(dev);
2028        if (unlikely(rcode < 0))
2029                goto free_ciss_luns;
2030
2031        aac_set_safw_attr_all_targets(dev);
2032
2033        aac_free_safw_all_identify_resp(dev, -1);
2034free_ciss_luns:
2035        aac_free_safw_ciss_luns(dev);
2036out:
2037        return rcode;
2038}
2039
2040int aac_setup_safw_adapter(struct aac_dev *dev)
2041{
2042        return aac_setup_safw_targets(dev);
2043}
2044
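/*
 * aac_get_adapter_info - fetch RequestAdapterInfo (plus the supplemental
 * info and GetBusInfo when available), reset the hba_map, print the
 * firmware/BIOS revisions, and configure DMA/DAC support, SG table sizes
 * and the adapter read/write/scsi method pointers accordingly.
 */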
2045int aac_get_adapter_info(struct aac_dev* dev)
2046{
2047        struct fib* fibptr;
2048        int rcode;
2049        u32 tmp, bus, target;
2050        struct aac_adapter_info *info;
2051        struct aac_bus_info *command;
2052        struct aac_bus_info_response *bus_info;
2053
2054        if (!(fibptr = aac_fib_alloc(dev)))
2055                return -ENOMEM;
2056
2057        aac_fib_init(fibptr);
2058        info = (struct aac_adapter_info *) fib_data(fibptr);
2059        memset(info,0,sizeof(*info));
2060
2061        rcode = aac_fib_send(RequestAdapterInfo,
2062                         fibptr,
2063                         sizeof(*info),
2064                         FsaNormal,
2065                         -1, 1, /* First `interrupt' command uses special wait */
2066                         NULL,
2067                         NULL);
2068
2069        if (rcode < 0) {
2070                /* FIB should be freed only after
2071                 * getting the response from the F/W */
2072                if (rcode != -ERESTARTSYS) {
2073                        aac_fib_complete(fibptr);
2074                        aac_fib_free(fibptr);
2075                }
2076                return rcode;
2077        }
2078        memcpy(&dev->adapter_info, info, sizeof(*info));
2079
2080        dev->supplement_adapter_info.virt_device_bus = 0xffff;
2081        if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
2082                struct aac_supplement_adapter_info * sinfo;
2083
2084                aac_fib_init(fibptr);
2085
2086                sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
2087
2088                memset(sinfo,0,sizeof(*sinfo));
2089
2090                rcode = aac_fib_send(RequestSupplementAdapterInfo,
2091                                 fibptr,
2092                                 sizeof(*sinfo),
2093                                 FsaNormal,
2094                                 1, 1,
2095                                 NULL,
2096                                 NULL);
2097
2098                if (rcode >= 0)
2099                        memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
2100                if (rcode == -ERESTARTSYS) {
2101                        fibptr = aac_fib_alloc(dev);
2102                        if (!fibptr)
2103                                return -ENOMEM;
2104                }
2105
2106        }
2107
2108        /* reset all previously mapped devices (i.e. for initialization after IOP_RESET) */
2109        for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
2110                for (target = 0; target < AAC_MAX_TARGETS; target++) {
2111                        dev->hba_map[bus][target].devtype = 0;
2112                        dev->hba_map[bus][target].qd_limit = 0;
2113                }
2114        }
2115
2116        /*
2117         * GetBusInfo
2118         */
2119
2120        aac_fib_init(fibptr);
2121
2122        bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
2123
2124        memset(bus_info, 0, sizeof(*bus_info));
2125
2126        command = (struct aac_bus_info *)bus_info;
2127
2128        command->Command = cpu_to_le32(VM_Ioctl);
2129        command->ObjType = cpu_to_le32(FT_DRIVE);
2130        command->MethodId = cpu_to_le32(1);
2131        command->CtlCmd = cpu_to_le32(GetBusInfo);
2132
2133        rcode = aac_fib_send(ContainerCommand,
2134                         fibptr,
2135                         sizeof (*bus_info),
2136                         FsaNormal,
2137                         1, 1,
2138                         NULL, NULL);
2139
2140        /* reasonable default */
2141        dev->maximum_num_physicals = 16;
2142        if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
2143                dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
2144                dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
2145        }
2146
2147        if (!dev->in_reset) {
2148                char buffer[16];
2149                tmp = le32_to_cpu(dev->adapter_info.kernelrev);
2150                printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
2151                        dev->name,
2152                        dev->id,
2153                        tmp>>24,
2154                        (tmp>>16)&0xff,
2155                        tmp&0xff,
2156                        le32_to_cpu(dev->adapter_info.kernelbuild),
2157                        (int)sizeof(dev->supplement_adapter_info.build_date),
2158                        dev->supplement_adapter_info.build_date);
2159                tmp = le32_to_cpu(dev->adapter_info.monitorrev);
2160                printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
2161                        dev->name, dev->id,
2162                        tmp>>24,(tmp>>16)&0xff,tmp&0xff,
2163                        le32_to_cpu(dev->adapter_info.monitorbuild));
2164                tmp = le32_to_cpu(dev->adapter_info.biosrev);
2165                printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
2166                        dev->name, dev->id,
2167                        tmp>>24,(tmp>>16)&0xff,tmp&0xff,
2168                        le32_to_cpu(dev->adapter_info.biosbuild));
2169                buffer[0] = '\0';
2170                if (aac_get_serial_number(
2171                  shost_to_class(dev->scsi_host_ptr), buffer))
2172                        printk(KERN_INFO "%s%d: serial %s",
2173                          dev->name, dev->id, buffer);
2174                if (dev->supplement_adapter_info.vpd_info.tsid[0]) {
2175                        printk(KERN_INFO "%s%d: TSID %.*s\n",
2176                          dev->name, dev->id,
2177                          (int)sizeof(dev->supplement_adapter_info
2178                                                        .vpd_info.tsid),
2179                                dev->supplement_adapter_info.vpd_info.tsid);
2180                }
2181                if (!aac_check_reset || ((aac_check_reset == 1) &&
2182                  (dev->supplement_adapter_info.supported_options2 &
2183                  AAC_OPTION_IGNORE_RESET))) {
2184                        printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
2185                          dev->name, dev->id);
2186                }
2187        }
2188
2189        dev->cache_protected = 0;
2190        dev->jbod = ((dev->supplement_adapter_info.feature_bits &
2191                AAC_FEATURE_JBOD) != 0);
2192        dev->nondasd_support = 0;
2193        dev->raid_scsi_mode = 0;
2194        if (dev->adapter_info.options & AAC_OPT_NONDASD)
2195                dev->nondasd_support = 1;
2196
2197        /*
2198         * If the firmware supports ROMB RAID/SCSI mode and we are currently
2199         * in RAID/SCSI mode, set the flag. For now if in this mode we will
2200         * force nondasd support on. If we decide to allow the non-dasd flag
2201         * additional changes will have to be made to support
2202         * RAID/SCSI. The function aac_scsi_cmd in this module will have to be
2203         * changed to support the new dev->raid_scsi_mode flag instead of
2204         * leeching off of the dev->nondasd_support flag. Also in linit.c the
2205         * function aac_detect will have to be modified where it sets up the
2206         * max number of channels based on the aac->nondasd_support flag only.
2207         */
2208        if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
2209            (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
2210                dev->nondasd_support = 1;
2211                dev->raid_scsi_mode = 1;
2212        }
2213        if (dev->raid_scsi_mode != 0)
2214                printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
2215                                dev->name, dev->id);
2216
2217        if (nondasd != -1)
2218                dev->nondasd_support = (nondasd!=0);
2219        if (dev->nondasd_support && !dev->in_reset)
2220                printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
2221
2222        if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
2223                dev->needs_dac = 1;
2224        dev->dac_support = 0;
2225        if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
2226            (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
2227                if (!dev->in_reset)
2228                        printk(KERN_INFO "%s%d: 64bit support enabled.\n",
2229                                dev->name, dev->id);
2230                dev->dac_support = 1;
2231        }
2232
2233        if (dacmode != -1) {
2234                dev->dac_support = (dacmode != 0);
2235        }
2236
2237        /* avoid problems with AAC_QUIRK_SCSI_32 controllers */
2238        if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
2239                & AAC_QUIRK_SCSI_32)) {
2240                dev->nondasd_support = 0;
2241                dev->jbod = 0;
2242                expose_physicals = 0;
2243        }
2244
2245        if (dev->dac_support) {
2246                if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
2247                        if (!dev->in_reset)
2248                                dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
2249                } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
2250                        dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
2251                        dev->dac_support = 0;
2252                } else {
2253                        dev_info(&dev->pdev->dev, "No suitable DMA available\n");
2254                        rcode = -ENOMEM;
2255                }
2256        }
2257        /*
2258         * Deal with configuring for the individualized limits of each packet
2259         * interface.
2260         */
2261        dev->a_ops.adapter_scsi = (dev->dac_support)
2262          ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
2263                                ? aac_scsi_32_64
2264                                : aac_scsi_64)
2265                                : aac_scsi_32;
2266        if (dev->raw_io_interface) {
2267                dev->a_ops.adapter_bounds = (dev->raw_io_64)
2268                                        ? aac_bounds_64
2269                                        : aac_bounds_32;
2270                dev->a_ops.adapter_read = aac_read_raw_io;
2271                dev->a_ops.adapter_write = aac_write_raw_io;
2272        } else {
2273                dev->a_ops.adapter_bounds = aac_bounds_32;
2274                dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
2275                        sizeof(struct aac_fibhdr) -
2276                        sizeof(struct aac_write) + sizeof(struct sgentry)) /
2277                                sizeof(struct sgentry);
2278                if (dev->dac_support) {
2279                        dev->a_ops.adapter_read = aac_read_block64;
2280                        dev->a_ops.adapter_write = aac_write_block64;
2281                        /*
2282                         * 38 scatter gather elements
2283                         */
2284                        dev->scsi_host_ptr->sg_tablesize =
2285                                (dev->max_fib_size -
2286                                sizeof(struct aac_fibhdr) -
2287                                sizeof(struct aac_write64) +
2288                                sizeof(struct sgentry64)) /
2289                                        sizeof(struct sgentry64);
2290                } else {
2291                        dev->a_ops.adapter_read = aac_read_block;
2292                        dev->a_ops.adapter_write = aac_write_block;
2293                }
2294                dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
2295                if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
2296                        /*
2297                         * Worst case size that could cause sg overflow when
2298                         * we break up SG elements that are larger than 64KB.
2299                         * Would be nice if we could tell the SCSI layer what
2300                         * the maximum SG element size can be. Worst case is
2301                         * (sg_tablesize-1) 4KB elements with one 64KB
2302                         * element.
2303                         *      32bit -> 468 or 238KB   64bit -> 424 or 212KB
2304                         */
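                        /*
                         * Worked example: with a (hypothetical) sg_tablesize
                         * of 34 this gives 34 * 8 + 112 = 384 sectors, i.e.
                         * 192KB per request.
                         */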
2305                        dev->scsi_host_ptr->max_sectors =
2306                          (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
2307                }
2308        }
2309        if (!dev->sync_mode && dev->sa_firmware &&
2310                dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
2311                dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
2312                        HBA_MAX_SG_SEPARATE;
2313
2314        /* FIB should be freed only after getting the response from the F/W */
2315        if (rcode != -ERESTARTSYS) {
2316                aac_fib_complete(fibptr);
2317                aac_fib_free(fibptr);
2318        }
2319
2320        return rcode;
2321}
2322
2323
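/*
 * io_callback - completion handler for the container read/write FIBs built
 * above: unmap the scatter/gather list, translate the firmware status into
 * a SCSI result (with sense data for the failure cases) and complete the
 * command.
 */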
2324static void io_callback(void *context, struct fib * fibptr)
2325{
2326        struct aac_dev *dev;
2327        struct aac_read_reply *readreply;
2328        struct scsi_cmnd *scsicmd;
2329        u32 cid;
2330
2331        scsicmd = (struct scsi_cmnd *) context;
2332
2333        if (!aac_valid_context(scsicmd, fibptr))
2334                return;
2335
2336        dev = fibptr->dev;
2337        cid = scmd_id(scsicmd);
2338
2339        if (nblank(dprintk(x))) {
2340                u64 lba;
2341                switch (scsicmd->cmnd[0]) {
2342                case WRITE_6:
2343                case READ_6:
2344                        lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2345                            (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2346                        break;
2347                case WRITE_16:
2348                case READ_16:
2349                        lba = ((u64)scsicmd->cmnd[2] << 56) |
2350                              ((u64)scsicmd->cmnd[3] << 48) |
2351                              ((u64)scsicmd->cmnd[4] << 40) |
2352                              ((u64)scsicmd->cmnd[5] << 32) |
2353                              ((u64)scsicmd->cmnd[6] << 24) |
2354                              (scsicmd->cmnd[7] << 16) |
2355                              (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2356                        break;
2357                case WRITE_12:
2358                case READ_12:
2359                        lba = ((u64)scsicmd->cmnd[2] << 24) |
2360                              (scsicmd->cmnd[3] << 16) |
2361                              (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2362                        break;
2363                default:
2364                        lba = ((u64)scsicmd->cmnd[2] << 24) |
2365                               (scsicmd->cmnd[3] << 16) |
2366                               (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2367                        break;
2368                }
2369                printk(KERN_DEBUG
2370                  "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
2371                  smp_processor_id(), (unsigned long long)lba, jiffies);
2372        }
2373
2374        BUG_ON(fibptr == NULL);
2375
2376        scsi_dma_unmap(scsicmd);
2377
2378        readreply = (struct aac_read_reply *)fib_data(fibptr);
2379        switch (le32_to_cpu(readreply->status)) {
2380        case ST_OK:
2381                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2382                        SAM_STAT_GOOD;
2383                dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
2384                break;
2385        case ST_NOT_READY:
2386                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2387                        SAM_STAT_CHECK_CONDITION;
2388                set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
2389                  SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
2390                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2391                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2392                             SCSI_SENSE_BUFFERSIZE));
2393                break;
2394        case ST_MEDERR:
2395                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2396                        SAM_STAT_CHECK_CONDITION;
2397                set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
2398                  SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
2399                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2400                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2401                             SCSI_SENSE_BUFFERSIZE));
2402                break;
2403        default:
2404#ifdef AAC_DETAILED_STATUS_INFO
2405                printk(KERN_WARNING "io_callback: io failed, status = %d\n",
2406                  le32_to_cpu(readreply->status));
2407#endif
2408                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2409                        SAM_STAT_CHECK_CONDITION;
2410                set_sense(&dev->fsa_dev[cid].sense_data,
2411                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2412                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2413                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2414                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2415                             SCSI_SENSE_BUFFERSIZE));
2416                break;
2417        }
2418        aac_fib_complete(fibptr);
2419
2420        scsicmd->scsi_done(scsicmd);
2421}
2422
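/*
 * aac_read - decode the LBA and transfer length from a READ(6/10/12/16)
 * CDB, range-check the request against the container size, then hand it to
 * the adapter-specific read routine via aac_adapter_read().
 */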
2423static int aac_read(struct scsi_cmnd * scsicmd)
2424{
2425        u64 lba;
2426        u32 count;
2427        int status;
2428        struct aac_dev *dev;
2429        struct fib * cmd_fibcontext;
2430        int cid;
2431
2432        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2433        /*
2434         *      Get block address and transfer length
2435         */
2436        switch (scsicmd->cmnd[0]) {
2437        case READ_6:
2438                dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
2439
2440                lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2441                        (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2442                count = scsicmd->cmnd[4];
2443
2444                if (count == 0)
2445                        count = 256;
2446                break;
2447        case READ_16:
2448                dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
2449
2450                lba =   ((u64)scsicmd->cmnd[2] << 56) |
2451                        ((u64)scsicmd->cmnd[3] << 48) |
2452                        ((u64)scsicmd->cmnd[4] << 40) |
2453                        ((u64)scsicmd->cmnd[5] << 32) |
2454                        ((u64)scsicmd->cmnd[6] << 24) |
2455                        (scsicmd->cmnd[7] << 16) |
2456                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2457                count = (scsicmd->cmnd[10] << 24) |
2458                        (scsicmd->cmnd[11] << 16) |
2459                        (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2460                break;
2461        case READ_12:
2462                dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
2463
2464                lba = ((u64)scsicmd->cmnd[2] << 24) |
2465                        (scsicmd->cmnd[3] << 16) |
2466                        (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2467                count = (scsicmd->cmnd[6] << 24) |
2468                        (scsicmd->cmnd[7] << 16) |
2469                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2470                break;
2471        default:
2472                dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
2473
2474                lba = ((u64)scsicmd->cmnd[2] << 24) |
2475                        (scsicmd->cmnd[3] << 16) |
2476                        (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2477                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2478                break;
2479        }
2480
2481        if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2482                cid = scmd_id(scsicmd);
2483                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2484                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2485                        SAM_STAT_CHECK_CONDITION;
2486                set_sense(&dev->fsa_dev[cid].sense_data,
2487                          HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2488                          ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2489                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2490                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2491                             SCSI_SENSE_BUFFERSIZE));
2492                scsicmd->scsi_done(scsicmd);
2493                return 1;
2494        }
2495
2496        dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
2497          smp_processor_id(), (unsigned long long)lba, jiffies));
2498        if (aac_adapter_bounds(dev,scsicmd,lba))
2499                return 0;
2500        /*
2501         *      Allocate and initialize a Fib
2502         */
2503        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2504        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2505        status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
2506
2507        /*
2508         *      Check that the command was queued to the controller
2509         */
2510        if (status == -EINPROGRESS)
2511                return 0;
2512
2513        printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
2514        /*
2515         *      For some reason, the Fib didn't queue, return QUEUE_FULL
2516         */
2517        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2518        scsicmd->scsi_done(scsicmd);
2519        aac_fib_complete(cmd_fibcontext);
2520        aac_fib_free(cmd_fibcontext);
2521        return 0;
2522}
2523
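/*
 * aac_write - the WRITE counterpart of aac_read(): decode the LBA, length
 * and FUA bit from the CDB, range-check the request and pass it to
 * aac_adapter_write().
 */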
2524static int aac_write(struct scsi_cmnd * scsicmd)
2525{
2526        u64 lba;
2527        u32 count;
2528        int fua;
2529        int status;
2530        struct aac_dev *dev;
2531        struct fib * cmd_fibcontext;
2532        int cid;
2533
2534        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2535        /*
2536         *      Get block address and transfer length
2537         */
2538        if (scsicmd->cmnd[0] == WRITE_6)        /* 6 byte command */
2539        {
2540                lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2541                count = scsicmd->cmnd[4];
2542                if (count == 0)
2543                        count = 256;
2544                fua = 0;
2545        } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
2546                dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
2547
2548                lba =   ((u64)scsicmd->cmnd[2] << 56) |
2549                        ((u64)scsicmd->cmnd[3] << 48) |
2550                        ((u64)scsicmd->cmnd[4] << 40) |
2551                        ((u64)scsicmd->cmnd[5] << 32) |
2552                        ((u64)scsicmd->cmnd[6] << 24) |
2553                        (scsicmd->cmnd[7] << 16) |
2554                        (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2555                count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
2556                        (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2557                fua = scsicmd->cmnd[1] & 0x8;
2558        } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
2559                dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
2560
2561                lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
2562                    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2563                count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
2564                      | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2565                fua = scsicmd->cmnd[1] & 0x8;
2566        } else {
2567                dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
2568                lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2569                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2570                fua = scsicmd->cmnd[1] & 0x8;
2571        }
2572
2573        if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2574                cid = scmd_id(scsicmd);
2575                dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2576                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2577                        SAM_STAT_CHECK_CONDITION;
2578                set_sense(&dev->fsa_dev[cid].sense_data,
2579                          HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2580                          ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2581                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2582                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2583                             SCSI_SENSE_BUFFERSIZE));
2584                scsicmd->scsi_done(scsicmd);
2585                return 1;
2586        }
2587
2588        dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
2589          smp_processor_id(), (unsigned long long)lba, jiffies));
2590        if (aac_adapter_bounds(dev,scsicmd,lba))
2591                return 0;
2592        /*
2593         *      Allocate and initialize a Fib then setup a BlockWrite command
2594         */
2595        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2596        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2597        status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
2598
2599        /*
2600         *      Check that the command was queued to the controller
2601         */
2602        if (status == -EINPROGRESS)
2603                return 0;
2604
2605        printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
2606        /*
2607         *      For some reason, the Fib didn't queue, return QUEUE_FULL
2608         */
2609        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2610        scsicmd->scsi_done(scsicmd);
2611
2612        aac_fib_complete(cmd_fibcontext);
2613        aac_fib_free(cmd_fibcontext);
2614        return 0;
2615}
2616
2617static void synchronize_callback(void *context, struct fib *fibptr)
2618{
2619        struct aac_synchronize_reply *synchronizereply;
2620        struct scsi_cmnd *cmd;
2621
2622        cmd = context;
2623
2624        if (!aac_valid_context(cmd, fibptr))
2625                return;
2626
2627        dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
2628                                smp_processor_id(), jiffies));
2629        BUG_ON(fibptr == NULL);
2630
2632        synchronizereply = fib_data(fibptr);
2633        if (le32_to_cpu(synchronizereply->status) == CT_OK)
2634                cmd->result = DID_OK << 16 |
2635                        COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2636        else {
2637                struct scsi_device *sdev = cmd->device;
2638                struct aac_dev *dev = fibptr->dev;
2639                u32 cid = sdev_id(sdev);
2640                printk(KERN_WARNING
2641                     "synchronize_callback: synchronize failed, status = %d\n",
2642                     le32_to_cpu(synchronizereply->status));
2643                cmd->result = DID_OK << 16 |
2644                        COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2645                set_sense(&dev->fsa_dev[cid].sense_data,
2646                  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2647                  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2648                memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2649                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2650                             SCSI_SENSE_BUFFERSIZE));
2651        }
2652
2653        aac_fib_complete(fibptr);
2654        aac_fib_free(fibptr);
2655        cmd->scsi_done(cmd);
2656}
2657
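/*
 * aac_synchronize - handle SYNCHRONIZE CACHE: if any overlapping write is
 * still owned by the firmware the command is requeued, otherwise a
 * CT_FLUSH_CACHE container command is sent, with synchronize_callback()
 * completing the SCSI command.
 */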
2658static int aac_synchronize(struct scsi_cmnd *scsicmd)
2659{
2660        int status;
2661        struct fib *cmd_fibcontext;
2662        struct aac_synchronize *synchronizecmd;
2663        struct scsi_cmnd *cmd;
2664        struct scsi_device *sdev = scsicmd->device;
2665        int active = 0;
2666        struct aac_dev *aac;
2667        u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
2668                (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2669        u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2670        unsigned long flags;
2671
2672        /*
2673         * Wait for all outstanding queued commands to complete to this
2674         * specific target (block).
2675         */
2676        spin_lock_irqsave(&sdev->list_lock, flags);
2677        list_for_each_entry(cmd, &sdev->cmd_list, list)
2678                if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
2679                        u64 cmnd_lba;
2680                        u32 cmnd_count;
2681
2682                        if (cmd->cmnd[0] == WRITE_6) {
2683                                cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
2684                                        (cmd->cmnd[2] << 8) |
2685                                        cmd->cmnd[3];
2686                                cmnd_count = cmd->cmnd[4];
2687                                if (cmnd_count == 0)
2688                                        cmnd_count = 256;
2689                        } else if (cmd->cmnd[0] == WRITE_16) {
2690                                cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
2691                                        ((u64)cmd->cmnd[3] << 48) |
2692                                        ((u64)cmd->cmnd[4] << 40) |
2693                                        ((u64)cmd->cmnd[5] << 32) |
2694                                        ((u64)cmd->cmnd[6] << 24) |
2695                                        (cmd->cmnd[7] << 16) |
2696                                        (cmd->cmnd[8] << 8) |
2697                                        cmd->cmnd[9];
2698                                cmnd_count = (cmd->cmnd[10] << 24) |
2699                                        (cmd->cmnd[11] << 16) |
2700                                        (cmd->cmnd[12] << 8) |
2701                                        cmd->cmnd[13];
2702                        } else if (cmd->cmnd[0] == WRITE_12) {
2703                                cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
2704                                        (cmd->cmnd[3] << 16) |
2705                                        (cmd->cmnd[4] << 8) |
2706                                        cmd->cmnd[5];
2707                                cmnd_count = (cmd->cmnd[6] << 24) |
2708                                        (cmd->cmnd[7] << 16) |
2709                                        (cmd->cmnd[8] << 8) |
2710                                        cmd->cmnd[9];
2711                        } else if (cmd->cmnd[0] == WRITE_10) {
2712                                cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
2713                                        (cmd->cmnd[3] << 16) |
2714                                        (cmd->cmnd[4] << 8) |
2715                                        cmd->cmnd[5];
2716                                cmnd_count = (cmd->cmnd[7] << 8) |
2717                                        cmd->cmnd[8];
2718                        } else
2719                                continue;
2720                        if (((cmnd_lba + cmnd_count) < lba) ||
2721                          (count && ((lba + count) < cmnd_lba)))
2722                                continue;
2723                        ++active;
2724                        break;
2725                }
2726
2727        spin_unlock_irqrestore(&sdev->list_lock, flags);
2728
2729        /*
2730         *      Yield the processor (requeue for later)
2731         */
2732        if (active)
2733                return SCSI_MLQUEUE_DEVICE_BUSY;
2734
2735        aac = (struct aac_dev *)sdev->host->hostdata;
2736        if (aac->in_reset)
2737                return SCSI_MLQUEUE_HOST_BUSY;
2738
2739        /*
2740         *      Allocate and initialize a Fib
2741         */
2742        if (!(cmd_fibcontext = aac_fib_alloc(aac)))
2743                return SCSI_MLQUEUE_HOST_BUSY;
2744
2745        aac_fib_init(cmd_fibcontext);
2746
2747        synchronizecmd = fib_data(cmd_fibcontext);
2748        synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
2749        synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
2750        synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
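            /*
             * sizeof(((struct aac_synchronize_reply *)NULL)->data) is the
             * usual "sizeof of a struct member without an instance" idiom:
             * only the type is inspected, the NULL pointer is never
             * dereferenced.
             */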
2751        synchronizecmd->count =
2752             cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
2753        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2754
2755        /*
2756         *      Now send the Fib to the adapter
2757         */
2758        status = aac_fib_send(ContainerCommand,
2759                  cmd_fibcontext,
2760                  sizeof(struct aac_synchronize),
2761                  FsaNormal,
2762                  0, 1,
2763                  (fib_callback)synchronize_callback,
2764                  (void *)scsicmd);
2765
2766        /*
2767         *      Check that the command was queued to the controller
2768         */
2769        if (status == -EINPROGRESS)
2770                return 0;
2771
2772        printk(KERN_WARNING
2773                "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
2774        aac_fib_complete(cmd_fibcontext);
2775        aac_fib_free(cmd_fibcontext);
2776        return SCSI_MLQUEUE_HOST_BUSY;
2777}
2778
2779static void aac_start_stop_callback(void *context, struct fib *fibptr)
2780{
2781        struct scsi_cmnd *scsicmd = context;
2782
2783        if (!aac_valid_context(scsicmd, fibptr))
2784                return;
2785
2786        BUG_ON(fibptr == NULL);
2787
2788        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2789
2790        aac_fib_complete(fibptr);
2791        aac_fib_free(fibptr);
2792        scsicmd->scsi_done(scsicmd);
2793}
2794
2795static int aac_start_stop(struct scsi_cmnd *scsicmd)
2796{
2797        int status;
2798        struct fib *cmd_fibcontext;
2799        struct aac_power_management *pmcmd;
2800        struct scsi_device *sdev = scsicmd->device;
2801        struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
2802
2803        if (!(aac->supplement_adapter_info.supported_options2 &
2804              AAC_OPTION_POWER_MANAGEMENT)) {
2805                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2806                                  SAM_STAT_GOOD;
2807                scsicmd->scsi_done(scsicmd);
2808                return 0;
2809        }
2810
2811        if (aac->in_reset)
2812                return SCSI_MLQUEUE_HOST_BUSY;
2813
2814        /*
2815         *      Allocate and initialize a Fib
2816         */
2817        cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
2818
2819        aac_fib_init(cmd_fibcontext);
2820
2821        pmcmd = fib_data(cmd_fibcontext);
2822        pmcmd->command = cpu_to_le32(VM_ContainerConfig);
2823        pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
2824        /* Eject bit ignored, not relevant */
2825        pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
2826                cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
2827        pmcmd->cid = cpu_to_le32(sdev_id(sdev));
2828        pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
2829                cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
2830        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2831
2832        /*
2833         *      Now send the Fib to the adapter
2834         */
2835        status = aac_fib_send(ContainerCommand,
2836                  cmd_fibcontext,
2837                  sizeof(struct aac_power_management),
2838                  FsaNormal,
2839                  0, 1,
2840                  (fib_callback)aac_start_stop_callback,
2841                  (void *)scsicmd);
2842
2843        /*
2844         *      Check that the command was queued to the controller
2845         */
2846        if (status == -EINPROGRESS)
2847                return 0;
2848
2849        aac_fib_complete(cmd_fibcontext);
2850        aac_fib_free(cmd_fibcontext);
2851        return SCSI_MLQUEUE_HOST_BUSY;
2852}
2853
2854/**
2855 *      aac_scsi_cmd()          -       Process SCSI command
2856 *      @scsicmd:               SCSI command block
2857 *
2858 *      Emulate a SCSI command and queue the required request for the
2859 *      aacraid firmware.
2860 */
2861
2862int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2863{
2864        u32 cid, bus;
2865        struct Scsi_Host *host = scsicmd->device->host;
2866        struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2867        struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
2868
2869        if (fsa_dev_ptr == NULL)
2870                return -1;
2871        /*
2872         *      If the bus, id or lun is out of range, return fail
2873         *      Test does not apply to ID 16, the pseudo id for the controller
2874         *      itself.
2875         */
2876        cid = scmd_id(scsicmd);
2877        if (cid != host->this_id) {
2878                if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
2879                        if ((cid >= dev->maximum_num_containers) ||
2880                                        (scsicmd->device->lun != 0)) {
2881                                scsicmd->result = DID_NO_CONNECT << 16;
2882                                goto scsi_done_ret;
2883                        }
2884
2885                        /*
2886                         *      If the target container doesn't exist, it may have
2887                         *      been newly created
2888                         */
2889                        if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
2890                          (fsa_dev_ptr[cid].sense_data.sense_key ==
2891                           NOT_READY)) {
2892                                switch (scsicmd->cmnd[0]) {
2893                                case SERVICE_ACTION_IN_16:
2894                                        if (!(dev->raw_io_interface) ||
2895                                            !(dev->raw_io_64) ||
2896                                            ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
2897                                                break;
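                                            /* fall through: a supported READ CAPACITY(16) probes the container like the commands below */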
2898                                case INQUIRY:
2899                                case READ_CAPACITY:
2900                                case TEST_UNIT_READY:
2901                                        if (dev->in_reset)
2902                                                return -1;
2903                                        return _aac_probe_container(scsicmd,
2904                                                        aac_probe_container_callback2);
2905                                default:
2906                                        break;
2907                                }
2908                        }
2909                } else {  /* check for physical non-dasd devices */
2910                        bus = aac_logical_to_phys(scmd_channel(scsicmd));
2911
2912                        if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
2913                                dev->hba_map[bus][cid].devtype
2914                                        == AAC_DEVTYPE_NATIVE_RAW) {
2915                                if (dev->in_reset)
2916                                        return -1;
2917                                return aac_send_hba_fib(scsicmd);
2918                        } else if (dev->nondasd_support || expose_physicals ||
2919                                dev->jbod) {
2920                                if (dev->in_reset)
2921                                        return -1;
2922                                return aac_send_srb_fib(scsicmd);
2923                        } else {
2924                                scsicmd->result = DID_NO_CONNECT << 16;
2925                                goto scsi_done_ret;
2926                        }
2927                }
2928        }
2929        /*
2930         * else Command for the controller itself
2931         */
2932        else if ((scsicmd->cmnd[0] != INQUIRY) &&       /* only INQUIRY & TUR cmnd supported for controller */
2933                (scsicmd->cmnd[0] != TEST_UNIT_READY))
2934        {
2935                dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
2936                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2937                set_sense(&dev->fsa_dev[cid].sense_data,
2938                  ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
2939                  ASENCODE_INVALID_COMMAND, 0, 0);
2940                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2941                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2942                             SCSI_SENSE_BUFFERSIZE));
2943                goto scsi_done_ret;
2944        }
2945
2946        switch (scsicmd->cmnd[0]) {
2947        case READ_6:
2948        case READ_10:
2949        case READ_12:
2950        case READ_16:
2951                if (dev->in_reset)
2952                        return -1;
2953                return aac_read(scsicmd);
2954
2955        case WRITE_6:
2956        case WRITE_10:
2957        case WRITE_12:
2958        case WRITE_16:
2959                if (dev->in_reset)
2960                        return -1;
2961                return aac_write(scsicmd);
2962
2963        case SYNCHRONIZE_CACHE:
2964                if (((aac_cache & 6) == 6) && dev->cache_protected) {
2965                        scsicmd->result = AAC_STAT_GOOD;
2966                        break;
2967                }
2968                /* Issue FIB to tell Firmware to flush its cache */
2969                if ((aac_cache & 6) != 2)
2970                        return aac_synchronize(scsicmd);
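                    /* fall through */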
2971        case INQUIRY:
2972        {
2973                struct inquiry_data inq_data;
2974
2975                dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
2976                memset(&inq_data, 0, sizeof (struct inquiry_data));
2977
2978                if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
2979                        char *arr = (char *)&inq_data;
2980
2981                        /* EVPD bit set */
2982                        arr[0] = (scmd_id(scsicmd) == host->this_id) ?
2983                          INQD_PDT_PROC : INQD_PDT_DA;
2984                        if (scsicmd->cmnd[2] == 0) {
2985                                /* supported vital product data pages */
2986                                arr[3] = 3;
2987                                arr[4] = 0x0;
2988                                arr[5] = 0x80;
2989                                arr[6] = 0x83;
2990                                arr[1] = scsicmd->cmnd[2];
2991                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2992                                                         sizeof(inq_data));
2993                                scsicmd->result = AAC_STAT_GOOD;
2994                        } else if (scsicmd->cmnd[2] == 0x80) {
2995                                /* unit serial number page */
2996                                arr[3] = setinqserial(dev, &arr[4],
2997                                  scmd_id(scsicmd));
2998                                arr[1] = scsicmd->cmnd[2];
2999                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
3000                                                         sizeof(inq_data));
3001                                if (aac_wwn != 2)
3002                                        return aac_get_container_serial(
3003                                                scsicmd);
3004                                scsicmd->result = AAC_STAT_GOOD;
3005                        } else if (scsicmd->cmnd[2] == 0x83) {
3006                                /* vpd page 0x83 - Device Identification Page */
3007                                char *sno = (char *)&inq_data;
3008                                sno[3] = setinqserial(dev, &sno[4],
3009                                                      scmd_id(scsicmd));
3010                                if (aac_wwn != 2)
3011                                        return aac_get_container_serial(
3012                                                scsicmd);
3013                                scsicmd->result = AAC_STAT_GOOD;
3014                        } else {
3015                                /* vpd page not implemented */
3016                                scsicmd->result = DID_OK << 16 |
3017                                  COMMAND_COMPLETE << 8 |
3018                                  SAM_STAT_CHECK_CONDITION;
3019                                set_sense(&dev->fsa_dev[cid].sense_data,
3020                                  ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
3021                                  ASENCODE_NO_SENSE, 7, 2);
3022                                memcpy(scsicmd->sense_buffer,
3023                                  &dev->fsa_dev[cid].sense_data,
3024                                  min_t(size_t,
3025                                        sizeof(dev->fsa_dev[cid].sense_data),
3026                                        SCSI_SENSE_BUFFERSIZE));
3027                        }
3028                        break;
3029                }
3030                inq_data.inqd_ver = 2;  /* claim compliance to SCSI-2 */
3031                inq_data.inqd_rdf = 2;  /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
3032                inq_data.inqd_len = 31;
3033                /*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
3034                inq_data.inqd_pad2 = 0x32;      /* WBus16|Sync|CmdQue */
3035                /*
3036                 *      Set the Vendor, Product, and Revision Level
3037                 *      see: <vendor>.c i.e. aac.c
3038                 */
3039                if (cid == host->this_id) {
3040                        setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
3041                        inq_data.inqd_pdt = INQD_PDT_PROC;      /* Processor device */
3042                        scsi_sg_copy_from_buffer(scsicmd, &inq_data,
3043                                                 sizeof(inq_data));
3044                        scsicmd->result = AAC_STAT_GOOD;
3045                        break;
3046                }
3047                if (dev->in_reset)
3048                        return -1;
3049                setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
3050                inq_data.inqd_pdt = INQD_PDT_DA;        /* Direct/random access device */
3051                scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
3052                return aac_get_container_name(scsicmd);
3053        }
3054        case SERVICE_ACTION_IN_16:
3055                if (!(dev->raw_io_interface) ||
3056                    !(dev->raw_io_64) ||
3057                    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
3058                        break;
3059        {
3060                u64 capacity;
3061                char cp[13];
3062                unsigned int alloc_len;
3063
3064                dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
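                    /*
                     * Build READ CAPACITY(16) parameter data: bytes 0-7 are
                     * the big-endian address of the last logical block,
                     * bytes 8-11 the logical block length in bytes; the
                     * trailing protection byte is left zero.
                     */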
3065                capacity = fsa_dev_ptr[cid].size - 1;
3066                cp[0] = (capacity >> 56) & 0xff;
3067                cp[1] = (capacity >> 48) & 0xff;
3068                cp[2] = (capacity >> 40) & 0xff;
3069                cp[3] = (capacity >> 32) & 0xff;
3070                cp[4] = (capacity >> 24) & 0xff;
3071                cp[5] = (capacity >> 16) & 0xff;
3072                cp[6] = (capacity >> 8) & 0xff;
3073                cp[7] = (capacity >> 0) & 0xff;
3074                cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
3075                cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3076                cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3077                cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
3078                cp[12] = 0;
3079
3080                alloc_len = ((scsicmd->cmnd[10] << 24)
3081                             + (scsicmd->cmnd[11] << 16)
3082                             + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
3083
3084                alloc_len = min_t(size_t, alloc_len, sizeof(cp));
3085                scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
3086                if (alloc_len < scsi_bufflen(scsicmd))
3087                        scsi_set_resid(scsicmd,
3088                                       scsi_bufflen(scsicmd) - alloc_len);
3089
3090                /* Do not cache partition table for arrays */
3091                scsicmd->device->removable = 1;
3092
3093                scsicmd->result = AAC_STAT_GOOD;
3094                break;
3095        }
3096
3097        case READ_CAPACITY:
3098        {
3099                u32 capacity;
3100                char cp[8];
3101
3102                dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
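                    /*
                     * READ CAPACITY(10) can only report a 32-bit last LBA;
                     * for larger containers report 0xffffffff so the
                     * initiator knows to issue READ CAPACITY(16) instead.
                     */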
3103                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3104                        capacity = fsa_dev_ptr[cid].size - 1;
3105                else
3106                        capacity = (u32)-1;
3107
3108                cp[0] = (capacity >> 24) & 0xff;
3109                cp[1] = (capacity >> 16) & 0xff;
3110                cp[2] = (capacity >> 8) & 0xff;
3111                cp[3] = (capacity >> 0) & 0xff;
3112                cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
3113                cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3114                cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3115                cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
3116                scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
3117                /* Do not cache partition table for arrays */
3118                scsicmd->device->removable = 1;
3119                scsicmd->result = AAC_STAT_GOOD;
3120                break;
3121        }
3122
3123        case MODE_SENSE:
3124        {
3125                int mode_buf_length = 4;
3126                u32 capacity;
3127                aac_modep_data mpd;
3128
3129                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3130                        capacity = fsa_dev_ptr[cid].size - 1;
3131                else
3132                        capacity = (u32)-1;
3133
3134                dprintk((KERN_DEBUG "MODE SENSE command.\n"));
3135                memset((char *)&mpd, 0, sizeof(aac_modep_data));
3136
3137                /* Mode data length */
3138                mpd.hd.data_length = sizeof(mpd.hd) - 1;
3139                /* Medium type - default */
3140                mpd.hd.med_type = 0;
3141                /* Device-specific param,
3142                   bit 8: 0/1 = write enabled/protected
3143                   bit 4: 0/1 = FUA enabled */
3144                mpd.hd.dev_par = 0;
3145
3146                if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3147                        mpd.hd.dev_par = 0x10;
3148                if (scsicmd->cmnd[1] & 0x8)
3149                        mpd.hd.bd_length = 0;   /* Block descriptor length */
3150                else {
3151                        mpd.hd.bd_length = sizeof(mpd.bd);
3152                        mpd.hd.data_length += mpd.hd.bd_length;
3153                        mpd.bd.block_length[0] =
3154                                (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3155                        mpd.bd.block_length[1] =
3156                                (fsa_dev_ptr[cid].block_size >> 8) &  0xff;
3157                        mpd.bd.block_length[2] =
3158                                fsa_dev_ptr[cid].block_size  & 0xff;
3159
3160                        mpd.mpc_buf[0] = scsicmd->cmnd[2];
3161                        if (scsicmd->cmnd[2] == 0x1C) {
3162                                /* page length */
3163                                mpd.mpc_buf[1] = 0xa;
3164                                /* Mode data length */
3165                                mpd.hd.data_length = 23;
3166                        } else {
3167                                /* Mode data length */
3168                                mpd.hd.data_length = 15;
3169                        }
3170
3171                        if (capacity > 0xffffff) {
3172                                mpd.bd.block_count[0] = 0xff;
3173                                mpd.bd.block_count[1] = 0xff;
3174                                mpd.bd.block_count[2] = 0xff;
3175                        } else {
3176                                mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
3177                                mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
3178                                mpd.bd.block_count[2] = capacity  & 0xff;
3179                        }
3180                }
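                    /*
                     * Page 0x08 (caching) or 0x3f (all pages): append a
                     * minimal caching mode page whose WCE bit is set unless
                     * write caching has been disabled via aac_cache.
                     */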
3181                if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3182                  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3183                        mpd.hd.data_length += 3;
3184                        mpd.mpc_buf[0] = 8;
3185                        mpd.mpc_buf[1] = 1;
3186                        mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
3187                                ? 0 : 0x04; /* WCE */
3188                        mode_buf_length = sizeof(mpd);
3189                }
3190
3191                if (mode_buf_length > scsicmd->cmnd[4])
3192                        mode_buf_length = scsicmd->cmnd[4];
3193                else
3194                        mode_buf_length = sizeof(mpd);
3195                scsi_sg_copy_from_buffer(scsicmd,
3196                                         (char *)&mpd,
3197                                         mode_buf_length);
3198                scsicmd->result = AAC_STAT_GOOD;
3199                break;
3200        }
3201        case MODE_SENSE_10:
3202        {
3203                u32 capacity;
3204                int mode_buf_length = 8;
3205                aac_modep10_data mpd10;
3206
3207                if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3208                        capacity = fsa_dev_ptr[cid].size - 1;
3209                else
3210                        capacity = (u32)-1;
3211
3212                dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
3213                memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
3214                /* Mode data length (MSB) */
3215                mpd10.hd.data_length[0] = 0;
3216                /* Mode data length (LSB) */
3217                mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
3218                /* Medium type - default */
3219                mpd10.hd.med_type = 0;
3220                /* Device-specific param,
3221                   bit 8: 0/1 = write enabled/protected
3222                   bit 4: 0/1 = FUA enabled */
3223                mpd10.hd.dev_par = 0;
3224
3225                if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3226                        mpd10.hd.dev_par = 0x10;
3227                mpd10.hd.rsrvd[0] = 0;  /* reserved */
3228                mpd10.hd.rsrvd[1] = 0;  /* reserved */
3229                if (scsicmd->cmnd[1] & 0x8) {
3230                        /* Block descriptor length (MSB) */
3231                        mpd10.hd.bd_length[0] = 0;
3232                        /* Block descriptor length (LSB) */
3233                        mpd10.hd.bd_length[1] = 0;
3234                } else {
3235                        mpd10.hd.bd_length[0] = 0;
3236                        mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
3237
3238                        mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
3239
3240                        mpd10.bd.block_length[0] =
3241                                (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3242                        mpd10.bd.block_length[1] =
3243                                (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3244                        mpd10.bd.block_length[2] =
3245                                fsa_dev_ptr[cid].block_size  & 0xff;
3246
3247                        if (capacity > 0xffffff) {
3248                                mpd10.bd.block_count[0] = 0xff;
3249                                mpd10.bd.block_count[1] = 0xff;
3250                                mpd10.bd.block_count[2] = 0xff;
3251                        } else {
3252                                mpd10.bd.block_count[0] =
3253                                        (capacity >> 16) & 0xff;
3254                                mpd10.bd.block_count[1] =
3255                                        (capacity >> 8) & 0xff;
3256                                mpd10.bd.block_count[2] =
3257                                        capacity  & 0xff;
3258                        }
3259                }
3260                if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3261                  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3262                        mpd10.hd.data_length[1] += 3;
3263                        mpd10.mpc_buf[0] = 8;
3264                        mpd10.mpc_buf[1] = 1;
3265                        mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
3266                                ? 0 : 0x04; /* WCE */
3267                        mode_buf_length = sizeof(mpd10);
3268                        if (mode_buf_length > scsicmd->cmnd[8])
3269                                mode_buf_length = scsicmd->cmnd[8];
3270                }
3271                scsi_sg_copy_from_buffer(scsicmd,
3272                                         (char *)&mpd10,
3273                                         mode_buf_length);
3274
3275                scsicmd->result = AAC_STAT_GOOD;
3276                break;
3277        }
3278        case REQUEST_SENSE:
3279                dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
3280                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3281                                sizeof(struct sense_data));
3282                memset(&dev->fsa_dev[cid].sense_data, 0,
3283                                sizeof(struct sense_data));
3284                scsicmd->result = AAC_STAT_GOOD;
3285                break;
3286
3287        case ALLOW_MEDIUM_REMOVAL:
3288                dprintk((KERN_DEBUG "LOCK command.\n"));
3289                if (scsicmd->cmnd[4])
3290                        fsa_dev_ptr[cid].locked = 1;
3291                else
3292                        fsa_dev_ptr[cid].locked = 0;
3293
3294                scsicmd->result = AAC_STAT_GOOD;
3295                break;
3296        /*
3297         *      These commands are all No-Ops
3298         */
3299        case TEST_UNIT_READY:
3300                if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
3301                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3302                                SAM_STAT_CHECK_CONDITION;
3303                        set_sense(&dev->fsa_dev[cid].sense_data,
3304                                  NOT_READY, SENCODE_BECOMING_READY,
3305                                  ASENCODE_BECOMING_READY, 0, 0);
3306                        memcpy(scsicmd->sense_buffer,
3307                               &dev->fsa_dev[cid].sense_data,
3308                               min_t(size_t,
3309                                     sizeof(dev->fsa_dev[cid].sense_data),
3310                                     SCSI_SENSE_BUFFERSIZE));
3311                        break;
3312                }
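                    /* fall through */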
3313        case RESERVE:
3314        case RELEASE:
3315        case REZERO_UNIT:
3316        case REASSIGN_BLOCKS:
3317        case SEEK_10:
3318                scsicmd->result = AAC_STAT_GOOD;
3319                break;
3320
3321        case START_STOP:
3322                return aac_start_stop(scsicmd);
3323
3324
3325        default:
3326        /*
3327         *      Unhandled commands
3328         */
3329                dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
3330                                scsicmd->cmnd[0]));
3331                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3332                                SAM_STAT_CHECK_CONDITION;
3333                set_sense(&dev->fsa_dev[cid].sense_data,
3334                          ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
3335                          ASENCODE_INVALID_COMMAND, 0, 0);
3336                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3337                                min_t(size_t,
3338                                      sizeof(dev->fsa_dev[cid].sense_data),
3339                                      SCSI_SENSE_BUFFERSIZE));
3340        }
3341
3342scsi_done_ret:
3343
3344        scsicmd->scsi_done(scsicmd);
3345        return 0;
3346}
3347
3348static int query_disk(struct aac_dev *dev, void __user *arg)
3349{
3350        struct aac_query_disk qd;
3351        struct fsa_dev_info *fsa_dev_ptr;
3352
3353        fsa_dev_ptr = dev->fsa_dev;
3354        if (!fsa_dev_ptr)
3355                return -EBUSY;
3356        if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
3357                return -EFAULT;
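            /*
             * The caller identifies the container either by SCSI id
             * (cnum == -1) or by container number (bus/id/lun all -1);
             * fill in whichever half was left unspecified.
             */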
3358        if (qd.cnum == -1) {
3359                if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
3360                        return -EINVAL;
3361                qd.cnum = qd.id;
3362        } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
3363                if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
3364                        return -EINVAL;
3365                qd.instance = dev->scsi_host_ptr->host_no;
3366                qd.bus = 0;
3367                qd.id = CONTAINER_TO_ID(qd.cnum);
3368                qd.lun = CONTAINER_TO_LUN(qd.cnum);
3369        } else
3370                return -EINVAL;
3371
3372        qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
3373        qd.locked = fsa_dev_ptr[qd.cnum].locked;
3374        qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
3375
3376        if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
3377                qd.unmapped = 1;
3378        else
3379                qd.unmapped = 0;
3380
3381        strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
3382          min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
3383
3384        if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
3385                return -EFAULT;
3386        return 0;
3387}
3388
3389static int force_delete_disk(struct aac_dev *dev, void __user *arg)
3390{
3391        struct aac_delete_disk dd;
3392        struct fsa_dev_info *fsa_dev_ptr;
3393
3394        fsa_dev_ptr = dev->fsa_dev;
3395        if (!fsa_dev_ptr)
3396                return -EBUSY;
3397
3398        if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3399                return -EFAULT;
3400
3401        if (dd.cnum >= dev->maximum_num_containers)
3402                return -EINVAL;
3403        /*
3404         *      Mark this container as being deleted.
3405         */
3406        fsa_dev_ptr[dd.cnum].deleted = 1;
3407        /*
3408         *      Mark the container as no longer valid
3409         */
3410        fsa_dev_ptr[dd.cnum].valid = 0;
3411        return 0;
3412}
3413
3414static int delete_disk(struct aac_dev *dev, void __user *arg)
3415{
3416        struct aac_delete_disk dd;
3417        struct fsa_dev_info *fsa_dev_ptr;
3418
3419        fsa_dev_ptr = dev->fsa_dev;
3420        if (!fsa_dev_ptr)
3421                return -EBUSY;
3422
3423        if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3424                return -EFAULT;
3425
3426        if (dd.cnum >= dev->maximum_num_containers)
3427                return -EINVAL;
3428        /*
3429         *      If the container is locked, it can not be deleted by the API.
3430         */
3431        if (fsa_dev_ptr[dd.cnum].locked)
3432                return -EBUSY;
3433        else {
3434                /*
3435                 *      Mark the container as no longer being valid.
3436                 */
3437                fsa_dev_ptr[dd.cnum].valid = 0;
3438                fsa_dev_ptr[dd.cnum].devname[0] = '\0';
3439                return 0;
3440        }
3441}
3442
3443int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
3444{
3445        switch (cmd) {
3446        case FSACTL_QUERY_DISK:
3447                return query_disk(dev, arg);
3448        case FSACTL_DELETE_DISK:
3449                return delete_disk(dev, arg);
3450        case FSACTL_FORCE_DELETE_DISK:
3451                return force_delete_disk(dev, arg);
3452        case FSACTL_GET_CONTAINERS:
3453                return aac_get_containers(dev);
3454        default:
3455                return -ENOTTY;
3456        }
3457}
3458
3459/**
3460 *
3461 * aac_srb_callback
3462 * @context: the context set in the fib - here it is scsi cmd
3463 * @fibptr: pointer to the fib
3464 *
3465 * Handles the completion of a SCSI command to a non-DASD device
3466 *
3467 */
3468
3469static void aac_srb_callback(void *context, struct fib * fibptr)
3470{
3471        struct aac_dev *dev;
3472        struct aac_srb_reply *srbreply;
3473        struct scsi_cmnd *scsicmd;
3474
3475        scsicmd = (struct scsi_cmnd *) context;
3476
3477        if (!aac_valid_context(scsicmd, fibptr))
3478                return;
3479
3480        BUG_ON(fibptr == NULL);
3481
3482        dev = fibptr->dev;
3483
3484        srbreply = (struct aac_srb_reply *) fib_data(fibptr);
3485
3486        scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
3487
3488        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3489                /* fast response */
3490                srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
3491                srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
3492        } else {
3493                /*
3494                 *      Calculate resid for sg
3495                 */
3496                scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
3497                                   - le32_to_cpu(srbreply->data_xfer_length));
3498        }
3499
3500
3501        scsi_dma_unmap(scsicmd);
3502
3503        /* expose physical device if expose_physicals flag is on */
3504        if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
3505          && expose_physicals > 0)
3506                aac_expose_phy_device(scsicmd);
3507
3508        /*
3509         * First check the fib status
3510         */
3511
3512        if (le32_to_cpu(srbreply->status) != ST_OK) {
3513                int len;
3514
3515                pr_warn("aac_srb_callback: srb failed, status = %d\n",
3516                                le32_to_cpu(srbreply->status));
3517                len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3518                            SCSI_SENSE_BUFFERSIZE);
3519                scsicmd->result = DID_ERROR << 16
3520                                | COMMAND_COMPLETE << 8
3521                                | SAM_STAT_CHECK_CONDITION;
3522                memcpy(scsicmd->sense_buffer,
3523                                srbreply->sense_data, len);
3524        }
3525
3526        /*
3527         * Next check the srb status
3528         */
3529        switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
3530        case SRB_STATUS_ERROR_RECOVERY:
3531        case SRB_STATUS_PENDING:
3532        case SRB_STATUS_SUCCESS:
3533                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3534                break;
3535        case SRB_STATUS_DATA_OVERRUN:
3536                switch (scsicmd->cmnd[0]) {
3537                case  READ_6:
3538                case  WRITE_6:
3539                case  READ_10:
3540                case  WRITE_10:
3541                case  READ_12:
3542                case  WRITE_12:
3543                case  READ_16:
3544                case  WRITE_16:
3545                        if (le32_to_cpu(srbreply->data_xfer_length)
3546                                                < scsicmd->underflow)
3547                                pr_warn("aacraid: SCSI CMD underflow\n");
3548                        else
3549                                pr_warn("aacraid: SCSI CMD Data Overrun\n");
3550                        scsicmd->result = DID_ERROR << 16
3551                                        | COMMAND_COMPLETE << 8;
3552                        break;
3553                case INQUIRY:
3554                        scsicmd->result = DID_OK << 16
3555                                        | COMMAND_COMPLETE << 8;
3556                        break;
3557                default:
3558                        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3559                        break;
3560                }
3561                break;
3562        case SRB_STATUS_ABORTED:
3563                scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3564                break;
3565        case SRB_STATUS_ABORT_FAILED:
3566                /*
3567                 * Not sure about this one - but assuming the
3568                 * hba was trying to abort for some reason
3569                 */
3570                scsicmd->result = DID_ERROR << 16 | ABORT << 8;
3571                break;
3572        case SRB_STATUS_PARITY_ERROR:
3573                scsicmd->result = DID_PARITY << 16
3574                                | MSG_PARITY_ERROR << 8;
3575                break;
3576        case SRB_STATUS_NO_DEVICE:
3577        case SRB_STATUS_INVALID_PATH_ID:
3578        case SRB_STATUS_INVALID_TARGET_ID:
3579        case SRB_STATUS_INVALID_LUN:
3580        case SRB_STATUS_SELECTION_TIMEOUT:
3581                scsicmd->result = DID_NO_CONNECT << 16
3582                                | COMMAND_COMPLETE << 8;
3583                break;
3584
3585        case SRB_STATUS_COMMAND_TIMEOUT:
3586        case SRB_STATUS_TIMEOUT:
3587                scsicmd->result = DID_TIME_OUT << 16
3588                                | COMMAND_COMPLETE << 8;
3589                break;
3590
3591        case SRB_STATUS_BUSY:
3592                scsicmd->result = DID_BUS_BUSY << 16
3593                                | COMMAND_COMPLETE << 8;
3594                break;
3595
3596        case SRB_STATUS_BUS_RESET:
3597                scsicmd->result = DID_RESET << 16
3598                                | COMMAND_COMPLETE << 8;
3599                break;
3600
3601        case SRB_STATUS_MESSAGE_REJECTED:
3602                scsicmd->result = DID_ERROR << 16
3603                                | MESSAGE_REJECT << 8;
3604                break;
3605        case SRB_STATUS_REQUEST_FLUSHED:
3606        case SRB_STATUS_ERROR:
3607        case SRB_STATUS_INVALID_REQUEST:
3608        case SRB_STATUS_REQUEST_SENSE_FAILED:
3609        case SRB_STATUS_NO_HBA:
3610        case SRB_STATUS_UNEXPECTED_BUS_FREE:
3611        case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
3612        case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
3613        case SRB_STATUS_DELAYED_RETRY:
3614        case SRB_STATUS_BAD_FUNCTION:
3615        case SRB_STATUS_NOT_STARTED:
3616        case SRB_STATUS_NOT_IN_USE:
3617        case SRB_STATUS_FORCE_ABORT:
3618        case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
3619        default:
3620#ifdef AAC_DETAILED_STATUS_INFO
3621                pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
3622                        le32_to_cpu(srbreply->srb_status) & 0x3F,
3623                        aac_get_status_string(
3624                                le32_to_cpu(srbreply->srb_status) & 0x3F),
3625                        scsicmd->cmnd[0],
3626                        le32_to_cpu(srbreply->scsi_status));
3627#endif
3628                /*
3629                 * When the CC bit is SET by the host in ATA pass thru CDB,
3630                 *  driver is supposed to return DID_OK
3631                 *
3632                 * When the CC bit is RESET by the host, driver should
3633                 *  return DID_ERROR
3634                 */
3635                if ((scsicmd->cmnd[0] == ATA_12)
3636                        || (scsicmd->cmnd[0] == ATA_16)) {
3637
3638                        if (scsicmd->cmnd[2] & (0x01 << 5)) {
3639                                scsicmd->result = DID_OK << 16
3640                                        | COMMAND_COMPLETE << 8;
3641                                break;
3642                        } else {
3643                                scsicmd->result = DID_ERROR << 16
3644                                        | COMMAND_COMPLETE << 8;
3645                                break;
3646                        }
3647                } else {
3648                        scsicmd->result = DID_ERROR << 16
3649                                | COMMAND_COMPLETE << 8;
3650                        break;
3651                }
3652        }
3653        if (le32_to_cpu(srbreply->scsi_status)
3654                        == SAM_STAT_CHECK_CONDITION) {
3655                int len;
3656
3657                scsicmd->result |= SAM_STAT_CHECK_CONDITION;
3658                len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3659                            SCSI_SENSE_BUFFERSIZE);
3660#ifdef AAC_DETAILED_STATUS_INFO
3661                pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
3662                                        le32_to_cpu(srbreply->status), len);
3663#endif
3664                memcpy(scsicmd->sense_buffer,
3665                                srbreply->sense_data, len);
3666        }
3667
3668        /*
3669         * OR in the scsi status (already shifted up a bit)
3670         */
3671        scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
3672
3673        aac_fib_complete(fibptr);
3674        scsicmd->scsi_done(scsicmd);
3675}
3676
3677static void hba_resp_task_complete(struct aac_dev *dev,
3678                                        struct scsi_cmnd *scsicmd,
3679                                        struct aac_hba_resp *err)
3680{
3681        scsicmd->result = err->status;
3682        /* set residual count */
3683        scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
3684
3685        switch (err->status) {
3686        case SAM_STAT_GOOD:
3687                scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3688                break;
3689        case SAM_STAT_CHECK_CONDITION:
3690        {
3691                int len;
3692
3693                len = min_t(u8, err->sense_response_data_len,
3694                        SCSI_SENSE_BUFFERSIZE);
3695                if (len)
3696                        memcpy(scsicmd->sense_buffer,
3697                                err->sense_response_buf, len);
3698                scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3699                break;
3700        }
3701        case SAM_STAT_BUSY:
3702                scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
3703                break;
3704        case SAM_STAT_TASK_ABORTED:
3705                scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
3706                break;
3707        case SAM_STAT_RESERVATION_CONFLICT:
3708        case SAM_STAT_TASK_SET_FULL:
3709        default:
3710                scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3711                break;
3712        }
3713}
3714
3715static void hba_resp_task_failure(struct aac_dev *dev,
3716                                        struct scsi_cmnd *scsicmd,
3717                                        struct aac_hba_resp *err)
3718{
3719        switch (err->status) {
3720        case HBA_RESP_STAT_HBAMODE_DISABLED:
3721        {
3722                u32 bus, cid;
3723
3724                bus = aac_logical_to_phys(scmd_channel(scsicmd));
3725                cid = scmd_id(scsicmd);
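                    /*
                     * The controller reports that HBA (pass-through) mode is
                     * no longer enabled for this device, so drop it back to
                     * the ARC raw mapping and let subsequent commands take
                     * the SRB path instead of native HBA FIBs.
                     */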
3726                if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
3727                        dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
3728                        dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
3729                }
3730                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3731                break;
3732        }
3733        case HBA_RESP_STAT_IO_ERROR:
3734        case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
3735                scsicmd->result = DID_OK << 16 |
3736                        COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
3737                break;
3738        case HBA_RESP_STAT_IO_ABORTED:
3739                scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3740                break;
3741        case HBA_RESP_STAT_INVALID_DEVICE:
3742                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3743                break;
3744        case HBA_RESP_STAT_UNDERRUN:
3745                /* UNDERRUN is OK */
3746                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3747                break;
3748        case HBA_RESP_STAT_OVERRUN:
3749        default:
3750                scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3751                break;
3752        }
3753}
3754
3755/**
3756 *
3757 * aac_hba_callback
3758 * @context: the context set in the fib - here it is scsi cmd
3759 * @fibptr: pointer to the fib
3760 *
3761 * Handles the completion of a native HBA SCSI command
3762 *
3763 */
3764void aac_hba_callback(void *context, struct fib *fibptr)
3765{
3766        struct aac_dev *dev;
3767        struct scsi_cmnd *scsicmd;
3768
3769        struct aac_hba_resp *err =
3770                        &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
3771
3772        scsicmd = (struct scsi_cmnd *) context;
3773
3774        if (!aac_valid_context(scsicmd, fibptr))
3775                return;
3776
3777        WARN_ON(fibptr == NULL);
3778        dev = fibptr->dev;
3779
3780        if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
3781                scsi_dma_unmap(scsicmd);
3782
3783        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3784                /* fast response */
3785                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3786                goto out;
3787        }
3788
3789        switch (err->service_response) {
3790        case HBA_RESP_SVCRES_TASK_COMPLETE:
3791                hba_resp_task_complete(dev, scsicmd, err);
3792                break;
3793        case HBA_RESP_SVCRES_FAILURE:
3794                hba_resp_task_failure(dev, scsicmd, err);
3795                break;
3796        case HBA_RESP_SVCRES_TMF_REJECTED:
3797                scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
3798                break;
3799        case HBA_RESP_SVCRES_TMF_LUN_INVALID:
3800                scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3801                break;
3802        case HBA_RESP_SVCRES_TMF_COMPLETE:
3803        case HBA_RESP_SVCRES_TMF_SUCCEEDED:
3804                scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3805                break;
3806        default:
3807                scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3808                break;
3809        }
3810
3811out:
3812        aac_fib_complete(fibptr);
3813
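            /*
             * Task-management FIBs originate inside the driver rather than
             * from the SCSI midlayer, so there is no scsi_done() to call;
             * completion is signalled through SCp.sent_command instead.
             */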
3814        if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
3815                scsicmd->SCp.sent_command = 1;
3816        else
3817                scsicmd->scsi_done(scsicmd);
3818}
3819
3820/**
3821 *
3822 * aac_send_srb_fib
3823 * @scsicmd: the scsi command block
3824 *
3825 * This routine will form a FIB and fill in the aac_srb from the
3826 * scsicmd passed in.
3827 */
3828
3829static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
3830{
3831        struct fib* cmd_fibcontext;
3832        struct aac_dev* dev;
3833        int status;
3834
3835        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3836        if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3837                        scsicmd->device->lun > 7) {
3838                scsicmd->result = DID_NO_CONNECT << 16;
3839                scsicmd->scsi_done(scsicmd);
3840                return 0;
3841        }
3842
3843        /*
3844         *      Allocate and initialize a Fib, then set up the SRB (SCSI pass-through) command
3845         */
3846        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3847        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3848        status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
3849
3850        /*
3851         *      Check that the command was queued to the controller
3852         */
3853        if (status == -EINPROGRESS)
3854                return 0;
3855
3856        printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
3857        aac_fib_complete(cmd_fibcontext);
3858        aac_fib_free(cmd_fibcontext);
3859
3860        return -1;
3861}
3862
3863/**
3864 *
3865 * aac_send_hba_fib
3866 * @scsicmd: the scsi command block
3867 *
3868 * This routine will form a FIB and fill in the aac_hba_cmd_req from the
3869 * scsicmd passed in.
3870 */
3871static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
3872{
3873        struct fib *cmd_fibcontext;
3874        struct aac_dev *dev;
3875        int status;
3876
3877        dev = shost_priv(scsicmd->device->host);
3878        if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3879                        scsicmd->device->lun > AAC_MAX_LUN - 1) {
3880                scsicmd->result = DID_NO_CONNECT << 16;
3881                scsicmd->scsi_done(scsicmd);
3882                return 0;
3883        }
3884
3885        /*
3886         *      Allocate and initialize a Fib, then set up the native HBA command
3887         */
3888        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3889        if (!cmd_fibcontext)
3890                return -1;
3891
3892        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3893        status = aac_adapter_hba(cmd_fibcontext, scsicmd);
3894
3895        /*
3896         *      Check that the command was queued to the controller
3897         */
3898        if (status == -EINPROGRESS)
3899                return 0;
3900
3901        pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
3902                status);
3903        aac_fib_complete(cmd_fibcontext);
3904        aac_fib_free(cmd_fibcontext);
3905
3906        return -1;
3907}
3908
3909
3910static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
3911{
3912        struct aac_dev *dev;
3913        unsigned long byte_count = 0;
3914        int nseg;
3915        struct scatterlist *sg;
3916        int i;
3917
3918        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3919        /* Get rid of old data */
3920        psg->count = 0;
3921        psg->sg[0].addr = 0;
3922        psg->sg[0].count = 0;
3923
3924        nseg = scsi_dma_map(scsicmd);
3925        if (nseg <= 0)
3926                return nseg;
3927
3928        psg->count = cpu_to_le32(nseg);
3929
3930        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3931                psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
3932                psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
3933                byte_count += sg_dma_len(sg);
3934        }
3935        /* hba wants the size to be exact */
3936        if (byte_count > scsi_bufflen(scsicmd)) {
3937                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3938                        (byte_count - scsi_bufflen(scsicmd));
3939                psg->sg[i-1].count = cpu_to_le32(temp);
3940                byte_count = scsi_bufflen(scsicmd);
3941        }
3942        /* Check for command underflow */
3943        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3944                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3945                       byte_count, scsicmd->underflow);
3946        }
3947
3948        return byte_count;
3949}
3950
3951
3952static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
3953{
3954        struct aac_dev *dev;
3955        unsigned long byte_count = 0;
3956        u64 addr;
3957        int nseg;
3958        struct scatterlist *sg;
3959        int i;
3960
3961        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3962        /* Get rid of old data */
3963        psg->count = 0;
3964        psg->sg[0].addr[0] = 0;
3965        psg->sg[0].addr[1] = 0;
3966        psg->sg[0].count = 0;
3967
3968        nseg = scsi_dma_map(scsicmd);
3969        if (nseg <= 0)
3970                return nseg;
3971
3972        scsi_for_each_sg(scsicmd, sg, nseg, i) {
3973                int count = sg_dma_len(sg);
3974                addr = sg_dma_address(sg);
3975                psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
3976                psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
3977                psg->sg[i].count = cpu_to_le32(count);
3978                byte_count += count;
3979        }
3980        psg->count = cpu_to_le32(nseg);
3981        /* hba wants the size to be exact */
3982        if (byte_count > scsi_bufflen(scsicmd)) {
3983                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3984                        (byte_count - scsi_bufflen(scsicmd));
3985                psg->sg[i-1].count = cpu_to_le32(temp);
3986                byte_count = scsi_bufflen(scsicmd);
3987        }
3988        /* Check for command underflow */
3989        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3990                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3991                       byte_count, scsicmd->underflow);
3992        }
3993
3994        return byte_count;
3995}
3996
3997static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
3998{
3999        unsigned long byte_count = 0;
4000        int nseg;
4001        struct scatterlist *sg;
4002        int i;
4003
4004        /* Get rid of old data */
4005        psg->count = 0;
4006        psg->sg[0].next = 0;
4007        psg->sg[0].prev = 0;
4008        psg->sg[0].addr[0] = 0;
4009        psg->sg[0].addr[1] = 0;
4010        psg->sg[0].count = 0;
4011        psg->sg[0].flags = 0;
4012
4013        nseg = scsi_dma_map(scsicmd);
4014        if (nseg <= 0)
4015                return nseg;
4016
4017        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4018                int count = sg_dma_len(sg);
4019                u64 addr = sg_dma_address(sg);
4020                psg->sg[i].next = 0;
4021                psg->sg[i].prev = 0;
4022                psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
4023                psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
4024                psg->sg[i].count = cpu_to_le32(count);
4025                psg->sg[i].flags = 0;
4026                byte_count += count;
4027        }
4028        psg->count = cpu_to_le32(nseg);
4029        /* hba wants the size to be exact */
4030        if (byte_count > scsi_bufflen(scsicmd)) {
4031                u32 temp = le32_to_cpu(psg->sg[i-1].count) -
4032                        (byte_count - scsi_bufflen(scsicmd));
4033                psg->sg[i-1].count = cpu_to_le32(temp);
4034                byte_count = scsi_bufflen(scsicmd);
4035        }
4036        /* Check for command underflow */
4037        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4038                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
4039                       byte_count, scsicmd->underflow);
4040        }
4041
4042        return byte_count;
4043}
4044
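    /*
     * aac_build_sgraw2 - build the IEEE-1212 scatter/gather list for RAW_IO2.
     *
     * Fills @rio2->sge[] from the DMA-mapped scatterlist and tracks whether
     * the list is "conformant", i.e. every element between the first and the
     * last has the same nominal length.  A conformant list is flagged with
     * RIO2_SGL_CONFORMANT directly; otherwise aac_convert_sgraw2() is asked
     * to rebuild the middle elements with a uniform, page-multiple size,
     * provided the result still fits in @sg_max elements.  Returns the mapped
     * byte count, or the (<= 0) scsi_dma_map() result on failure.
     */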
4045static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
4046                                struct aac_raw_io2 *rio2, int sg_max)
4047{
4048        unsigned long byte_count = 0;
4049        int nseg;
4050        struct scatterlist *sg;
4051        int i, conformable = 0;
4052        u32 min_size = PAGE_SIZE, cur_size;
4053
4054        nseg = scsi_dma_map(scsicmd);
4055        if (nseg <= 0)
4056                return nseg;
4057
4058        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4059                int count = sg_dma_len(sg);
4060                u64 addr = sg_dma_address(sg);
4061
4062                BUG_ON(i >= sg_max);
4063                rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
4064                rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
4065                cur_size = cpu_to_le32(count);
4066                rio2->sge[i].length = cur_size;
4067                rio2->sge[i].flags = 0;
4068                if (i == 0) {
4069                        conformable = 1;
4070                        rio2->sgeFirstSize = cur_size;
4071                } else if (i == 1) {
4072                        rio2->sgeNominalSize = cur_size;
4073                        min_size = cur_size;
4074                } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
4075                        conformable = 0;
4076                        if (cur_size < min_size)
4077                                min_size = cur_size;
4078                }
4079                byte_count += count;
4080        }
4081
4082        /* hba wants the size to be exact */
4083        if (byte_count > scsi_bufflen(scsicmd)) {
4084                u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
4085                        (byte_count - scsi_bufflen(scsicmd));
4086                rio2->sge[i-1].length = cpu_to_le32(temp);
4087                byte_count = scsi_bufflen(scsicmd);
4088        }
4089
4090        rio2->sgeCnt = cpu_to_le32(nseg);
4091        rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
4092        /* not conformable: evaluate required sg elements */
4093        if (!conformable) {
4094                int j, nseg_new = nseg, err_found;
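                    /*
                     * Find the largest chunk size (i pages) that evenly
                     * divides every middle element; nseg_new is the number
                     * of elements the list would need after splitting each
                     * middle element into chunks of that size.
                     */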
4095                for (i = min_size / PAGE_SIZE; i >= 1; --i) {
4096                        err_found = 0;
4097                        nseg_new = 2;
4098                        for (j = 1; j < nseg - 1; ++j) {
4099                                if (rio2->sge[j].length % (i*PAGE_SIZE)) {
4100                                        err_found = 1;
4101                                        break;
4102                                }
4103                                nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
4104                        }
4105                        if (!err_found)
4106                                break;
4107                }
4108                if (i > 0 && nseg_new <= sg_max) {
4109                        int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new);
4110
4111                        if (ret < 0)
4112                                return ret;
4113                }
4114        } else
4115                rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4116
4117        /* Check for command underflow */
4118        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4119                printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
4120                       byte_count, scsicmd->underflow);
4121        }
4122
4123        return byte_count;
4124}
4125
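    /*
     * aac_convert_sgraw2 - rewrite a RAW_IO2 SG list into conformant form.
     *
     * Splits each middle element of @rio2 into chunks of @pages pages so
     * that every element between the first and the last ends up with the
     * same length, then flags the list RIO2_SGL_CONFORMANT.  Does nothing
     * when the aac_convert_sgl flag is cleared.  Returns 0 on success or
     * -ENOMEM if the temporary element array cannot be allocated.
     */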
4126static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
4127{
4128        struct sge_ieee1212 *sge;
4129        int i, j, pos;
4130        u32 addr_low;
4131
4132        if (aac_convert_sgl == 0)
4133                return 0;
4134
4135        sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
4136        if (!sge)
4137                return -ENOMEM;
4138
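            /*
             * Element 0 stays as-is, so the temporary array is filled from
             * index 1.  Each middle element is broken into pages*PAGE_SIZE
             * chunks, carrying into addrHigh whenever the 32-bit addrLow
             * offset wraps.
             */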
4139        for (i = 1, pos = 1; i < nseg-1; ++i) {
4140                for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
4141                        addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
4142                        sge[pos].addrLow = addr_low;
4143                        sge[pos].addrHigh = rio2->sge[i].addrHigh;
4144                        if (addr_low < rio2->sge[i].addrLow)
4145                                sge[pos].addrHigh++;
4146                        sge[pos].length = pages * PAGE_SIZE;
4147                        sge[pos].flags = 0;
4148                        pos++;
4149                }
4150        }
4151        sge[pos] = rio2->sge[nseg-1];
4152        memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
4153
4154        kfree(sge);
4155        rio2->sgeCnt = cpu_to_le32(nseg_new);
4156        rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4157        rio2->sgeNominalSize = pages * PAGE_SIZE;
4158        return 0;
4159}
4160
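    /*
     * aac_build_sghba - build the scatter/gather list for an HBA command.
     *
     * If the list fits (nseg <= HBA_MAX_SG_EMBEDDED) its elements are
     * embedded directly in @hbacmd and the last one is tagged with flag
     * 0x40000000.  Larger lists are written starting at hbacmd->sge[2],
     * and hbacmd->sge[0] is turned into a single pointer entry (flag
     * 0x80000000) carrying @sg_address, the caller-supplied address of
     * the separate SG list.  Returns the mapped byte count, or the
     * (<= 0) scsi_dma_map() result on failure.
     */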
4161static long aac_build_sghba(struct scsi_cmnd *scsicmd,
4162                        struct aac_hba_cmd_req *hbacmd,
4163                        int sg_max,
4164                        u64 sg_address)
4165{
4166        unsigned long byte_count = 0;
4167        int nseg;
4168        struct scatterlist *sg;
4169        int i;
4170        u32 cur_size;
4171        struct aac_hba_sgl *sge;
4172
4173        nseg = scsi_dma_map(scsicmd);
4174        if (nseg <= 0) {
4175                byte_count = nseg;
4176                goto out;
4177        }
4178
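            /*
             * For lists too large to embed, data elements start at sge[2];
             * sge[0] is rewritten below as the pointer to the external list.
             */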
4179        if (nseg > HBA_MAX_SG_EMBEDDED)
4180                sge = &hbacmd->sge[2];
4181        else
4182                sge = &hbacmd->sge[0];
4183
4184        scsi_for_each_sg(scsicmd, sg, nseg, i) {
4185                int count = sg_dma_len(sg);
4186                u64 addr = sg_dma_address(sg);
4187
4188                WARN_ON(i >= sg_max);
4189                sge->addr_hi = cpu_to_le32((u32)(addr>>32));
4190                sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
4191                cur_size = cpu_to_le32(count);
4192                sge->len = cur_size;
4193                sge->flags = 0;
4194                byte_count += count;
4195                sge++;
4196        }
4197
4198        sge--;
4199        /* hba wants the size to be exact */
4200        if (byte_count > scsi_bufflen(scsicmd)) {
4201                u32 temp;
4202
4203                temp = le32_to_cpu(sge->len) -
4204                        (byte_count - scsi_bufflen(scsicmd));
4205                sge->len = cpu_to_le32(temp);
4206                byte_count = scsi_bufflen(scsicmd);
4207        }
4208
4209        if (nseg <= HBA_MAX_SG_EMBEDDED) {
4210                hbacmd->emb_data_desc_count = (u8)nseg;
4211                sge->flags = cpu_to_le32(0x40000000);
4212        } else {
4213                /* not embedded */
4214                hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
4215                hbacmd->emb_data_desc_count = 1;
4216                hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
4217                hbacmd->sge[0].addr_lo =
4218                        cpu_to_le32((u32)(sg_address & 0xffffffff));
4219        }
4220
4221        /* Check for command underflow */
4222        if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4223                pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
4224                                byte_count, scsicmd->underflow);
4225        }
4226out:
4227        return byte_count;
4228}
4229
4230#ifdef AAC_DETAILED_STATUS_INFO
4231
4232struct aac_srb_status_info {
4233        u32     status;
4234        char    *str;
4235};
4236
4237
4238static struct aac_srb_status_info srb_status_info[] = {
4239        { SRB_STATUS_PENDING,           "Pending Status"},
4240        { SRB_STATUS_SUCCESS,           "Success"},
4241        { SRB_STATUS_ABORTED,           "Aborted Command"},
4242        { SRB_STATUS_ABORT_FAILED,      "Abort Failed"},
4243        { SRB_STATUS_ERROR,             "Error Event"},
4244        { SRB_STATUS_BUSY,              "Device Busy"},
4245        { SRB_STATUS_INVALID_REQUEST,   "Invalid Request"},
4246        { SRB_STATUS_INVALID_PATH_ID,   "Invalid Path ID"},
4247        { SRB_STATUS_NO_DEVICE,         "No Device"},
4248        { SRB_STATUS_TIMEOUT,           "Timeout"},
4249        { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
4250        { SRB_STATUS_COMMAND_TIMEOUT,   "Command Timeout"},
4251        { SRB_STATUS_MESSAGE_REJECTED,  "Message Rejected"},
4252        { SRB_STATUS_BUS_RESET,         "Bus Reset"},
4253        { SRB_STATUS_PARITY_ERROR,      "Parity Error"},
4254        { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
4255        { SRB_STATUS_NO_HBA,            "No HBA"},
4256        { SRB_STATUS_DATA_OVERRUN,      "Data Overrun/Data Underrun"},
4257        { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
4258        { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
4259        { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
4260        { SRB_STATUS_REQUEST_FLUSHED,   "Request Flushed"},
4261        { SRB_STATUS_DELAYED_RETRY,     "Delayed Retry"},
4262        { SRB_STATUS_INVALID_LUN,       "Invalid LUN"},
4263        { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
4264        { SRB_STATUS_BAD_FUNCTION,      "Bad Function"},
4265        { SRB_STATUS_ERROR_RECOVERY,    "Error Recovery"},
4266        { SRB_STATUS_NOT_STARTED,       "Not Started"},
4267        { SRB_STATUS_NOT_IN_USE,        "Not In Use"},
4268        { SRB_STATUS_FORCE_ABORT,       "Force Abort"},
4269        { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
4270        { 0xff,                         "Unknown Error"}
4271};
4272
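    /*
     * aac_get_status_string - map an SRB status code to readable text.
     *
     * Returns the matching string from srb_status_info[], or
     * "Bad Status Code" if the value is not in the table.
     */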
4273char *aac_get_status_string(u32 status)
4274{
4275        int i;
4276
4277        for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
4278                if (srb_status_info[i].status == status)
4279                        return srb_status_info[i].str;
4280
4281        return "Bad Status Code";
4282}
4283
4284#endif
4285