linux/drivers/scsi/aacraid/commsup.c
   1/*
   2 *      Adaptec AAC series RAID controller driver
   3 *      (c) Copyright 2001 Red Hat Inc.
   4 *
   5 * based on the old aacraid driver that is..
   6 * Adaptec aacraid device driver for Linux.
   7 *
   8 * Copyright (c) 2000-2010 Adaptec, Inc.
   9 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  10 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2, or (at your option)
  15 * any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; see the file COPYING.  If not, write to
  24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  25 *
  26 * Module Name:
  27 *  commsup.c
  28 *
   29 * Abstract: Contains all routines that are required for FSA host/adapter
  30 *    communication.
  31 *
  32 */
  33
  34#include <linux/kernel.h>
  35#include <linux/init.h>
  36#include <linux/crash_dump.h>
  37#include <linux/types.h>
  38#include <linux/sched.h>
  39#include <linux/pci.h>
  40#include <linux/spinlock.h>
  41#include <linux/slab.h>
  42#include <linux/completion.h>
  43#include <linux/blkdev.h>
  44#include <linux/delay.h>
  45#include <linux/kthread.h>
  46#include <linux/interrupt.h>
  47#include <linux/bcd.h>
  48#include <scsi/scsi.h>
  49#include <scsi/scsi_host.h>
  50#include <scsi/scsi_device.h>
  51#include <scsi/scsi_cmnd.h>
  52
  53#include "aacraid.h"
  54
  55/**
  56 *      fib_map_alloc           -       allocate the fib objects
  57 *      @dev: Adapter to allocate for
  58 *
  59 *      Allocate and map the shared PCI space for the FIB blocks used to
  60 *      talk to the Adaptec firmware.
  61 */
  62
  63static int fib_map_alloc(struct aac_dev *dev)
  64{
   65        if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
   66                dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
   67        else
   68                dev->max_cmd_size = dev->max_fib_size;
  74
  75        dprintk((KERN_INFO
  76          "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
  77          &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
  78          AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
  79        dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
  80                (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
  81                * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
  82                &dev->hw_fib_pa, GFP_KERNEL);
  83        if (dev->hw_fib_va == NULL)
  84                return -ENOMEM;
  85        return 0;
  86}
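
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * example_fib_pool_size() is a hypothetical helper that just restates the
 * size passed to dma_alloc_coherent() above: one transport header plus one
 * command buffer per fib slot, with ALIGN32 - 1 bytes of slack so that
 * aac_fib_setup() can round the first fib up to a 32-byte boundary.
 */
static inline size_t __maybe_unused example_fib_pool_size(struct aac_dev *dev)
{
        size_t per_fib = dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
        size_t nr_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;

        /* same expression as the dma_alloc_coherent() call above */
        return per_fib * nr_fibs + (ALIGN32 - 1);
}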
  87
  88/**
  89 *      aac_fib_map_free                -       free the fib objects
  90 *      @dev: Adapter to free
  91 *
  92 *      Free the PCI mappings and the memory allocated for FIB blocks
  93 *      on this adapter.
  94 */
  95
  96void aac_fib_map_free(struct aac_dev *dev)
  97{
  98        size_t alloc_size;
  99        size_t fib_size;
 100        int num_fibs;
 101
 102        if(!dev->hw_fib_va || !dev->max_cmd_size)
 103                return;
 104
 105        num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
 106        fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
 107        alloc_size = fib_size * num_fibs + ALIGN32 - 1;
 108
 109        dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
 110                          dev->hw_fib_pa);
 111
 112        dev->hw_fib_va = NULL;
 113        dev->hw_fib_pa = 0;
 114}
 115
 116void aac_fib_vector_assign(struct aac_dev *dev)
 117{
 118        u32 i = 0;
 119        u32 vector = 1;
 120        struct fib *fibptr = NULL;
 121
 122        for (i = 0, fibptr = &dev->fibs[i];
 123                i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
 124                i++, fibptr++) {
 125                if ((dev->max_msix == 1) ||
 126                  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
 127                        - dev->vector_cap))) {
 128                        fibptr->vector_no = 0;
 129                } else {
 130                        fibptr->vector_no = vector;
 131                        vector++;
 132                        if (vector == dev->max_msix)
 133                                vector = 1;
 134                }
 135        }
 136}
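
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * With max_msix == 4, the loop above hands out vectors 1, 2, 3, 1, 2, 3, ...
 * to the regular I/O fibs and forces vector 0 onto the fibs beyond
 * dev->vector_cap. example_dump_fib_vectors() is a hypothetical debug
 * helper showing the resulting mapping.
 */
static void __maybe_unused example_dump_fib_vectors(struct aac_dev *dev)
{
        u32 i;
        u32 total = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;

        for (i = 0; i < total; i++)
                pr_debug("fib %u -> MSI-X vector %u\n",
                         i, dev->fibs[i].vector_no);
}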
 137
 138/**
 139 *      aac_fib_setup   -       setup the fibs
 140 *      @dev: Adapter to set up
 141 *
 142 *      Allocate the PCI space for the fibs, map it and then initialise the
 143 *      fib area, the unmapped fib data and also the free list
 144 */
 145
 146int aac_fib_setup(struct aac_dev * dev)
 147{
 148        struct fib *fibptr;
 149        struct hw_fib *hw_fib;
 150        dma_addr_t hw_fib_pa;
 151        int i;
 152        u32 max_cmds;
 153
 154        while (((i = fib_map_alloc(dev)) == -ENOMEM)
 155         && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
 156                max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
 157                dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
 158                if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
 159                        dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
 160        }
 161        if (i<0)
 162                return -ENOMEM;
 163
 164        memset(dev->hw_fib_va, 0,
 165                (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
 166                (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
 167
 168        /* 32 byte alignment for PMC */
 169        hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
 170        hw_fib    = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
 171                                        (hw_fib_pa - dev->hw_fib_pa));
 172
 173        /* add Xport header */
 174        hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
 175                sizeof(struct aac_fib_xporthdr));
 176        hw_fib_pa += sizeof(struct aac_fib_xporthdr);
 177
 178        /*
 179         *      Initialise the fibs
 180         */
 181        for (i = 0, fibptr = &dev->fibs[i];
 182                i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
 183                i++, fibptr++)
 184        {
 185                fibptr->flags = 0;
 186                fibptr->size = sizeof(struct fib);
 187                fibptr->dev = dev;
 188                fibptr->hw_fib_va = hw_fib;
 189                fibptr->data = (void *) fibptr->hw_fib_va->data;
 190                fibptr->next = fibptr+1;        /* Forward chain the fibs */
 191                init_completion(&fibptr->event_wait);
 192                spin_lock_init(&fibptr->event_lock);
 193                hw_fib->header.XferState = cpu_to_le32(0xffffffff);
 194                hw_fib->header.SenderSize =
 195                        cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */
 196                fibptr->hw_fib_pa = hw_fib_pa;
 197                fibptr->hw_sgl_pa = hw_fib_pa +
 198                        offsetof(struct aac_hba_cmd_req, sge[2]);
 199                /*
 200                 * one element is for the ptr to the separate sg list,
 201                 * second element for 32 byte alignment
 202                 */
 203                fibptr->hw_error_pa = hw_fib_pa +
 204                        offsetof(struct aac_native_hba, resp.resp_bytes[0]);
 205
 206                hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
 207                        dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
 208                hw_fib_pa = hw_fib_pa +
 209                        dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
 210        }
 211
 212        /*
  213         *      Assign vector numbers to fibs
 214         */
 215        aac_fib_vector_assign(dev);
 216
 217        /*
 218         *      Add the fib chain to the free list
 219         */
 220        dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
 221        /*
 222        *       Set 8 fibs aside for management tools
 223        */
 224        dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
 225        return 0;
 226}
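
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * example_bringup_fibs() is a hypothetical caller showing the intended
 * pairing: aac_fib_setup() retries internally with a halved can_queue on
 * -ENOMEM, and aac_fib_map_free() releases the coherent pool on teardown.
 */
static int __maybe_unused example_bringup_fibs(struct aac_dev *dev)
{
        int err = aac_fib_setup(dev);

        if (err)        /* still -ENOMEM after shrinking can_queue */
                return err;

        /* ... adapter in service ... */

        aac_fib_map_free(dev);  /* teardown */
        return 0;
}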
 227
 228/**
  229 *      aac_fib_alloc_tag       -       allocate a fib using tags
  230 *      @dev: Adapter to allocate the fib for
 *      @scmd: SCSI command whose blk-mq tag selects the fib
 231 *
 232 *      Allocate a fib from the adapter fib pool using tags
 233 *      from the blk layer.
 234 */
 235
 236struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
 237{
 238        struct fib *fibptr;
 239
 240        fibptr = &dev->fibs[scmd->request->tag];
 241        /*
 242         *      Null out fields that depend on being zero at the start of
 243         *      each I/O
 244         */
 245        fibptr->hw_fib_va->header.XferState = 0;
 246        fibptr->type = FSAFS_NTC_FIB_CONTEXT;
 247        fibptr->callback_data = NULL;
 248        fibptr->callback = NULL;
 249
 250        return fibptr;
 251}
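
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * In the queuecommand path the blk-mq tag indexes directly into dev->fibs,
 * so no lock or free list is involved; the slot is implicitly reusable once
 * the request completes. example_fib_for_cmd() is a hypothetical wrapper.
 */
static struct fib *__maybe_unused example_fib_for_cmd(struct aac_dev *dev,
                                                      struct scsi_cmnd *scmd)
{
        struct fib *fibptr = aac_fib_alloc_tag(dev, scmd);

        /* callers typically stash the command as completion context */
        fibptr->callback_data = scmd;
        return fibptr;
}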
 252
 253/**
 254 *      aac_fib_alloc   -       allocate a fib
 255 *      @dev: Adapter to allocate the fib for
 256 *
 257 *      Allocate a fib from the adapter fib pool. If the pool is empty we
 258 *      return NULL.
 259 */
 260
 261struct fib *aac_fib_alloc(struct aac_dev *dev)
 262{
 263        struct fib * fibptr;
 264        unsigned long flags;
 265        spin_lock_irqsave(&dev->fib_lock, flags);
 266        fibptr = dev->free_fib;
 267        if(!fibptr){
 268                spin_unlock_irqrestore(&dev->fib_lock, flags);
 269                return fibptr;
 270        }
 271        dev->free_fib = fibptr->next;
 272        spin_unlock_irqrestore(&dev->fib_lock, flags);
 273        /*
 274         *      Set the proper node type code and node byte size
 275         */
 276        fibptr->type = FSAFS_NTC_FIB_CONTEXT;
 277        fibptr->size = sizeof(struct fib);
 278        /*
 279         *      Null out fields that depend on being zero at the start of
 280         *      each I/O
 281         */
 282        fibptr->hw_fib_va->header.XferState = 0;
 283        fibptr->flags = 0;
 284        fibptr->callback = NULL;
 285        fibptr->callback_data = NULL;
 286
 287        return fibptr;
 288}
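
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * example_fib_lifecycle() shows the usual life cycle of a pool fib:
 * aac_fib_alloc() may return NULL when the free list is exhausted, and a
 * finished fib must go back through aac_fib_complete()/aac_fib_free().
 */
static int __maybe_unused example_fib_lifecycle(struct aac_dev *dev)
{
        struct fib *fibptr = aac_fib_alloc(dev);

        if (!fibptr)
                return -ENOMEM;

        aac_fib_init(fibptr);
        /* ... build a request in fib_data(fibptr) and send it ... */
        aac_fib_complete(fibptr);
        aac_fib_free(fibptr);
        return 0;
}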
 289
 290/**
 291 *      aac_fib_free    -       free a fib
 292 *      @fibptr: fib to free up
 293 *
 294 *      Frees up a fib and places it on the appropriate queue
 295 */
 296
 297void aac_fib_free(struct fib *fibptr)
 298{
 299        unsigned long flags;
 300
 301        if (fibptr->done == 2)
 302                return;
 303
 304        spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
 305        if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
 306                aac_config.fib_timeouts++;
 307        if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
 308                fibptr->hw_fib_va->header.XferState != 0) {
 309                printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
 310                         (void*)fibptr,
 311                         le32_to_cpu(fibptr->hw_fib_va->header.XferState));
 312        }
 313        fibptr->next = fibptr->dev->free_fib;
 314        fibptr->dev->free_fib = fibptr;
 315        spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
 316}
 317
 318/**
 319 *      aac_fib_init    -       initialise a fib
 320 *      @fibptr: The fib to initialize
 321 *
 322 *      Set up the generic fib fields ready for use
 323 */
 324
 325void aac_fib_init(struct fib *fibptr)
 326{
 327        struct hw_fib *hw_fib = fibptr->hw_fib_va;
 328
 329        memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
 330        hw_fib->header.StructType = FIB_MAGIC;
 331        hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
 332        hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
 333        hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
 334        hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
 335}
 336
 337/**
  338 *      fib_dealloc             -       deallocate a fib
 339 *      @fibptr: fib to deallocate
 340 *
 341 *      Will deallocate and return to the free pool the FIB pointed to by the
 342 *      caller.
 343 */
 344
 345static void fib_dealloc(struct fib * fibptr)
 346{
 347        struct hw_fib *hw_fib = fibptr->hw_fib_va;
 348        hw_fib->header.XferState = 0;
 349}
 350
 351/*
  352 *      Communication primitives define and support the queuing method we use to
  353 *      support host to adapter communication. All queue accesses happen through
  354 *      these routines and they are the only routines which have knowledge of
  355 *      how these queues are implemented.
 356 */
 357
 358/**
 359 *      aac_get_entry           -       get a queue entry
 360 *      @dev: Adapter
 361 *      @qid: Queue Number
 362 *      @entry: Entry return
 363 *      @index: Index return
 364 *      @nonotify: notification control
 365 *
  366 *      With a priority the routine returns a queue entry if the queue has free entries. If the queue
  367 *      is full (no free entries) then no entry is returned and the function returns 0; otherwise 1 is
  368 *      returned.
 369 */
 370
 371static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
 372{
 373        struct aac_queue * q;
 374        unsigned long idx;
 375
 376        /*
 377         *      All of the queues wrap when they reach the end, so we check
 378         *      to see if they have reached the end and if they have we just
 379         *      set the index back to zero. This is a wrap. You could or off
 380         *      the high bits in all updates but this is a bit faster I think.
 381         */
 382
 383        q = &dev->queues->queue[qid];
 384
 385        idx = *index = le32_to_cpu(*(q->headers.producer));
 386        /* Interrupt Moderation, only interrupt for first two entries */
 387        if (idx != le32_to_cpu(*(q->headers.consumer))) {
 388                if (--idx == 0) {
 389                        if (qid == AdapNormCmdQueue)
 390                                idx = ADAP_NORM_CMD_ENTRIES;
 391                        else
 392                                idx = ADAP_NORM_RESP_ENTRIES;
 393                }
 394                if (idx != le32_to_cpu(*(q->headers.consumer)))
 395                        *nonotify = 1;
 396        }
 397
 398        if (qid == AdapNormCmdQueue) {
 399                if (*index >= ADAP_NORM_CMD_ENTRIES)
 400                        *index = 0; /* Wrap to front of the Producer Queue. */
 401        } else {
 402                if (*index >= ADAP_NORM_RESP_ENTRIES)
 403                        *index = 0; /* Wrap to front of the Producer Queue. */
 404        }
 405
 406        /* Queue is full */
 407        if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
 408                printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
 409                                qid, atomic_read(&q->numpending));
 410                return 0;
 411        } else {
 412                *entry = q->base + *index;
 413                return 1;
 414        }
 415}
 416
 417/**
 418 *      aac_queue_get           -       get the next free QE
 419 *      @dev: Adapter
 420 *      @index: Returned index
  421 *      @qid: Queue number
  422 *      @hw_fib: Fib to associate with the queue entry
 423 *      @wait: Wait if queue full
 424 *      @fibptr: Driver fib object to go with fib
 425 *      @nonotify: Don't notify the adapter
 426 *
  427 *      Gets the next free QE off the requested priority adapter command
 428 *      queue and associates the Fib with the QE. The QE represented by
 429 *      index is ready to insert on the queue when this routine returns
 430 *      success.
 431 */
 432
 433int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
 434{
 435        struct aac_entry * entry = NULL;
 436        int map = 0;
 437
 438        if (qid == AdapNormCmdQueue) {
 439                /*  if no entries wait for some if caller wants to */
 440                while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
 441                        printk(KERN_ERR "GetEntries failed\n");
 442                }
 443                /*
 444                 *      Setup queue entry with a command, status and fib mapped
 445                 */
 446                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
 447                map = 1;
 448        } else {
 449                while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
 450                        /* if no entries wait for some if caller wants to */
 451                }
 452                /*
 453                 *      Setup queue entry with command, status and fib mapped
 454                 */
 455                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
 456                entry->addr = hw_fib->header.SenderFibAddress;
  457                        /* Restore the adapter's pointer to the FIB */
  458                hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;  /* Let the adapter know where to find its data */
 459                map = 0;
 460        }
 461        /*
  462         *      If MapFib is true then we need to map the Fib and put pointers
 463         *      in the queue entry.
 464         */
 465        if (map)
 466                entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
 467        return 0;
 468}
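
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * example_post_response() restates the producer-side sequence used by
 * aac_fib_adapter_complete() further down in this file: reserve an entry
 * under q->lock, publish it by bumping the producer index, then notify the
 * adapter unless interrupt moderation suppressed the doorbell.
 */
static void __maybe_unused example_post_response(struct aac_dev *dev,
                                                 struct hw_fib *hw_fib)
{
        struct aac_queue *q = &dev->queues->queue[AdapNormRespQueue];
        unsigned long nointr = 0;
        unsigned long qflags;
        u32 index;

        spin_lock_irqsave(q->lock, qflags);
        aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
        *(q->headers.producer) = cpu_to_le32(index + 1);
        spin_unlock_irqrestore(q->lock, qflags);
        if (!(nointr & (int)aac_config.irq_mod))
                aac_adapter_notify(dev, AdapNormRespQueue);
}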
 469
 470/*
 471 *      Define the highest level of host to adapter communication routines.
  472 *      These routines will support host to adapter FS communication. These
  473 *      routines have no knowledge of the communication method used. This level
 474 *      sends and receives FIBs. This level has no knowledge of how these FIBs
 475 *      get passed back and forth.
 476 */
 477
 478/**
 479 *      aac_fib_send    -       send a fib to the adapter
 480 *      @command: Command to send
 481 *      @fibptr: The fib
 482 *      @size: Size of fib data area
 483 *      @priority: Priority of Fib
 484 *      @wait: Async/sync select
 485 *      @reply: True if a reply is wanted
 486 *      @callback: Called with reply
 487 *      @callback_data: Passed to callback
 488 *
 489 *      Sends the requested FIB to the adapter and optionally will wait for a
  490 *      response FIB. If the caller does not wish to wait for a response, then
 491 *      an event to wait on must be supplied. This event will be set when a
 492 *      response FIB is received from the adapter.
 493 */
 494
 495int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 496                int priority, int wait, int reply, fib_callback callback,
 497                void *callback_data)
 498{
 499        struct aac_dev * dev = fibptr->dev;
 500        struct hw_fib * hw_fib = fibptr->hw_fib_va;
 501        unsigned long flags = 0;
 502        unsigned long mflags = 0;
 503        unsigned long sflags = 0;
 504
 505        if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
 506                return -EBUSY;
 507
 508        if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
 509                return -EINVAL;
 510
 511        /*
 512         *      There are 5 cases with the wait and response requested flags.
 513         *      The only invalid cases are if the caller requests to wait and
 514         *      does not request a response and if the caller does not want a
 515         *      response and the Fib is not allocated from pool. If a response
  516         *      is not requested the Fib will just be deallocated by the DPC
 517         *      routine when the response comes back from the adapter. No
 518         *      further processing will be done besides deleting the Fib. We
 519         *      will have a debug mode where the adapter can notify the host
 520         *      it had a problem and the host can log that fact.
 521         */
 522        fibptr->flags = 0;
 523        if (wait && !reply) {
 524                return -EINVAL;
 525        } else if (!wait && reply) {
 526                hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
 527                FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
 528        } else if (!wait && !reply) {
 529                hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
 530                FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
 531        } else if (wait && reply) {
 532                hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
 533                FIB_COUNTER_INCREMENT(aac_config.NormalSent);
 534        }
 535        /*
 536         *      Map the fib into 32bits by using the fib number
 537         */
 538
 539        hw_fib->header.SenderFibAddress =
 540                cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
 541
 542        /* use the same shifted value for handle to be compatible
 543         * with the new native hba command handle
 544         */
 545        hw_fib->header.Handle =
 546                cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
 547
 548        /*
 549         *      Set FIB state to indicate where it came from and if we want a
 550         *      response from the adapter. Also load the command from the
 551         *      caller.
 552         *
 553         *      Map the hw fib pointer as a 32bit value
 554         */
 555        hw_fib->header.Command = cpu_to_le16(command);
 556        hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
 557        /*
 558         *      Set the size of the Fib we want to send to the adapter
 559         */
 560        hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
 561        if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
 562                return -EMSGSIZE;
 563        }
 564        /*
  565         *      Get a queue entry, connect the FIB to it, and then notify
  566         *      the adapter that a command is ready.
 567         */
 568        hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
 569
 570        /*
 571         *      Fill in the Callback and CallbackContext if we are not
 572         *      going to wait.
 573         */
 574        if (!wait) {
 575                fibptr->callback = callback;
 576                fibptr->callback_data = callback_data;
 577                fibptr->flags = FIB_CONTEXT_FLAG;
 578        }
 579
 580        fibptr->done = 0;
 581
 582        FIB_COUNTER_INCREMENT(aac_config.FibsSent);
 583
  584        dprintk((KERN_DEBUG "Fib contents:\n"));
 585        dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
 586        dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
 587        dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
 588        dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
 589        dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
 590        dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
 591
 592        if (!dev->queues)
 593                return -EBUSY;
 594
 595        if (wait) {
 596
 597                spin_lock_irqsave(&dev->manage_lock, mflags);
 598                if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
 599                        printk(KERN_INFO "No management Fibs Available:%d\n",
 600                                                dev->management_fib_count);
 601                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
 602                        return -EBUSY;
 603                }
 604                dev->management_fib_count++;
 605                spin_unlock_irqrestore(&dev->manage_lock, mflags);
 606                spin_lock_irqsave(&fibptr->event_lock, flags);
 607        }
 608
 609        if (dev->sync_mode) {
 610                if (wait)
 611                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
 612                spin_lock_irqsave(&dev->sync_lock, sflags);
 613                if (dev->sync_fib) {
 614                        list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
 615                        spin_unlock_irqrestore(&dev->sync_lock, sflags);
 616                } else {
 617                        dev->sync_fib = fibptr;
 618                        spin_unlock_irqrestore(&dev->sync_lock, sflags);
 619                        aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
 620                                (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
 621                                NULL, NULL, NULL, NULL, NULL);
 622                }
 623                if (wait) {
 624                        fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
 625                        if (wait_for_completion_interruptible(&fibptr->event_wait)) {
 626                                fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
 627                                return -EFAULT;
 628                        }
 629                        return 0;
 630                }
 631                return -EINPROGRESS;
 632        }
 633
 634        if (aac_adapter_deliver(fibptr) != 0) {
 635                printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
 636                if (wait) {
 637                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
 638                        spin_lock_irqsave(&dev->manage_lock, mflags);
 639                        dev->management_fib_count--;
 640                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
 641                }
 642                return -EBUSY;
 643        }
 644
 645
 646        /*
 647         *      If the caller wanted us to wait for response wait now.
 648         */
 649
 650        if (wait) {
 651                spin_unlock_irqrestore(&fibptr->event_lock, flags);
  652                /* Only set for first known interruptible command */
 653                if (wait < 0) {
 654                        /*
 655                         * *VERY* Dangerous to time out a command, the
 656                         * assumption is made that we have no hope of
 657                         * functioning because an interrupt routing or other
 658                         * hardware failure has occurred.
 659                         */
 660                        unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
 661                        while (!try_wait_for_completion(&fibptr->event_wait)) {
 662                                int blink;
 663                                if (time_is_before_eq_jiffies(timeout)) {
 664                                        struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
 665                                        atomic_dec(&q->numpending);
 666                                        if (wait == -1) {
 667                                                printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
 668                                                  "Usually a result of a PCI interrupt routing problem;\n"
 669                                                  "update mother board BIOS or consider utilizing one of\n"
 670                                                  "the SAFE mode kernel options (acpi, apic etc)\n");
 671                                        }
 672                                        return -ETIMEDOUT;
 673                                }
 674
 675                                if (unlikely(pci_channel_offline(dev->pdev)))
 676                                        return -EFAULT;
 677
 678                                if ((blink = aac_adapter_check_health(dev)) > 0) {
 679                                        if (wait == -1) {
 680                                                printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
 681                                                  "Usually a result of a serious unrecoverable hardware problem\n",
 682                                                  blink);
 683                                        }
 684                                        return -EFAULT;
 685                                }
 686                                /*
 687                                 * Allow other processes / CPUS to use core
 688                                 */
 689                                schedule();
 690                        }
 691                } else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
 692                        /* Do nothing ... satisfy
 693                         * wait_for_completion_interruptible must_check */
 694                }
 695
 696                spin_lock_irqsave(&fibptr->event_lock, flags);
 697                if (fibptr->done == 0) {
 698                        fibptr->done = 2; /* Tell interrupt we aborted */
 699                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
 700                        return -ERESTARTSYS;
 701                }
 702                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 703                BUG_ON(fibptr->done == 0);
 704
 705                if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
 706                        return -ETIMEDOUT;
 707                return 0;
 708        }
 709        /*
  710         *      If the user does not want a response then return success, otherwise
  711         *      return pending.
 712         */
 713        if (reply)
 714                return -EINPROGRESS;
 715        else
 716                return 0;
 717}
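
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * example_sync_container_cmd() shows the blocking calling convention:
 * wait == 1 with reply == 1 and no callback sleeps on the fib's completion,
 * while an asynchronous caller would pass wait == 0 plus a callback and
 * treat -EINPROGRESS as success. The payload here is only hinted at.
 */
static int __maybe_unused example_sync_container_cmd(struct aac_dev *dev)
{
        struct fib *fibptr = aac_fib_alloc(dev);
        int status;

        if (!fibptr)
                return -ENOMEM;

        aac_fib_init(fibptr);
        /* ... fill a struct aac_query_mount at fib_data(fibptr) ... */
        status = aac_fib_send(ContainerCommand, fibptr,
                              sizeof(struct aac_query_mount), FsaNormal,
                              1, 1, NULL, NULL);
        if (status >= 0)
                aac_fib_complete(fibptr);
        aac_fib_free(fibptr);
        return status;
}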
 718
 719int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 720                void *callback_data)
 721{
 722        struct aac_dev *dev = fibptr->dev;
 723        int wait;
 724        unsigned long flags = 0;
 725        unsigned long mflags = 0;
 726        struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
 727                        fibptr->hw_fib_va;
 728
 729        fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
 730        if (callback) {
 731                wait = 0;
 732                fibptr->callback = callback;
 733                fibptr->callback_data = callback_data;
 734        } else
 735                wait = 1;
 736
 737
 738        hbacmd->iu_type = command;
 739
 740        if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
 741                /* bit1 of request_id must be 0 */
 742                hbacmd->request_id =
 743                        cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
 744                fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
 745        } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
 746                return -EINVAL;
 747
 748
 749        if (wait) {
 750                spin_lock_irqsave(&dev->manage_lock, mflags);
 751                if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
 752                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
 753                        return -EBUSY;
 754                }
 755                dev->management_fib_count++;
 756                spin_unlock_irqrestore(&dev->manage_lock, mflags);
 757                spin_lock_irqsave(&fibptr->event_lock, flags);
 758        }
 759
 760        if (aac_adapter_deliver(fibptr) != 0) {
 761                if (wait) {
 762                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
 763                        spin_lock_irqsave(&dev->manage_lock, mflags);
 764                        dev->management_fib_count--;
 765                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
 766                }
 767                return -EBUSY;
 768        }
 769        FIB_COUNTER_INCREMENT(aac_config.NativeSent);
 770
 771        if (wait) {
 772
 773                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 774
 775                if (unlikely(pci_channel_offline(dev->pdev)))
 776                        return -EFAULT;
 777
 778                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
 779                if (wait_for_completion_interruptible(&fibptr->event_wait))
 780                        fibptr->done = 2;
 781                fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
 782
 783                spin_lock_irqsave(&fibptr->event_lock, flags);
 784                if ((fibptr->done == 0) || (fibptr->done == 2)) {
 785                        fibptr->done = 2; /* Tell interrupt we aborted */
 786                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
 787                        return -ERESTARTSYS;
 788                }
 789                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 790                WARN_ON(fibptr->done == 0);
 791
 792                if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
 793                        return -ETIMEDOUT;
 794
 795                return 0;
 796        }
 797
 798        return -EINPROGRESS;
 799}
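
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * example_hba_async() shows the asynchronous convention for native HBA
 * commands: a non-NULL callback makes aac_hba_send() return -EINPROGRESS
 * once the command is delivered, and the callback fires on completion.
 */
static int __maybe_unused example_hba_async(struct fib *fibptr,
                                            fib_callback done, void *context)
{
        int status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fibptr,
                                  done, context);

        return (status == -EINPROGRESS) ? 0 : status;
}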
 800
 801/**
 802 *      aac_consumer_get        -       get the top of the queue
 803 *      @dev: Adapter
 804 *      @q: Queue
 805 *      @entry: Return entry
 806 *
  807 *      Will return a pointer to the entry on the top of the requested queue that
  808 *      we are a consumer of, and return the address of the queue entry. It does
 809 *      not change the state of the queue.
 810 */
 811
 812int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
 813{
 814        u32 index;
 815        int status;
 816        if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
 817                status = 0;
 818        } else {
 819                /*
 820                 *      The consumer index must be wrapped if we have reached
 821                 *      the end of the queue, else we just use the entry
 822                 *      pointed to by the header index
 823                 */
 824                if (le32_to_cpu(*q->headers.consumer) >= q->entries)
 825                        index = 0;
 826                else
 827                        index = le32_to_cpu(*q->headers.consumer);
 828                *entry = q->base + index;
 829                status = 1;
 830        }
 831        return(status);
 832}
 833
 834/**
 835 *      aac_consumer_free       -       free consumer entry
 836 *      @dev: Adapter
 837 *      @q: Queue
 838 *      @qid: Queue ident
 839 *
 840 *      Frees up the current top of the queue we are a consumer of. If the
 841 *      queue was full notify the producer that the queue is no longer full.
 842 */
 843
 844void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
 845{
 846        int wasfull = 0;
 847        u32 notify;
 848
 849        if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
 850                wasfull = 1;
 851
 852        if (le32_to_cpu(*q->headers.consumer) >= q->entries)
 853                *q->headers.consumer = cpu_to_le32(1);
 854        else
 855                le32_add_cpu(q->headers.consumer, 1);
 856
 857        if (wasfull) {
 858                switch (qid) {
 859
 860                case HostNormCmdQueue:
 861                        notify = HostNormCmdNotFull;
 862                        break;
 863                case HostNormRespQueue:
 864                        notify = HostNormRespNotFull;
 865                        break;
 866                default:
 867                        BUG();
 868                        return;
 869                }
 870                aac_adapter_notify(dev, notify);
 871        }
 872}
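
/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * example_drain_queue() pairs the two consumer primitives: peek at the
 * head entry with aac_consumer_get(), process it, then retire it with
 * aac_consumer_free(), which also lifts the producer's "queue full"
 * throttle when needed. Only the host-side queue ids are valid here.
 */
static void __maybe_unused example_drain_queue(struct aac_dev *dev, u32 qid)
{
        struct aac_queue *q = &dev->queues->queue[qid];
        struct aac_entry *entry;

        while (aac_consumer_get(dev, q, &entry)) {
                /* ... act on entry->addr / entry->size ... */
                aac_consumer_free(dev, q, qid);
        }
}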
 873
 874/**
 875 *      aac_fib_adapter_complete        -       complete adapter issued fib
 876 *      @fibptr: fib to complete
 877 *      @size: size of fib
 878 *
 879 *      Will do all necessary work to complete a FIB that was sent from
 880 *      the adapter.
 881 */
 882
 883int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 884{
 885        struct hw_fib * hw_fib = fibptr->hw_fib_va;
 886        struct aac_dev * dev = fibptr->dev;
 887        struct aac_queue * q;
 888        unsigned long nointr = 0;
 889        unsigned long qflags;
 890
 891        if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
 892                dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
 893                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
 894                kfree(hw_fib);
 895                return 0;
 896        }
 897
 898        if (hw_fib->header.XferState == 0) {
 899                if (dev->comm_interface == AAC_COMM_MESSAGE)
 900                        kfree(hw_fib);
 901                return 0;
 902        }
 903        /*
 904         *      If we plan to do anything check the structure type first.
 905         */
 906        if (hw_fib->header.StructType != FIB_MAGIC &&
 907            hw_fib->header.StructType != FIB_MAGIC2 &&
 908            hw_fib->header.StructType != FIB_MAGIC2_64) {
 909                if (dev->comm_interface == AAC_COMM_MESSAGE)
 910                        kfree(hw_fib);
 911                return -EINVAL;
 912        }
 913        /*
 914         *      This block handles the case where the adapter had sent us a
 915         *      command and we have finished processing the command. We
 916         *      call completeFib when we are done processing the command
 917         *      and want to send a response back to the adapter. This will
 918         *      send the completed cdb to the adapter.
 919         */
 920        if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
 921                if (dev->comm_interface == AAC_COMM_MESSAGE) {
 922                        kfree (hw_fib);
 923                } else {
 924                        u32 index;
 925                        hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
 926                        if (size) {
 927                                size += sizeof(struct aac_fibhdr);
 928                                if (size > le16_to_cpu(hw_fib->header.SenderSize))
 929                                        return -EMSGSIZE;
 930                                hw_fib->header.Size = cpu_to_le16(size);
 931                        }
 932                        q = &dev->queues->queue[AdapNormRespQueue];
 933                        spin_lock_irqsave(q->lock, qflags);
 934                        aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
 935                        *(q->headers.producer) = cpu_to_le32(index + 1);
 936                        spin_unlock_irqrestore(q->lock, qflags);
 937                        if (!(nointr & (int)aac_config.irq_mod))
 938                                aac_adapter_notify(dev, AdapNormRespQueue);
 939                }
 940        } else {
 941                printk(KERN_WARNING "aac_fib_adapter_complete: "
 942                        "Unknown xferstate detected.\n");
 943                BUG();
 944        }
 945        return 0;
 946}
 947
 948/**
 949 *      aac_fib_complete        -       fib completion handler
  950 *      @fibptr: The fib to complete
 951 *
 952 *      Will do all necessary work to complete a FIB.
 953 */
 954
 955int aac_fib_complete(struct fib *fibptr)
 956{
 957        struct hw_fib * hw_fib = fibptr->hw_fib_va;
 958
 959        if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
 960                fib_dealloc(fibptr);
 961                return 0;
 962        }
 963
 964        /*
 965         *      Check for a fib which has already been completed or with a
 966         *      status wait timeout
 967         */
 968
 969        if (hw_fib->header.XferState == 0 || fibptr->done == 2)
 970                return 0;
 971        /*
 972         *      If we plan to do anything check the structure type first.
 973         */
 974
 975        if (hw_fib->header.StructType != FIB_MAGIC &&
 976            hw_fib->header.StructType != FIB_MAGIC2 &&
 977            hw_fib->header.StructType != FIB_MAGIC2_64)
 978                return -EINVAL;
 979        /*
  980         *      This block completes a cdb which originated on the host and we
 981         *      just need to deallocate the cdb or reinit it. At this point the
 982         *      command is complete that we had sent to the adapter and this
 983         *      cdb could be reused.
 984         */
 985
 986        if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
 987                (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
 988        {
 989                fib_dealloc(fibptr);
 990        }
 991        else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
 992        {
 993                /*
 994                 *      This handles the case when the host has aborted the I/O
 995                 *      to the adapter because the adapter is not responding
 996                 */
 997                fib_dealloc(fibptr);
 998        } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
 999                fib_dealloc(fibptr);
1000        } else {
1001                BUG();
1002        }
1003        return 0;
1004}
1005
1006/**
1007 *      aac_printf      -       handle printf from firmware
1008 *      @dev: Adapter
1009 *      @val: Message info
1010 *
1011 *      Print a message passed to us by the controller firmware on the
1012 *      Adaptec board
1013 */
1014
1015void aac_printf(struct aac_dev *dev, u32 val)
1016{
1017        char *cp = dev->printfbuf;
1018        if (dev->printf_enabled)
1019        {
1020                int length = val & 0xffff;
1021                int level = (val >> 16) & 0xffff;
1022
1023                /*
1024                 *      The size of the printfbuf is set in port.c
1025                 *      There is no variable or define for it
1026                 */
1027                if (length > 255)
1028                        length = 255;
1029                if (cp[length] != 0)
1030                        cp[length] = 0;
1031                if (level == LOG_AAC_HIGH_ERROR)
1032                        printk(KERN_WARNING "%s:%s", dev->name, cp);
1033                else
1034                        printk(KERN_INFO "%s:%s", dev->name, cp);
1035        }
1036        memset(cp, 0, 256);
1037}
1038
1039static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
1040{
1041        return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
1042}
1043
1044
1045static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
1046{
1047        switch (aac_aif_data(aifcmd, 1)) {
1048        case AifBuCacheDataLoss:
1049                if (aac_aif_data(aifcmd, 2))
1050                        dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
1051                        aac_aif_data(aifcmd, 2));
1052                else
1053                        dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
1054                break;
1055        case AifBuCacheDataRecover:
1056                if (aac_aif_data(aifcmd, 2))
1057                        dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
1058                        aac_aif_data(aifcmd, 2));
1059                else
1060                        dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
1061                break;
1062        }
1063}
1064
1065/**
1066 *      aac_handle_aif          -       Handle a message from the firmware
1067 *      @dev: Which adapter this fib is from
1068 *      @fibptr: Pointer to fibptr from adapter
1069 *
1070 *      This routine handles a driver notify fib from the adapter and
1071 *      dispatches it to the appropriate routine for handling.
1072 */
1073
1074#define AIF_SNIFF_TIMEOUT       (500*HZ)
1075static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1076{
1077        struct hw_fib * hw_fib = fibptr->hw_fib_va;
1078        struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
1079        u32 channel, id, lun, container;
1080        struct scsi_device *device;
1081        enum {
1082                NOTHING,
1083                DELETE,
1084                ADD,
1085                CHANGE
1086        } device_config_needed = NOTHING;
1087
1088        /* Sniff for container changes */
1089
1090        if (!dev || !dev->fsa_dev)
1091                return;
1092        container = channel = id = lun = (u32)-1;
1093
1094        /*
1095         *      We have set this up to try and minimize the number of
1096         * re-configures that take place. As a result of this when
1097         * certain AIF's come in we will set a flag waiting for another
1098         * type of AIF before setting the re-config flag.
1099         */
1100        switch (le32_to_cpu(aifcmd->command)) {
1101        case AifCmdDriverNotify:
1102                switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
1103                case AifRawDeviceRemove:
1104                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1105                        if ((container >> 28)) {
1106                                container = (u32)-1;
1107                                break;
1108                        }
1109                        channel = (container >> 24) & 0xF;
1110                        if (channel >= dev->maximum_num_channels) {
1111                                container = (u32)-1;
1112                                break;
1113                        }
1114                        id = container & 0xFFFF;
1115                        if (id >= dev->maximum_num_physicals) {
1116                                container = (u32)-1;
1117                                break;
1118                        }
1119                        lun = (container >> 16) & 0xFF;
1120                        container = (u32)-1;
1121                        channel = aac_phys_to_logical(channel);
1122                        device_config_needed = DELETE;
1123                        break;
1124
1125                /*
1126                 *      Morph or Expand complete
1127                 */
1128                case AifDenMorphComplete:
1129                case AifDenVolumeExtendComplete:
1130                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1131                        if (container >= dev->maximum_num_containers)
1132                                break;
1133
1134                        /*
1135                         *      Find the scsi_device associated with the SCSI
1136                         * address. Make sure we have the right array, and if
1137                         * so set the flag to initiate a new re-config once we
1138                         * see an AifEnConfigChange AIF come through.
1139                         */
1140
1141                        if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
1142                                device = scsi_device_lookup(dev->scsi_host_ptr,
1143                                        CONTAINER_TO_CHANNEL(container),
1144                                        CONTAINER_TO_ID(container),
1145                                        CONTAINER_TO_LUN(container));
1146                                if (device) {
1147                                        dev->fsa_dev[container].config_needed = CHANGE;
1148                                        dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
1149                                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1150                                        scsi_device_put(device);
1151                                }
1152                        }
1153                }
1154
1155                /*
1156                 *      If we are waiting on something and this happens to be
1157                 * that thing then set the re-configure flag.
1158                 */
1159                if (container != (u32)-1) {
1160                        if (container >= dev->maximum_num_containers)
1161                                break;
1162                        if ((dev->fsa_dev[container].config_waiting_on ==
1163                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1164                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1165                                dev->fsa_dev[container].config_waiting_on = 0;
1166                } else for (container = 0;
1167                    container < dev->maximum_num_containers; ++container) {
1168                        if ((dev->fsa_dev[container].config_waiting_on ==
1169                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1170                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1171                                dev->fsa_dev[container].config_waiting_on = 0;
1172                }
1173                break;
1174
1175        case AifCmdEventNotify:
1176                switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
1177                case AifEnBatteryEvent:
1178                        dev->cache_protected =
1179                                (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
1180                        break;
1181                /*
1182                 *      Add an Array.
1183                 */
1184                case AifEnAddContainer:
1185                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1186                        if (container >= dev->maximum_num_containers)
1187                                break;
1188                        dev->fsa_dev[container].config_needed = ADD;
1189                        dev->fsa_dev[container].config_waiting_on =
1190                                AifEnConfigChange;
1191                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1192                        break;
1193
1194                /*
1195                 *      Delete an Array.
1196                 */
1197                case AifEnDeleteContainer:
1198                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1199                        if (container >= dev->maximum_num_containers)
1200                                break;
1201                        dev->fsa_dev[container].config_needed = DELETE;
1202                        dev->fsa_dev[container].config_waiting_on =
1203                                AifEnConfigChange;
1204                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1205                        break;
1206
1207                /*
1208                 *      Container change detected. If we currently are not
1209                 * waiting on something else, setup to wait on a Config Change.
1210                 */
1211                case AifEnContainerChange:
1212                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1213                        if (container >= dev->maximum_num_containers)
1214                                break;
1215                        if (dev->fsa_dev[container].config_waiting_on &&
1216                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1217                                break;
1218                        dev->fsa_dev[container].config_needed = CHANGE;
1219                        dev->fsa_dev[container].config_waiting_on =
1220                                AifEnConfigChange;
1221                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1222                        break;
1223
1224                case AifEnConfigChange:
1225                        break;
1226
1227                case AifEnAddJBOD:
1228                case AifEnDeleteJBOD:
1229                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1230                        if ((container >> 28)) {
1231                                container = (u32)-1;
1232                                break;
1233                        }
1234                        channel = (container >> 24) & 0xF;
1235                        if (channel >= dev->maximum_num_channels) {
1236                                container = (u32)-1;
1237                                break;
1238                        }
1239                        id = container & 0xFFFF;
1240                        if (id >= dev->maximum_num_physicals) {
1241                                container = (u32)-1;
1242                                break;
1243                        }
1244                        lun = (container >> 16) & 0xFF;
1245                        container = (u32)-1;
1246                        channel = aac_phys_to_logical(channel);
1247                        device_config_needed =
1248                          (((__le32 *)aifcmd->data)[0] ==
1249                            cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
1250                        if (device_config_needed == ADD) {
1251                                device = scsi_device_lookup(dev->scsi_host_ptr,
1252                                        channel,
1253                                        id,
1254                                        lun);
1255                                if (device) {
1256                                        scsi_remove_device(device);
1257                                        scsi_device_put(device);
1258                                }
1259                        }
1260                        break;
1261
1262                case AifEnEnclosureManagement:
1263                        /*
1264                         * If in JBOD mode, automatic exposure of new
1265                         * physical target to be suppressed until configured.
1266                         */
1267                        if (dev->jbod)
1268                                break;
1269                        switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
1270                        case EM_DRIVE_INSERTION:
1271                        case EM_DRIVE_REMOVAL:
1272                        case EM_SES_DRIVE_INSERTION:
1273                        case EM_SES_DRIVE_REMOVAL:
1274                                container = le32_to_cpu(
1275                                        ((__le32 *)aifcmd->data)[2]);
1276                                if ((container >> 28)) {
1277                                        container = (u32)-1;
1278                                        break;
1279                                }
1280                                channel = (container >> 24) & 0xF;
1281                                if (channel >= dev->maximum_num_channels) {
1282                                        container = (u32)-1;
1283                                        break;
1284                                }
1285                                id = container & 0xFFFF;
1286                                lun = (container >> 16) & 0xFF;
1287                                container = (u32)-1;
1288                                if (id >= dev->maximum_num_physicals) {
1289                                        /* legacy dev_t ? */
1290                                        if ((0x2000 <= id) || lun || channel ||
1291                                          ((channel = (id >> 7) & 0x3F) >=
1292                                          dev->maximum_num_channels))
1293                                                break;
1294                                        lun = (id >> 4) & 7;
1295                                        id &= 0xF;
1296                                }
1297                                channel = aac_phys_to_logical(channel);
1298                                device_config_needed =
1299                                  ((((__le32 *)aifcmd->data)[3]
1300                                    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
1301                                    (((__le32 *)aifcmd->data)[3]
1302                                    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
1303                                  ADD : DELETE;
1304                                break;
1305                        }
                        break;

1306                case AifBuManagerEvent:
1307                        aac_handle_aif_bu(dev, aifcmd);
1308                        break;
1309                }
1310
1311                /*
1312                 *      If a container was waiting on exactly this event,
1313                 * clear its wait so the pending re-configuration proceeds.
1314                 */
1315                if (container != (u32)-1) {
1316                        if (container >= dev->maximum_num_containers)
1317                                break;
1318                        if ((dev->fsa_dev[container].config_waiting_on ==
1319                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1320                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1321                                dev->fsa_dev[container].config_waiting_on = 0;
1322                } else for (container = 0;
1323                    container < dev->maximum_num_containers; ++container) {
1324                        if ((dev->fsa_dev[container].config_waiting_on ==
1325                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1326                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1327                                dev->fsa_dev[container].config_waiting_on = 0;
1328                }
1329                break;
1330
1331        case AifCmdJobProgress:
1332                /*
1333                 *      These are job progress AIFs. When a Clear is being
1334                 * done on a container, the container is initially created
1335                 * and then hidden from the OS. No config change arrives when
1336                 * the clear completes, so we watch for job-status completion
1337                 * of a clear and then wait for a container change.
1338                 */
1339
1340                if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1341                    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
1342                     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
1343                        for (container = 0;
1344                            container < dev->maximum_num_containers;
1345                            ++container) {
1346                                /*
1347                                 * Stomp on all config sequencing for all
1348                                 * containers?
1349                                 */
1350                                dev->fsa_dev[container].config_waiting_on =
1351                                        AifEnContainerChange;
1352                                dev->fsa_dev[container].config_needed = ADD;
1353                                dev->fsa_dev[container].config_waiting_stamp =
1354                                        jiffies;
1355                        }
1356                }
1357                if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1358                    ((__le32 *)aifcmd->data)[6] == 0 &&
1359                    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
1360                        for (container = 0;
1361                            container < dev->maximum_num_containers;
1362                            ++container) {
1363                                /*
1364                                 * Stomp on all config sequencing for all
1365                                 * containers?
1366                                 */
1367                                dev->fsa_dev[container].config_waiting_on =
1368                                        AifEnContainerChange;
1369                                dev->fsa_dev[container].config_needed = DELETE;
1370                                dev->fsa_dev[container].config_waiting_stamp =
1371                                        jiffies;
1372                        }
1373                }
1374                break;
1375        }
1376
1377        container = 0;
1378retry_next:
1379        if (device_config_needed == NOTHING) {
1380                for (; container < dev->maximum_num_containers; ++container) {
1381                        if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1382                            (dev->fsa_dev[container].config_needed != NOTHING) &&
1383                            time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1384                                device_config_needed =
1385                                        dev->fsa_dev[container].config_needed;
1386                                dev->fsa_dev[container].config_needed = NOTHING;
1387                                channel = CONTAINER_TO_CHANNEL(container);
1388                                id = CONTAINER_TO_ID(container);
1389                                lun = CONTAINER_TO_LUN(container);
1390                                break;
1391                        }
1392                }
        }
1393        if (device_config_needed == NOTHING)
1394                return;
1395
1396        /*
1397         *      If we decided that a re-configuration needs to be done,
1398         * schedule it here on the way out the door, please close the door
1399         * behind you.
1400         */
1401
1402        /*
1403         *      Find the scsi_device associated with the SCSI address,
1404         * and mark it as changed, invalidating the cache. This deals
1405         * with changes to existing device IDs.
1406         */
1407
1408        if (!dev || !dev->scsi_host_ptr)
1409                return;
1410        /*
1411         * force reload of disk info via aac_probe_container
1412         */
1413        if ((channel == CONTAINER_CHANNEL) &&
1414          (device_config_needed != NOTHING)) {
1415                if (dev->fsa_dev[container].valid == 1)
1416                        dev->fsa_dev[container].valid = 2;
1417                aac_probe_container(dev, container);
1418        }
1419        device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
1420        if (device) {
1421                switch (device_config_needed) {
1422                case DELETE:
1423#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1424                        scsi_remove_device(device);
1425#else
1426                        if (scsi_device_online(device)) {
1427                                scsi_device_set_state(device, SDEV_OFFLINE);
1428                                sdev_printk(KERN_INFO, device,
1429                                        "Device offlined - %s\n",
1430                                        (channel == CONTAINER_CHANNEL) ?
1431                                                "array deleted" :
1432                                                "enclosure services event");
1433                        }
1434#endif
1435                        break;
1436                case ADD:
1437                        if (!scsi_device_online(device)) {
1438                                sdev_printk(KERN_INFO, device,
1439                                        "Device online - %s\n",
1440                                        (channel == CONTAINER_CHANNEL) ?
1441                                                "array created" :
1442                                                "enclosure services event");
1443                                scsi_device_set_state(device, SDEV_RUNNING);
1444                        }
1445                        /* FALLTHRU */
1446                case CHANGE:
1447                        if ((channel == CONTAINER_CHANNEL)
1448                         && (!dev->fsa_dev[container].valid)) {
1449#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1450                                scsi_remove_device(device);
1451#else
1452                                if (!scsi_device_online(device))
1453                                        break;
1454                                scsi_device_set_state(device, SDEV_OFFLINE);
1455                                sdev_printk(KERN_INFO, device,
1456                                        "Device offlined - %s\n",
1457                                        "array failed");
1458#endif
1459                                break;
1460                        }
1461                        scsi_rescan_device(&device->sdev_gendev);
1462
1463                default:
1464                        break;
1465                }
1466                scsi_device_put(device);
1467                device_config_needed = NOTHING;
1468        }
1469        if (device_config_needed == ADD)
1470                scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
1471        if (channel == CONTAINER_CHANNEL) {
1472                container++;
1473                device_config_needed = NOTHING;
1474                goto retry_next;
1475        }
1476}
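
    /*
     * Worked example (illustrative only, not driver code): the JBOD and
     * enclosure cases above unpack a SCSI address from a single 32-bit
     * "container" word carried in the AIF payload:
     *
     *    u32 container = 0x01000005;
     *    channel = (container >> 24) & 0xF;     ->  1
     *    id      = container & 0xFFFF;          ->  5
     *    lun     = (container >> 16) & 0xFF;    ->  0
     *
     * A non-zero top nibble (container >> 28) marks the word as invalid
     * and the event is dropped; otherwise channel is mapped with
     * aac_phys_to_logical() before the scsi_device lookup.
     */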
1477
1478static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1479{
1480        int index, quirks;
1481        int retval;
1482        struct Scsi_Host *host;
1483        struct scsi_device *dev;
1484        struct scsi_cmnd *command;
1485        struct scsi_cmnd *command_list;
1486        int jafo = 0;
1487        int bled;
1488        u64 dmamask;
1489        int num_of_fibs = 0;
1490
1491        /*
1492         * Assumptions:
1493         *      - host is locked, unless called by the aacraid thread.
1494         *        (a matter of convenience, due to legacy issues surrounding
1495         *        eh_host_adapter_reset).
1496         *      - in_reset is asserted, so no new i/o is getting to the
1497         *        card.
1498         *      - The card is dead, or will be very shortly ;-/ so no new
1499         *        commands are completing in the interrupt service.
1500         */
1501        host = aac->scsi_host_ptr;
1502        scsi_block_requests(host);
1503        aac_adapter_disable_int(aac);
1504        if (aac->thread && aac->thread->pid != current->pid) {
1505                spin_unlock_irq(host->host_lock);
1506                kthread_stop(aac->thread);
1507                aac->thread = NULL;
1508                jafo = 1;
1509        }
1510
1511        /*
1512         *      A positive health value means the adapter is in a known
1513         * DEAD/PANIC state and may be reset to `try again'.
1514         */
1515        bled = forced ? 0 : aac_adapter_check_health(aac);
1516        retval = aac_adapter_restart(aac, bled, reset_type);
1517
1518        if (retval)
1519                goto out;
1520
1521        /*
1522         *      Loop through the FIBs and complete the synchronous ones
1523         */
1524        retval = 1;
1525        num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
1526        for (index = 0; index <  num_of_fibs; index++) {
1527
1528                struct fib *fib = &aac->fibs[index];
1529                __le32 XferState = fib->hw_fib_va->header.XferState;
1530                bool is_response_expected = false;
1531
1532                if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1533                   (XferState & cpu_to_le32(ResponseExpected)))
1534                        is_response_expected = true;
1535
1536                if (is_response_expected
1537                  || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
1538                        unsigned long flagv;
1539                        spin_lock_irqsave(&fib->event_lock, flagv);
1540                        complete(&fib->event_wait);
1541                        spin_unlock_irqrestore(&fib->event_lock, flagv);
1542                        schedule();
1543                        retval = 0;
1544                }
1545        }
1546        /* Give some extra time for ioctls to complete. */
1547        if (retval == 0)
1548                ssleep(2);
1549        index = aac->cardtype;
1550
1551        /*
1552         * Re-initialize the adapter: first free resources, then carefully
1553         * apply the initialization sequence to come back again. The only
1554         * risk is firmware dropping its cache; it is assumed the caller
1555         * will ensure that i/o is quiesced and the card is flushed in
1556         * that case.
1557         */
1558        aac_free_irq(aac);
1559        aac_fib_map_free(aac);
1560        dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
1561                          aac->comm_phys);
1562        aac->comm_addr = NULL;
1563        aac->comm_phys = 0;
1564        kfree(aac->queues);
1565        aac->queues = NULL;
1566        kfree(aac->fsa_dev);
1567        aac->fsa_dev = NULL;
1568
1569        dmamask = DMA_BIT_MASK(32);
1570        quirks = aac_get_driver_ident(index)->quirks;
1571        if ((quirks & AAC_QUIRK_31BIT) || !(quirks & AAC_QUIRK_SRC))
1572                retval = pci_set_dma_mask(aac->pdev, dmamask);
1575        else
1576                retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
1577
1578        if ((quirks & AAC_QUIRK_31BIT) && !retval) {
1579                dmamask = DMA_BIT_MASK(31);
1580                retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
1581        }
1582
1583        if (retval)
1584                goto out;
1585
1586        retval = aac_get_driver_ident(index)->init(aac);
            if (retval)
1587                goto out;
1588
1589        if (jafo) {
1590                aac->thread = kthread_run(aac_command_thread, aac, "%s",
1591                                          aac->name);
1592                if (IS_ERR(aac->thread)) {
1593                        retval = PTR_ERR(aac->thread);
1594                        aac->thread = NULL;
1595                        goto out;
1596                }
1597        }
1598        (void)aac_get_adapter_info(aac);
1599        if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1600                host->sg_tablesize = 34;
1601                host->max_sectors = (host->sg_tablesize * 8) + 112;
1602        }
1603        if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1604                host->sg_tablesize = 17;
1605                host->max_sectors = (host->sg_tablesize * 8) + 112;
1606        }
1607        aac_get_config_status(aac, 1);
1608        aac_get_containers(aac);
1609        /*
1610         * This is where the assumption that the Adapter is quiesced
1611         * is important.
1612         */
1613        command_list = NULL;
1614        __shost_for_each_device(dev, host) {
1615                unsigned long flags;
1616                spin_lock_irqsave(&dev->list_lock, flags);
1617                list_for_each_entry(command, &dev->cmd_list, list)
1618                        if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1619                                command->SCp.buffer = (struct scatterlist *)command_list;
1620                                command_list = command;
1621                        }
1622                spin_unlock_irqrestore(&dev->list_lock, flags);
1623        }
1624        while ((command = command_list)) {
1625                command_list = (struct scsi_cmnd *)command->SCp.buffer;
1626                command->SCp.buffer = NULL;
1627                command->result = DID_OK << 16
1628                  | COMMAND_COMPLETE << 8
1629                  | SAM_STAT_TASK_SET_FULL;
1630                command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1631                command->scsi_done(command);
1632        }
1633        /*
1634         * Any Device that was already marked offline needs to be marked
1635         * running
1636         */
1637        __shost_for_each_device(dev, host) {
1638                if (!scsi_device_online(dev))
1639                        scsi_device_set_state(dev, SDEV_RUNNING);
1640        }
1641        retval = 0;
1642
1643out:
1644        aac->in_reset = 0;
1645        scsi_unblock_requests(host);
1646
1647        /*
1648         * Issue a bus rescan to catch any configuration change that
1649         * might have occurred.
1650         */
1651        if (!retval && !is_kdump_kernel()) {
1652                dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
1653                aac_schedule_safw_scan_worker(aac);
1654        }
1655
1656        if (jafo) {
1657                spin_lock_irq(host->host_lock);
1658        }
1659        return retval;
1660}
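
    /*
     * Note on the returned-command walk in _aac_reset_adapter() above
     * (illustrative sketch, not new driver code): SCp.buffer is
     * temporarily reused as a link pointer so that every command still
     * owned by the firmware can be collected into a singly linked list
     * under dev->list_lock and completed after the lock is dropped:
     *
     *    command->SCp.buffer = (struct scatterlist *)command_list;  push
     *    command_list = command;
     *    ...
     *    command = command_list;                                    pop
     *    command_list = (struct scsi_cmnd *)command->SCp.buffer;
     *    command->SCp.buffer = NULL;
     */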
1661
1662int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1663{
1664        unsigned long flagv = 0;
1665        int retval;
1666        struct Scsi_Host * host;
1667        int bled;
1668
1669        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1670                return -EBUSY;
1671
1672        if (aac->in_reset) {
1673                spin_unlock_irqrestore(&aac->fib_lock, flagv);
1674                return -EBUSY;
1675        }
1676        aac->in_reset = 1;
1677        spin_unlock_irqrestore(&aac->fib_lock, flagv);
1678
1679        /*
1680         * Wait for all commands to complete to this specific
1681         * target (block maximum 60 seconds). Although not necessary,
1682         * it does make us a good storage citizen.
1683         */
1684        host = aac->scsi_host_ptr;
1685        scsi_block_requests(host);
1686
1687        /* Quiesce build, flush cache, write through mode */
1688        if (forced < 2)
1689                aac_send_shutdown(aac);
1690        spin_lock_irqsave(host->host_lock, flagv);
1691        bled = forced ? forced :
1692                        (aac_check_reset != 0 && aac_check_reset != 1);
1693        retval = _aac_reset_adapter(aac, bled, reset_type);
1694        spin_unlock_irqrestore(host->host_lock, flagv);
1695
1696        if ((forced < 2) && (retval == -ENODEV)) {
1697                /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1698                struct fib * fibctx = aac_fib_alloc(aac);
1699                if (fibctx) {
1700                        struct aac_pause *cmd;
1701                        int status;
1702
1703                        aac_fib_init(fibctx);
1704
1705                        cmd = (struct aac_pause *) fib_data(fibctx);
1706
1707                        cmd->command = cpu_to_le32(VM_ContainerConfig);
1708                        cmd->type = cpu_to_le32(CT_PAUSE_IO);
1709                        cmd->timeout = cpu_to_le32(1);
1710                        cmd->min = cpu_to_le32(1);
1711                        cmd->noRescan = cpu_to_le32(1);
1712                        cmd->count = cpu_to_le32(0);
1713
1714                        status = aac_fib_send(ContainerCommand,
1715                          fibctx,
1716                          sizeof(struct aac_pause),
1717                          FsaNormal,
1718                          -2 /* Timeout silently */, 1,
1719                          NULL, NULL);
1720
1721                        if (status >= 0)
1722                                aac_fib_complete(fibctx);
1723                        /* FIB should be freed only after getting
1724                         * the response from the F/W */
1725                        if (status != -ERESTARTSYS)
1726                                aac_fib_free(fibctx);
1727                }
1728        }
1729
1730        return retval;
1731}
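
    /*
     * Illustrative values of the "bled" argument computed above; it is
     * handed to _aac_reset_adapter() as its "forced" parameter:
     *
     *    forced == 0, aac_check_reset == 1  ->  bled = 0  (health-checked)
     *    forced == 0, aac_check_reset == 2  ->  bled = 1  (check skipped)
     *    forced == 2                        ->  bled = 2  (shutdown skipped
     *                                                      as well)
     */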
1732
1733int aac_check_health(struct aac_dev * aac)
1734{
1735        int BlinkLED;
1736        unsigned long time_now, flagv = 0;
1737        struct list_head * entry;
1738
1739        /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1740        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1741                return 0;
1742
1743        if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1744                spin_unlock_irqrestore(&aac->fib_lock, flagv);
1745                return 0; /* OK */
1746        }
1747
1748        aac->in_reset = 1;
1749
1750        /* Fake up an AIF:
1751         *      aac_aifcmd.command = AifCmdEventNotify = 1
1752         *      aac_aifcmd.seqnum = 0xFFFFFFFF
1753         *      aac_aifcmd.data[0] = AifEnExpEvent = 23
1754         *      aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1755         *      aac_aifcmd.data[2] = AifHighPriority = 3
1756         *      aac_aifcmd.data[3] = BlinkLED
1757         */
1758
1759        time_now = jiffies/HZ;
1760        entry = aac->fib_list.next;
1761
1762        /*
1763         * For each Context that is on the
1764         * fibctxList, make a copy of the
1765         * fib, and then set the event to wake up the
1766         * thread that is waiting for it.
1767         */
1768        while (entry != &aac->fib_list) {
1769                /*
1770                 * Extract the fibctx
1771                 */
1772                struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1773                struct hw_fib * hw_fib;
1774                struct fib * fib;
1775                /*
1776                 * Check if the queue is getting
1777                 * backlogged
1778                 */
1779                if (fibctx->count > 20) {
1780                        /*
1781                         * It's *not* jiffies folks,
1782                         * but jiffies / HZ, so do not
1783                         * panic ...
1784                         */
1785                        u32 time_last = fibctx->jiffies;
1786                        /*
1787                         * Has it been > 2 minutes
1788                         * since the last read off
1789                         * the queue?
1790                         */
1791                        if ((time_now - time_last) > aif_timeout) {
1792                                entry = entry->next;
1793                                aac_close_fib_context(aac, fibctx);
1794                                continue;
1795                        }
1796                }
1797                /*
1798                 * Warning: no sleep allowed while
1799                 * holding spinlock
1800                 */
1801                hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1802                fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1803                if (fib && hw_fib) {
1804                        struct aac_aifcmd * aif;
1805
1806                        fib->hw_fib_va = hw_fib;
1807                        fib->dev = aac;
1808                        aac_fib_init(fib);
1809                        fib->type = FSAFS_NTC_FIB_CONTEXT;
1810                        fib->size = sizeof (struct fib);
1811                        fib->data = hw_fib->data;
1812                        aif = (struct aac_aifcmd *)hw_fib->data;
1813                        aif->command = cpu_to_le32(AifCmdEventNotify);
1814                        aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1815                        ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1816                        ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1817                        ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1818                        ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1819
1820                        /*
1821                         * Put the FIB onto the
1822                         * fibctx's fibs
1823                         */
1824                        list_add_tail(&fib->fiblink, &fibctx->fib_list);
1825                        fibctx->count++;
1826                        /*
1827                         * Set the event to wake up the
1828                         * thread that is waiting.
1829                         */
1830                        complete(&fibctx->completion);
1831                } else {
1832                        printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1833                        kfree(fib);
1834                        kfree(hw_fib);
1835                }
1836                entry = entry->next;
1837        }
1838
1839        spin_unlock_irqrestore(&aac->fib_lock, flagv);
1840
1841        if (BlinkLED < 0) {
1842                printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
1843                                aac->name, BlinkLED);
1844                goto out;
1845        }
1846
1847        printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1848
1849out:
1850        aac->in_reset = 0;
1851        return BlinkLED;
1852}
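
    /*
     * Illustrative backlog arithmetic for the check above: time_now and
     * fibctx->jiffies are both jiffies/HZ, i.e. seconds of uptime, and
     * the aif_timeout module parameter defaults to 120, so a context
     * whose queue has not been read for over two minutes is torn down:
     *
     *    time_now  = jiffies/HZ;           e.g. 1000
     *    time_last = fibctx->jiffies;      e.g.  870
     *    1000 - 870 = 130 > 120            ->   aac_close_fib_context()
     */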
1853
1854static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
1855{
1856        return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
1857}
1858
1859static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
1860                                                                int bus,
1861                                                                int target)
1862{
1863        if (bus != CONTAINER_CHANNEL)
1864                bus = aac_phys_to_logical(bus);
1865
1866        return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
1867}
1868
1869static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
1870{
1871        if (bus != CONTAINER_CHANNEL)
1872                bus = aac_phys_to_logical(bus);
1873
1874        return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
1875}
1876
1877static void aac_put_safw_scsi_device(struct scsi_device *sdev)
1878{
1879        if (sdev)
1880                scsi_device_put(sdev);
1881}
1882
1883static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
1884{
1885        struct scsi_device *sdev;
1886
1887        sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1888        if (sdev)               /* lookup may have raced with removal */
                scsi_remove_device(sdev);
1889        aac_put_safw_scsi_device(sdev);
1890}
1891
1892static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
1893        int bus, int target)
1894{
1895        return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
1896}
1897
1898static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
1899{
1900        if (is_safw_raid_volume(dev, bus, target))
1901                return dev->fsa_dev[target].valid;
1902        else
1903                return aac_is_safw_scan_count_equal(dev, bus, target);
1904}
1905
1906static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
1907{
1908        int is_exposed = 0;
1909        struct scsi_device *sdev;
1910
1911        sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1912        if (sdev)
1913                is_exposed = 1;
1914        aac_put_safw_scsi_device(sdev);
1915
1916        return is_exposed;
1917}
1918
1919static int aac_update_safw_host_devices(struct aac_dev *dev)
1920{
1921        int i;
1922        int bus;
1923        int target;
1924        int is_exposed = 0;
1925        int rcode = 0;
1926
1927        rcode = aac_setup_safw_adapter(dev);
1928        if (unlikely(rcode < 0))
1929                goto out;
1931
1932        for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
1933
1934                bus = get_bus_number(i);
1935                target = get_target_number(i);
1936
1937                is_exposed = aac_is_safw_device_exposed(dev, bus, target);
1938
1939                if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
1940                        aac_add_safw_device(dev, bus, target);
1941                else if (!aac_is_safw_target_valid(dev, bus, target) &&
1942                                                                is_exposed)
1943                        aac_remove_safw_device(dev, bus, target);
1944        }
1945out:
1946        return rcode;
1947}
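
    /*
     * Decision table for the rescan loop above (illustrative):
     *
     *    valid  exposed   action
     *     yes     no      aac_add_safw_device()
     *     no      yes     aac_remove_safw_device()
     *     yes     yes     nothing, already in sync
     *     no      no      nothing
     *
     * "valid" means fsa_dev[target].valid for RAID volumes on
     * CONTAINER_CHANNEL, or a matching scan_counter for physical
     * targets.
     */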
1948
1949static int aac_scan_safw_host(struct aac_dev *dev)
1950{
1951        int rcode = 0;
1952
1953        rcode = aac_update_safw_host_devices(dev);
1954        if (rcode)
1955                aac_schedule_safw_scan_worker(dev);
1956
1957        return rcode;
1958}
1959
1960int aac_scan_host(struct aac_dev *dev)
1961{
1962        int rcode = 0;
1963
1964        mutex_lock(&dev->scan_mutex);
1965        if (dev->sa_firmware)
1966                rcode = aac_scan_safw_host(dev);
1967        else
1968                scsi_scan_host(dev->scsi_host_ptr);
1969        mutex_unlock(&dev->scan_mutex);
1970
1971        return rcode;
1972}
1973
1974/**
1975 *      aac_handle_sa_aif       -       Handle a message from the firmware
1976 *      @dev: Which adapter this fib is from
1977 *      @fibptr: Pointer to the fib from the adapter
1978 *
1979 *      This routine handles a driver notify fib from the adapter and
1980 *      dispatches it to the appropriate routine for handling.
1981 */
1982static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
1983{
1984        int i;
1985        u32 events = 0;
1986
1987        if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
1988                events = SA_AIF_HOTPLUG;
1989        else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
1990                events = SA_AIF_HARDWARE;
1991        else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
1992                events = SA_AIF_PDEV_CHANGE;
1993        else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
1994                events = SA_AIF_LDEV_CHANGE;
1995        else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
1996                events = SA_AIF_BPSTAT_CHANGE;
1997        else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
1998                events = SA_AIF_BPCFG_CHANGE;
1999
2000        switch (events) {
2001        case SA_AIF_HOTPLUG:
2002        case SA_AIF_HARDWARE:
2003        case SA_AIF_PDEV_CHANGE:
2004        case SA_AIF_LDEV_CHANGE:
2005        case SA_AIF_BPCFG_CHANGE:
2006
2007                aac_scan_host(dev);
2008
2009                break;
2010
2011        case SA_AIF_BPSTAT_CHANGE:
2012                /* currently do nothing */
2013                break;
2014        }
2015
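            /*
             * Best-effort acknowledgement wait: bit 23 of the IDR
             * doorbell stays set until the firmware has consumed the
             * AIF. The loop re-reads the register up to ten times,
             * sleeping one second only while the bit is still set.
             */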
2016        for (i = 1; i <= 10; ++i) {
2017                events = src_readl(dev, MUnit.IDR);
2018                if (events & (1<<23)) {
2019                        pr_warn("AIF not cleared by firmware - %d/%d\n",
2020                                i, 10);
2021                        ssleep(1);
2022                }
2023        }
2024}
2025
2026static int get_fib_count(struct aac_dev *dev)
2027{
2028        unsigned int num = 0;
2029        struct list_head *entry;
2030        unsigned long flagv;
2031
2032        /*
2033         * Warning: no sleep allowed while
2034         * holding spinlock. We take the estimate
2035         * and pre-allocate a set of fibs outside the
2036         * lock.
2037         */
2038        num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
2039                        / sizeof(struct hw_fib); /* some extra */
2040        spin_lock_irqsave(&dev->fib_lock, flagv);
2041        entry = dev->fib_list.next;
2042        while (entry != &dev->fib_list) {
2043                entry = entry->next;
2044                ++num;
2045        }
2046        spin_unlock_irqrestore(&dev->fib_lock, flagv);
2047
2048        return num;
2049}
2050
2051static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2052                                                struct fib **fib_pool,
2053                                                unsigned int num)
2054{
2055        struct hw_fib **hw_fib_p;
2056        struct fib **fib_p;
2057
2058        hw_fib_p = hw_fib_pool;
2059        fib_p = fib_pool;
2060        while (hw_fib_p < &hw_fib_pool[num]) {
2061                *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
2062                if (!(*(hw_fib_p++))) {
2063                        --hw_fib_p;
2064                        break;
2065                }
2066
2067                *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
2068                if (!(*(fib_p++))) {
2069                        kfree(*(--hw_fib_p));
2070                        break;
2071                }
2072        }
2073
2074        /*
2075         * Get the actual number of allocated fibs
2076         */
2077        num = hw_fib_p - hw_fib_pool;
2078        return num;
2079}
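
    /*
     * Sketch of the invariant fillup_pools() maintains (illustrative):
     * hw_fib and fib entries are allocated in lock step, so on any
     * allocation failure the pools still hold exactly "num" complete
     * pairs and no orphaned halves:
     *
     *    requested num = 4, third hw_fib kmalloc() fails
     *    hw_fib_pool: [A][B][-][-]     fib_pool: [a][b][-][-]
     *    returned num = 2  ->  only pairs (A,a) and (B,b) are used
     */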
2080
2081static void wakeup_fibctx_threads(struct aac_dev *dev,
2082                                                struct hw_fib **hw_fib_pool,
2083                                                struct fib **fib_pool,
2084                                                struct fib *fib,
2085                                                struct hw_fib *hw_fib,
2086                                                unsigned int num)
2087{
2088        unsigned long flagv;
2089        struct list_head *entry;
2090        struct hw_fib **hw_fib_p;
2091        struct fib **fib_p;
2092        u32 time_now, time_last;
2093        struct hw_fib *hw_newfib;
2094        struct fib *newfib;
2095        struct aac_fib_context *fibctx;
2096
2097        time_now = jiffies/HZ;
2098        spin_lock_irqsave(&dev->fib_lock, flagv);
2099        entry = dev->fib_list.next;
2100        /*
2101         * For each Context that is on the
2102         * fibctxList, make a copy of the
2103         * fib, and then set the event to wake up the
2104         * thread that is waiting for it.
2105         */
2106
2107        hw_fib_p = hw_fib_pool;
2108        fib_p = fib_pool;
2109        while (entry != &dev->fib_list) {
2110                /*
2111                 * Extract the fibctx
2112                 */
2113                fibctx = list_entry(entry, struct aac_fib_context,
2114                                next);
2115                /*
2116                 * Check if the queue is getting
2117                 * backlogged
2118                 */
2119                if (fibctx->count > 20) {
2120                        /*
2121                         * It's *not* jiffies folks,
2122                         * but jiffies / HZ so do not
2123                         * panic ...
2124                         */
2125                        time_last = fibctx->jiffies;
2126                        /*
2127                         * Has it been > 2 minutes
2128                         * since the last read off
2129                         * the queue?
2130                         */
2131                        if ((time_now - time_last) > aif_timeout) {
2132                                entry = entry->next;
2133                                aac_close_fib_context(dev, fibctx);
2134                                continue;
2135                        }
2136                }
2137                /*
2138                 * Warning: no sleep allowed while
2139                 * holding spinlock
2140                 */
2141                if (hw_fib_p >= &hw_fib_pool[num]) {
2142                        pr_warn("aifd: didn't allocate NewFib\n");
2143                        entry = entry->next;
2144                        continue;
2145                }
2146
2147                hw_newfib = *hw_fib_p;
2148                *(hw_fib_p++) = NULL;
2149                newfib = *fib_p;
2150                *(fib_p++) = NULL;
2151                /*
2152                 * Make the copy of the FIB
2153                 */
2154                memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
2155                memcpy(newfib, fib, sizeof(struct fib));
2156                newfib->hw_fib_va = hw_newfib;
2157                /*
2158                 * Put the FIB onto the
2159                 * fibctx's fibs
2160                 */
2161                list_add_tail(&newfib->fiblink, &fibctx->fib_list);
2162                fibctx->count++;
2163                /*
2164                 * Set the event to wake up the
2165                 * thread that is waiting.
2166                 */
2167                complete(&fibctx->completion);
2168
2169                entry = entry->next;
2170        }
2171        /*
2172         *      Set the status of this FIB
2173         */
2174        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2175        aac_fib_adapter_complete(fib, sizeof(u32));
2176        spin_unlock_irqrestore(&dev->fib_lock, flagv);
2177
2178}
2179
2180static void aac_process_events(struct aac_dev *dev)
2181{
2182        struct hw_fib *hw_fib;
2183        struct fib *fib;
2184        unsigned long flags;
2185        spinlock_t *t_lock;
2186
2187        t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2188        spin_lock_irqsave(t_lock, flags);
2189
2190        while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
2191                struct list_head *entry;
2192                struct aac_aifcmd *aifcmd;
2193                unsigned int  num;
2194                struct hw_fib **hw_fib_pool, **hw_fib_p;
2195                struct fib **fib_pool, **fib_p;
2196
2197                set_current_state(TASK_RUNNING);
2198
2199                entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
2200                list_del(entry);
2201
2202                t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2203                spin_unlock_irqrestore(t_lock, flags);
2204
2205                fib = list_entry(entry, struct fib, fiblink);
2206                hw_fib = fib->hw_fib_va;
2207                if (dev->sa_firmware) {
2208                        /* Thor AIF */
2209                        aac_handle_sa_aif(dev, fib);
2210                        aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2211                        goto free_fib;
2212                }
2213                /*
2214                 *      We will process the FIB here or pass it to a
2215                 *      worker thread that is TBD. We really can't
2216                 *      do anything at this point since we don't have
2217                 *      anything defined for this thread to do.
2218                 */
2219                memset(fib, 0, sizeof(struct fib));
2220                fib->type = FSAFS_NTC_FIB_CONTEXT;
2221                fib->size = sizeof(struct fib);
2222                fib->hw_fib_va = hw_fib;
2223                fib->data = hw_fib->data;
2224                fib->dev = dev;
2225                /*
2226                 *      We only handle AifRequest fibs from the adapter.
2227                 */
2228
2229                aifcmd = (struct aac_aifcmd *) hw_fib->data;
2230                if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
2231                        /* Handle Driver Notify Events */
2232                        aac_handle_aif(dev, fib);
2233                        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2234                        aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2235                        goto free_fib;
2236                }
2237                /*
2238                 * The u32 here is important and intended. We are using
2239                 * 32-bit wrapping time to fit the adapter field.
2240                 */
2241
2242                /* Sniff events */
2243                if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
2244                 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
2245                        aac_handle_aif(dev, fib);
2246                }
2247
2248                /*
2249                 * get number of fibs to process
2250                 */
2251                num = get_fib_count(dev);
2252                if (!num)
2253                        goto free_fib;
2254
2255                hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
2256                                                GFP_KERNEL);
2257                if (!hw_fib_pool)
2258                        goto free_fib;
2259
2260                fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
2261                if (!fib_pool)
2262                        goto free_hw_fib_pool;
2263
2264                /*
2265                 * Fill up fib pointer pools with actual fibs
2266                 * and hw_fibs
2267                 */
2268                num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
2269                if (!num)
2270                        goto free_mem;
2271
2272                /*
2273                 * wakeup the thread that is waiting for
2274                 * the response from fw (ioctl)
2275                 */
2276                wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
2277                                                            fib, hw_fib, num);
2278
2279free_mem:
2280                /* Free up the remaining resources */
2281                hw_fib_p = hw_fib_pool;
2282                fib_p = fib_pool;
2283                while (hw_fib_p < &hw_fib_pool[num]) {
2284                        kfree(*hw_fib_p);
2285                        kfree(*fib_p);
2286                        ++fib_p;
2287                        ++hw_fib_p;
2288                }
2289                kfree(fib_pool);
2290free_hw_fib_pool:
2291                kfree(hw_fib_pool);
2292free_fib:
2293                kfree(fib);
2294                t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2295                spin_lock_irqsave(t_lock, flags);
2296        }
2297        /*
2298         *      There are no more AIFs.
2299         */
2300        t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2301        spin_unlock_irqrestore(t_lock, flags);
2302}
2303
2304static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
2305                                                        u32 datasize)
2306{
2307        struct aac_srb *srbcmd;
2308        struct sgmap64 *sg64;
2309        dma_addr_t addr;
2310        char *dma_buf;
2311        struct fib *fibptr;
2312        int ret = -ENOMEM;
2313        u32 vbus, vid;
2314
2315        fibptr = aac_fib_alloc(dev);
2316        if (!fibptr)
2317                goto out;
2318
2319        dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
2320                                     GFP_KERNEL);
2321        if (!dma_buf)
2322                goto fib_free_out;
2323
2324        aac_fib_init(fibptr);
2325
2326        vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
2327        vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
2328
2329        srbcmd = (struct aac_srb *)fib_data(fibptr);
2330
2331        srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2332        srbcmd->channel = cpu_to_le32(vbus);
2333        srbcmd->id = cpu_to_le32(vid);
2334        srbcmd->lun = 0;
2335        srbcmd->flags = cpu_to_le32(SRB_DataOut);
2336        srbcmd->timeout = cpu_to_le32(10);
2337        srbcmd->retry_limit = 0;
2338        srbcmd->cdb_size = cpu_to_le32(12);
2339        srbcmd->count = cpu_to_le32(datasize);
2340
2341        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2342        srbcmd->cdb[0] = BMIC_OUT;
2343        srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
2344        memcpy(dma_buf, (char *)wellness_str, datasize);
2345
2346        sg64 = (struct sgmap64 *)&srbcmd->sg;
2347        sg64->count = cpu_to_le32(1);
2348        sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
2349        sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2350        sg64->sg[0].count = cpu_to_le32(datasize);
2351
2352        ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
2353                                FsaNormal, 1, 1, NULL, NULL);
2354
2355        dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
2356
2357        /*
2358         * Do not set XferState to zero unless
2359         * receives a response from F/W
2360         */
2361        if (ret >= 0)
2362                aac_fib_complete(fibptr);
2363
2364        /*
2365         * FIB should be freed only after
2366         * getting the response from the F/W
2367         */
2368        if (ret != -ERESTARTSYS)
2369                goto fib_free_out;
2370
2371out:
2372        return ret;
2373fib_free_out:
2374        aac_fib_free(fibptr);
2375        goto out;
2376}
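
    /*
     * Illustrative split of the 64-bit DMA address into the sgmap64
     * entry above; the double 16-bit shift sidesteps a shift-count
     * warning on configurations where dma_addr_t is only 32 bits wide:
     *
     *    dma_addr_t addr = 0x0000000112345678ULL;
     *    high: (u32)((addr >> 16) >> 16)   ->  0x00000001
     *    low : (u32)(addr & 0xffffffff)    ->  0x12345678
     */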
2377
2378int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
2379{
2380        struct tm cur_tm;
2381        char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
2382        u32 datasize = sizeof(wellness_str);
2383        time64_t local_time;
2384        int ret = -ENODEV;
2385
2386        if (!dev->sa_firmware)
2387                goto out;
2388
2389        local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
2390        time64_to_tm(local_time, 0, &cur_tm);
2391        cur_tm.tm_mon += 1;
2392        cur_tm.tm_year += 1900;
2393        wellness_str[8] = bin2bcd(cur_tm.tm_hour);
2394        wellness_str[9] = bin2bcd(cur_tm.tm_min);
2395        wellness_str[10] = bin2bcd(cur_tm.tm_sec);
2396        wellness_str[12] = bin2bcd(cur_tm.tm_mon);
2397        wellness_str[13] = bin2bcd(cur_tm.tm_mday);
2398        wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
2399        wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
2400
2401        ret = aac_send_wellness_command(dev, wellness_str, datasize);
2402
2403out:
2404        return ret;
2405}
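
    /*
     * Worked example of the BCD packing above (illustrative): for a
     * local time of 2023-07-15 09:05:42 the wellness string bytes
     * become
     *
     *    [8]  bin2bcd(9)  = 0x09   hour
     *    [9]  bin2bcd(5)  = 0x05   minute
     *    [10] bin2bcd(42) = 0x42   second
     *    [12] bin2bcd(7)  = 0x07   month
     *    [13] bin2bcd(15) = 0x15   day
     *    [14] bin2bcd(20) = 0x20   century (2023 / 100)
     *    [15] bin2bcd(23) = 0x23   year    (2023 % 100)
     */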
2406
2407int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
2408{
2409        int ret = -ENOMEM;
2410        struct fib *fibptr;
2411        __le32 *info;
2412
2413        fibptr = aac_fib_alloc(dev);
2414        if (!fibptr)
2415                goto out;
2416
2417        aac_fib_init(fibptr);
2418        info = (__le32 *)fib_data(fibptr);
2419        *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
2420        ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
2421                                        1, 1, NULL, NULL);
2422
2423        /*
2424         * Do not set XferState to zero unless
2425         * receives a response from F/W
2426         */
2427        if (ret >= 0)
2428                aac_fib_complete(fibptr);
2429
2430        /*
2431         * FIB should be freed only after
2432         * getting the response from the F/W
2433         */
2434        if (ret != -ERESTARTSYS)
2435                aac_fib_free(fibptr);
2436
2437out:
2438        return ret;
2439}
2440
2441/**
2442 *      aac_command_thread      -       command processing thread
2443 *      @dev: Adapter to monitor
2444 *
2445 *      Waits on the commandready event in its queue. When the event gets set
2446 *      it will pull FIBs off its queue. It will continue to pull FIBs off
2447 *      until the queue is empty. When the queue is empty it will wait for
2448 *      more FIBs.
2449 */
2450
2451int aac_command_thread(void *data)
2452{
2453        struct aac_dev *dev = data;
2454        DECLARE_WAITQUEUE(wait, current);
2455        unsigned long next_jiffies = jiffies + HZ;
2456        unsigned long next_check_jiffies = next_jiffies;
2457        long difference = HZ;
2458
2459        /*
2460         *      We can only have one thread per adapter for AIFs.
2461         */
2462        if (dev->aif_thread)
2463                return -EINVAL;
2464
2465        /*
2466         *      Let the DPC know it has a place to send the AIFs to.
2467         */
2468        dev->aif_thread = 1;
2469        add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2470        set_current_state(TASK_INTERRUPTIBLE);
2471        dprintk ((KERN_INFO "aac_command_thread start\n"));
2472        while (1) {
2473
2474                aac_process_events(dev);
2475
2476                /*
2477                 *      Background activity
2478                 */
2479                if ((time_before(next_check_jiffies, next_jiffies))
2480                 && ((difference = next_check_jiffies - jiffies) <= 0)) {
2481                        next_check_jiffies = next_jiffies;
2482                        if (aac_adapter_check_health(dev) == 0) {
2483                                difference = ((long)(unsigned)check_interval)
2484                                           * HZ;
2485                                next_check_jiffies = jiffies + difference;
2486                        } else if (!dev->queues)
2487                                break;
2488                }
2489                if (!time_before(next_check_jiffies, next_jiffies)
2490                 && ((difference = next_jiffies - jiffies) <= 0)) {
2491                        struct timespec64 now;
2492                        int ret;
2493
2494                        /* Don't even try to talk to the adapter if it's sick */
2495                        ret = aac_adapter_check_health(dev);
2496                        if (ret || !dev->queues)
2497                                break;
2498                        next_check_jiffies = jiffies
2499                                           + ((long)(unsigned)check_interval)
2500                                           * HZ;
2501                        ktime_get_real_ts64(&now);
2502
2503                        /* Synchronize our watches */
2504                        if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
2505                         && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
2506                                difference = HZ + HZ / 2 -
2507                                             now.tv_nsec / (NSEC_PER_SEC / HZ);
2508                        else {
2509                                if (now.tv_nsec > NSEC_PER_SEC / 2)
2510                                        ++now.tv_sec;
2511
2512                                if (dev->sa_firmware)
2513                                        ret = aac_send_safw_hostttime(dev, &now);
2515                                else
2516                                        ret = aac_send_hosttime(dev, &now);
2517
2518                                difference = (long)(unsigned)update_interval*HZ;
2519                        }
2520                        next_jiffies = jiffies + difference;
2521                        if (time_before(next_check_jiffies, next_jiffies))
2522                                difference = next_check_jiffies - jiffies;
2523                }
2524                if (difference <= 0)
2525                        difference = 1;
2526                set_current_state(TASK_INTERRUPTIBLE);
2527
2528                if (kthread_should_stop())
2529                        break;
2530
2531                /*
2532                 * we probably want usleep_range() here instead of the
2533                 * jiffies computation
2534                 */
2535                schedule_timeout(difference);
2536
2537                if (kthread_should_stop())
2538                        break;
2539        }
2540        if (dev->queues)
2541                remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2542        dev->aif_thread = 0;
2543        return 0;
2544}
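
    /*
     * Illustrative clock-sync arithmetic from the loop above, assuming
     * HZ = 1000 so that one jiffy is NSEC_PER_SEC/HZ = 1 ms: if the
     * time-of-day read lands at X.300 s, then
     *
     *    difference = HZ + HZ/2 - 300 = 1200 jiffies
     *
     * and the thread wakes near X + 1.5 s, mid-second, where rounding
     * tv_nsec to the nearest second for the adapter is unambiguous.
     */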
2545
2546int aac_acquire_irq(struct aac_dev *dev)
2547{
2548        int i;
2549        int j;
2550        int ret = 0;
2551
2552        if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
2553                for (i = 0; i < dev->max_msix; i++) {
2554                        dev->aac_msix[i].vector_no = i;
2555                        dev->aac_msix[i].dev = dev;
2556                        if (request_irq(pci_irq_vector(dev->pdev, i),
2557                                        dev->a_ops.adapter_intr,
2558                                        0, "aacraid", &(dev->aac_msix[i]))) {
2559                                printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
2560                                                dev->name, dev->id, i);
2561                                for (j = 0 ; j < i ; j++)
2562                                        free_irq(pci_irq_vector(dev->pdev, j),
2563                                                 &(dev->aac_msix[j]));
2564                                pci_disable_msix(dev->pdev);
2565                                ret = -1;
2566                        }
2567                }
2568        } else {
2569                dev->aac_msix[0].vector_no = 0;
2570                dev->aac_msix[0].dev = dev;
2571
2572                if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
2573                        IRQF_SHARED, "aacraid",
2574                        &(dev->aac_msix[0])) < 0) {
2575                        if (dev->msi)
2576                                pci_disable_msi(dev->pdev);
2577                        printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
2578                                        dev->name, dev->id);
2579                        ret = -1;
2580                }
2581        }
2582        return ret;
2583}
2584
2585void aac_free_irq(struct aac_dev *dev)
2586{
2587        int i;
2588
2589        if (aac_is_src(dev)) {
2590                if (dev->max_msix > 1) {
2591                        for (i = 0; i < dev->max_msix; i++)
2592                                free_irq(pci_irq_vector(dev->pdev, i),
2593                                         &(dev->aac_msix[i]));
2594                } else {
2595                        free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
2596                }
2597        } else {
2598                free_irq(dev->pdev->irq, dev);
2599        }
2600        if (dev->msi)
2601                pci_disable_msi(dev->pdev);
2602        else if (dev->max_msix > 1)
2603                pci_disable_msix(dev->pdev);
2604}
2605