linux/drivers/scsi/aacraid/commsup.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Adaptec AAC series RAID controller driver
 *      (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines that are required for FSA host/adapter
 *    communication.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"

/**
 *      fib_map_alloc           -       allocate the fib objects
 *      @dev: Adapter to allocate for
 *
 *      Allocate and map the shared PCI space for the FIB blocks used to
 *      talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
        /*
         * Every hardware FIB slot must be able to hold a native HBA
         * command, so size the slots by max_fib_size but never below
         * AAC_MAX_NATIVE_SIZE.
         */
        if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE)
                dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
        else
                dev->max_cmd_size = dev->max_fib_size;

        dprintk((KERN_INFO
          "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
          &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
          AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
        dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
                (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
                * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
                &dev->hw_fib_pa, GFP_KERNEL);
        if (dev->hw_fib_va == NULL)
                return -ENOMEM;
        return 0;
}
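
/*
 * Sizing sketch (illustrative, not driver code): with a hypothetical
 * can_queue of 256 and a 2 KiB max_cmd_size, the coherent allocation in
 * fib_map_alloc() works out to
 *
 *      (2048 + sizeof(struct aac_fib_xporthdr)) * (256 + AAC_NUM_MGT_FIB)
 *              + (ALIGN32 - 1)
 *
 * bytes: one transport header plus one hardware FIB per slot, with
 * ALIGN32 - 1 bytes of slack so aac_fib_setup() can round hw_fib_pa up
 * to a 32-byte boundary without overrunning the buffer.
 */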

/**
 *      aac_fib_map_free                -       free the fib objects
 *      @dev: Adapter to free
 *
 *      Free the PCI mappings and the memory allocated for FIB blocks
 *      on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
        size_t alloc_size;
        size_t fib_size;
        int num_fibs;

        if (!dev->hw_fib_va || !dev->max_cmd_size)
                return;

        num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
        /* mirror the slot size used in fib_map_alloc() */
        fib_size = dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
        alloc_size = fib_size * num_fibs + ALIGN32 - 1;

        dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
                          dev->hw_fib_pa);

        dev->hw_fib_va = NULL;
        dev->hw_fib_pa = 0;
}

void aac_fib_vector_assign(struct aac_dev *dev)
{
        u32 i = 0;
        u32 vector = 1;
        struct fib *fibptr = NULL;

        for (i = 0, fibptr = &dev->fibs[i];
                i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
                i++, fibptr++) {
                if ((dev->max_msix == 1) ||
                  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
                        - dev->vector_cap))) {
                        fibptr->vector_no = 0;
                } else {
                        fibptr->vector_no = vector;
                        vector++;
                        if (vector == dev->max_msix)
                                vector = 1;
                }
        }
}
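
/*
 * Distribution sketch (illustrative, not driver code): with, say,
 * max_msix == 4 the loop above deals vectors 1, 2, 3, 1, 2, 3, ... to
 * the I/O fibs (vector 0 is never handed to them), while the trailing
 * dev->vector_cap fibs - and every fib when only one MSI-X vector is
 * available - are pinned to vector 0.
 */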

/**
 *      aac_fib_setup   -       setup the fibs
 *      @dev: Adapter to set up
 *
 *      Allocate the PCI space for the fibs, map it and then initialise the
 *      fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev *dev)
{
        struct fib *fibptr;
        struct hw_fib *hw_fib;
        dma_addr_t hw_fib_pa;
        int i;
        u32 max_cmds;

        while (((i = fib_map_alloc(dev)) == -ENOMEM)
         && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
                max_cmds = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1;
                dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
                if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
                        dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
        }
        if (i < 0)
                return -ENOMEM;

        memset(dev->hw_fib_va, 0,
                (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
                (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

        /* 32 byte alignment for PMC */
        hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
        hw_fib    = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
                                        (hw_fib_pa - dev->hw_fib_pa));

        /* add Xport header */
        hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
                sizeof(struct aac_fib_xporthdr));
        hw_fib_pa += sizeof(struct aac_fib_xporthdr);

        /*
         *      Initialise the fibs
         */
        for (i = 0, fibptr = &dev->fibs[i];
                i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
                i++, fibptr++)
        {
                fibptr->flags = 0;
                fibptr->size = sizeof(struct fib);
                fibptr->dev = dev;
                fibptr->hw_fib_va = hw_fib;
                fibptr->data = (void *) fibptr->hw_fib_va->data;
                fibptr->next = fibptr+1;        /* Forward chain the fibs */
                init_completion(&fibptr->event_wait);
                spin_lock_init(&fibptr->event_lock);
                hw_fib->header.XferState = cpu_to_le32(0xffffffff);
                hw_fib->header.SenderSize =
                        cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */
                fibptr->hw_fib_pa = hw_fib_pa;
                fibptr->hw_sgl_pa = hw_fib_pa +
                        offsetof(struct aac_hba_cmd_req, sge[2]);
                /*
                 * one element is for the ptr to the separate sg list,
                 * second element for 32 byte alignment
                 */
                fibptr->hw_error_pa = hw_fib_pa +
                        offsetof(struct aac_native_hba, resp.resp_bytes[0]);

                hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
                        dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
                hw_fib_pa = hw_fib_pa +
                        dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
        }

        /*
         *      Assign vector numbers to fibs
         */
        aac_fib_vector_assign(dev);

        /*
         *      Add the fib chain to the free list
         */
        dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
        /*
         *      Set 8 fibs aside for management tools
         */
        dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
        return 0;
}
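
/*
 * Layout sketch (illustrative, not driver code) of the region carved up
 * by aac_fib_setup(): each slot spans dev->max_cmd_size +
 * sizeof(struct aac_fib_xporthdr) bytes, and each fibptr->hw_fib_va
 * points just past its slot's transport header.
 *
 *      | pad to ALIGN32 | xporthdr | hw_fib 0 | xporthdr | hw_fib 1 | ...
 *                                  ^ fibs[0]             ^ fibs[1]
 */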

/**
 *      aac_fib_alloc_tag       -       allocate a fib using tags
 *      @dev: Adapter to allocate the fib for
 *      @scmd: SCSI command whose blk-layer tag selects the fib
 *
 *      Allocate a fib from the adapter fib pool using tags
 *      from the blk layer.
 */

struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
        struct fib *fibptr;

        fibptr = &dev->fibs[scmd->request->tag];
        /*
         *      Null out fields that depend on being zero at the start of
         *      each I/O
         */
        fibptr->hw_fib_va->header.XferState = 0;
        fibptr->type = FSAFS_NTC_FIB_CONTEXT;
        fibptr->callback_data = NULL;
        fibptr->callback = NULL;

        return fibptr;
}
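
/*
 * Usage sketch (illustrative, not driver code): a queuecommand-style
 * path picks the fib indexed by the command's blk-layer tag and then
 * initialises it before building the request. No matching free step is
 * needed; the slot belongs to the tag for the lifetime of the command.
 *
 *      fibptr = aac_fib_alloc_tag(dev, scmd);
 *      aac_fib_init(fibptr);
 *      // ... build the command, then aac_fib_send() ...
 */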

/**
 *      aac_fib_alloc   -       allocate a fib
 *      @dev: Adapter to allocate the fib for
 *
 *      Allocate a fib from the adapter fib pool. If the pool is empty we
 *      return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
        struct fib *fibptr;
        unsigned long flags;

        spin_lock_irqsave(&dev->fib_lock, flags);
        fibptr = dev->free_fib;
        if (!fibptr) {
                spin_unlock_irqrestore(&dev->fib_lock, flags);
                return fibptr;
        }
        dev->free_fib = fibptr->next;
        spin_unlock_irqrestore(&dev->fib_lock, flags);
        /*
         *      Set the proper node type code and node byte size
         */
        fibptr->type = FSAFS_NTC_FIB_CONTEXT;
        fibptr->size = sizeof(struct fib);
        /*
         *      Null out fields that depend on being zero at the start of
         *      each I/O
         */
        fibptr->hw_fib_va->header.XferState = 0;
        fibptr->flags = 0;
        fibptr->callback = NULL;
        fibptr->callback_data = NULL;

        return fibptr;
}

/**
 *      aac_fib_free    -       free a fib
 *      @fibptr: fib to free up
 *
 *      Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
        unsigned long flags;

        if (fibptr->done == 2)
                return;

        spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
        if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
                aac_config.fib_timeouts++;
        if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
                fibptr->hw_fib_va->header.XferState != 0) {
                printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
                         (void *)fibptr,
                         le32_to_cpu(fibptr->hw_fib_va->header.XferState));
        }
        fibptr->next = fibptr->dev->free_fib;
        fibptr->dev->free_fib = fibptr;
        spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
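
/*
 * Pairing sketch (illustrative, not driver code): internal requests that
 * have no blk-layer tag take a fib from the free list and must return it
 * once the exchange is finished.
 *
 *      struct fib *fibptr = aac_fib_alloc(dev);
 *
 *      if (!fibptr)
 *              return -ENOMEM;
 *      aac_fib_init(fibptr);
 *      // ... aac_fib_send(), consume the reply ...
 *      aac_fib_complete(fibptr);
 *      aac_fib_free(fibptr);
 */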

/**
 *      aac_fib_init    -       initialise a fib
 *      @fibptr: The fib to initialize
 *
 *      Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;

        memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
        hw_fib->header.StructType = FIB_MAGIC;
        hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
        hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
        hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
        hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *      fib_dealloc             -       deallocate a fib
 *      @fibptr: fib to deallocate
 *
 *      Will deallocate and return to the free pool the FIB pointed to by the
 *      caller.
 */

static void fib_dealloc(struct fib *fibptr)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;

        hw_fib->header.XferState = 0;
}

/*
 *      Communication primitives define and support the queuing method we use
 *      to support host to adapter communication. All queue accesses happen
 *      through these routines and they are the only routines which have
 *      knowledge of how these queues are implemented.
 */

/**
 *      aac_get_entry           -       get a queue entry
 *      @dev: Adapter
 *      @qid: Queue Number
 *      @entry: Entry return
 *      @index: Index return
 *      @nonotify: notification control
 *
 *      Returns a queue entry if the queue has free entries. If the queue
 *      is full (no free entries) then no entry is returned and the function
 *      returns 0; otherwise 1 is returned.
 */

static int aac_get_entry(struct aac_dev *dev, u32 qid, struct aac_entry **entry, u32 *index, unsigned long *nonotify)
{
        struct aac_queue *q;
        unsigned long idx;

        /*
         *      All of the queues wrap when they reach the end, so we check
         *      to see if they have reached the end and if they have we just
         *      set the index back to zero. This is a wrap. You could or off
         *      the high bits in all updates but this is a bit faster I think.
         */

        q = &dev->queues->queue[qid];

        idx = *index = le32_to_cpu(*(q->headers.producer));
        /* Interrupt Moderation, only interrupt for first two entries */
        if (idx != le32_to_cpu(*(q->headers.consumer))) {
                if (--idx == 0) {
                        if (qid == AdapNormCmdQueue)
                                idx = ADAP_NORM_CMD_ENTRIES;
                        else
                                idx = ADAP_NORM_RESP_ENTRIES;
                }
                if (idx != le32_to_cpu(*(q->headers.consumer)))
                        *nonotify = 1;
        }

        if (qid == AdapNormCmdQueue) {
                if (*index >= ADAP_NORM_CMD_ENTRIES)
                        *index = 0; /* Wrap to front of the Producer Queue. */
        } else {
                if (*index >= ADAP_NORM_RESP_ENTRIES)
                        *index = 0; /* Wrap to front of the Producer Queue. */
        }

        /* Queue is full */
        if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
                printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
                                qid, atomic_read(&q->numpending));
                return 0;
        } else {
                *entry = q->base + *index;
                return 1;
        }
}
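
/*
 * Ring sketch (illustrative, not driver code): aac_get_entry() treats
 * the producer and consumer indices as a classic one-slot-open ring, so
 * a queue with N entries holds at most N - 1 outstanding commands.
 *
 *      producer == consumer              queue empty
 *      (producer + 1) mod N == consumer  queue full, aac_get_entry()
 *                                        returns 0
 */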

/**
 *      aac_queue_get           -       get the next free QE
 *      @dev: Adapter
 *      @index: Returned index
 *      @qid: Queue number
 *      @hw_fib: Fib to associate with the queue entry
 *      @wait: Wait if queue full
 *      @fibptr: Driver fib object to go with fib
 *      @nonotify: Don't notify the adapter
 *
 *      Gets the next free QE off the requested priority adapter command
 *      queue and associates the Fib with the QE. The QE represented by
 *      index is ready to insert on the queue when this routine returns
 *      success.
 */

int aac_queue_get(struct aac_dev *dev, u32 *index, u32 qid, struct hw_fib *hw_fib, int wait, struct fib *fibptr, unsigned long *nonotify)
{
        struct aac_entry *entry = NULL;
        int map = 0;

        if (qid == AdapNormCmdQueue) {
                /* if no entries wait for some if caller wants to */
                while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
                        printk(KERN_ERR "GetEntries failed\n");
                }
                /*
                 *      Setup queue entry with a command, status and fib mapped
                 */
                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
                map = 1;
        } else {
                while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
                        /* if no entries wait for some if caller wants to */
                }
                /*
                 *      Setup queue entry with command, status and fib mapped
                 */
                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
                entry->addr = hw_fib->header.SenderFibAddress;
                        /* Restore adapters pointer to the FIB */
                hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;  /* Let the adapter know where to find its data */
                map = 0;
        }
        /*
         *      If MapFib is true then we need to map the Fib and put pointers
         *      in the queue entry.
         */
        if (map)
                entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
        return 0;
}

/*
 *      Define the highest level of host to adapter communication routines.
 *      These routines will support host to adapter FS communication. These
 *      routines have no knowledge of the communication method used. This level
 *      sends and receives FIBs. This level has no knowledge of how these FIBs
 *      get passed back and forth.
 */

/**
 *      aac_fib_send    -       send a fib to the adapter
 *      @command: Command to send
 *      @fibptr: The fib
 *      @size: Size of fib data area
 *      @priority: Priority of Fib
 *      @wait: Async/sync select
 *      @reply: True if a reply is wanted
 *      @callback: Called with reply
 *      @callback_data: Passed to callback
 *
 *      Sends the requested FIB to the adapter and optionally will wait for a
 *      response FIB. If the caller does not wish to wait for a response then
 *      an event to wait on must be supplied. This event will be set when a
 *      response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                int priority, int wait, int reply, fib_callback callback,
                void *callback_data)
{
        struct aac_dev *dev = fibptr->dev;
        struct hw_fib *hw_fib = fibptr->hw_fib_va;
        unsigned long flags = 0;
        unsigned long mflags = 0;
        unsigned long sflags = 0;

        if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
                return -EBUSY;

        if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
                return -EINVAL;

        /*
         *      There are 5 cases with the wait and response requested flags.
         *      The only invalid cases are if the caller requests to wait and
         *      does not request a response and if the caller does not want a
         *      response and the Fib is not allocated from pool. If a response
         *      is not requested the Fib will just be deallocated by the DPC
         *      routine when the response comes back from the adapter. No
         *      further processing will be done besides deleting the Fib. We
         *      will have a debug mode where the adapter can notify the host
         *      it had a problem and the host can log that fact.
         */
        fibptr->flags = 0;
        if (wait && !reply) {
                return -EINVAL;
        } else if (!wait && reply) {
                hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
        } else if (!wait && !reply) {
                hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
        } else if (wait && reply) {
                hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.NormalSent);
        }
        /*
         *      Map the fib into 32bits by using the fib number
         */

        hw_fib->header.SenderFibAddress =
                cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);

        /* use the same shifted value for handle to be compatible
         * with the new native hba command handle
         */
        hw_fib->header.Handle =
                cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);

        /*
         *      Set FIB state to indicate where it came from and if we want a
         *      response from the adapter. Also load the command from the
         *      caller.
         *
         *      Map the hw fib pointer as a 32bit value
         */
        hw_fib->header.Command = cpu_to_le16(command);
        hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
        /*
         *      Set the size of the Fib we want to send to the adapter
         */
        hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
        if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
                return -EMSGSIZE;
        }
        /*
         *      Get a queue entry, connect the FIB to it and notify the
         *      adapter that a command is ready.
         */
        hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

        /*
         *      Fill in the Callback and CallbackContext if we are not
         *      going to wait.
         */
        if (!wait) {
                fibptr->callback = callback;
                fibptr->callback_data = callback_data;
                fibptr->flags = FIB_CONTEXT_FLAG;
        }

        fibptr->done = 0;

        FIB_COUNTER_INCREMENT(aac_config.FibsSent);

        dprintk((KERN_DEBUG "Fib contents:.\n"));
        dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
        dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
        dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
        dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
        dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
        dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

        if (!dev->queues)
                return -EBUSY;

        if (wait) {

                spin_lock_irqsave(&dev->manage_lock, mflags);
                if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
                        printk(KERN_INFO "No management Fibs Available:%d\n",
                                                dev->management_fib_count);
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
                        return -EBUSY;
                }
                dev->management_fib_count++;
                spin_unlock_irqrestore(&dev->manage_lock, mflags);
                spin_lock_irqsave(&fibptr->event_lock, flags);
        }

        if (dev->sync_mode) {
                if (wait)
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                spin_lock_irqsave(&dev->sync_lock, sflags);
                if (dev->sync_fib) {
                        list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
                        spin_unlock_irqrestore(&dev->sync_lock, sflags);
                } else {
                        dev->sync_fib = fibptr;
                        spin_unlock_irqrestore(&dev->sync_lock, sflags);
                        aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
                                (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
                                NULL, NULL, NULL, NULL, NULL);
                }
                if (wait) {
                        fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
                        if (wait_for_completion_interruptible(&fibptr->event_wait)) {
                                fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
                                return -EFAULT;
                        }
                        return 0;
                }
                return -EINPROGRESS;
        }

        if (aac_adapter_deliver(fibptr) != 0) {
                printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
                if (wait) {
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
                }
                return -EBUSY;
        }

        /*
         *      If the caller wanted us to wait for response wait now.
         */

        if (wait) {
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
                /* Only set for first known interruptible command */
                if (wait < 0) {
                        /*
                         * *VERY* Dangerous to time out a command, the
                         * assumption is made that we have no hope of
                         * functioning because an interrupt routing or other
                         * hardware failure has occurred.
                         */
                        unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
                        while (!try_wait_for_completion(&fibptr->event_wait)) {
                                int blink;
                                if (time_is_before_eq_jiffies(timeout)) {
                                        struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
                                        atomic_dec(&q->numpending);
                                        if (wait == -1) {
                                                printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
                                                  "Usually a result of a PCI interrupt routing problem;\n"
                                                  "update mother board BIOS or consider utilizing one of\n"
                                                  "the SAFE mode kernel options (acpi, apic etc)\n");
                                        }
                                        return -ETIMEDOUT;
                                }

                                if (unlikely(aac_pci_offline(dev)))
                                        return -EFAULT;

                                if ((blink = aac_adapter_check_health(dev)) > 0) {
                                        if (wait == -1) {
                                                printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
                                                  "Usually a result of a serious unrecoverable hardware problem\n",
                                                  blink);
                                        }
                                        return -EFAULT;
                                }
                                /*
                                 * Allow other processes / CPUS to use core
                                 */
                                schedule();
                        }
                } else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
                        /* Do nothing ... satisfy
                         * wait_for_completion_interruptible must_check */
                }

                spin_lock_irqsave(&fibptr->event_lock, flags);
                if (fibptr->done == 0) {
                        fibptr->done = 2; /* Tell interrupt we aborted */
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                        return -ERESTARTSYS;
                }
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
                BUG_ON(fibptr->done == 0);

                if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
                        return -ETIMEDOUT;
                return 0;
        }
        /*
         *      If the user does not want a response then return success;
         *      otherwise return pending
         */
        if (reply)
                return -EINPROGRESS;
        else
                return 0;
}
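
/*
 * Call sketch (illustrative, not driver code): a synchronous query waits
 * in place and reuses the fib buffer for the reply; ContainerCommand and
 * the payload handling stand in for whatever a real caller builds.
 *
 *      aac_fib_init(fibptr);
 *      // ... build the request in fib_data(fibptr), datasize bytes ...
 *      status = aac_fib_send(ContainerCommand, fibptr, datasize,
 *                            FsaNormal, 1, 1, NULL, NULL);
 *      if (status >= 0)
 *              // ... the response is back in fib_data(fibptr) ...
 */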

/**
 *      aac_hba_send    -       send a native HBA fib to the adapter
 *      @command: IU type of the request (HBA_IU_TYPE_SCSI_CMD_REQ or
 *                HBA_IU_TYPE_SCSI_TM_REQ)
 *      @fibptr: The fib carrying the struct aac_hba_cmd_req
 *      @callback: Called with the reply; if NULL the call is synchronous
 *      @callback_data: Passed to callback
 *
 *      Sends a native HBA request to the adapter. The request is issued
 *      asynchronously if a callback is supplied, otherwise the routine
 *      waits for the command to complete.
 */

int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
                void *callback_data)
{
        struct aac_dev *dev = fibptr->dev;
        int wait;
        unsigned long flags = 0;
        unsigned long mflags = 0;
        struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
                        fibptr->hw_fib_va;

        fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
        if (callback) {
                wait = 0;
                fibptr->callback = callback;
                fibptr->callback_data = callback_data;
        } else
                wait = 1;

        hbacmd->iu_type = command;

        if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
                /* bit1 of request_id must be 0 */
                hbacmd->request_id =
                        cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
                fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
        } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
                return -EINVAL;

        if (wait) {
                spin_lock_irqsave(&dev->manage_lock, mflags);
                if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
                        return -EBUSY;
                }
                dev->management_fib_count++;
                spin_unlock_irqrestore(&dev->manage_lock, mflags);
                spin_lock_irqsave(&fibptr->event_lock, flags);
        }

        if (aac_adapter_deliver(fibptr) != 0) {
                if (wait) {
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
                }
                return -EBUSY;
        }
        FIB_COUNTER_INCREMENT(aac_config.NativeSent);

        if (wait) {

                spin_unlock_irqrestore(&fibptr->event_lock, flags);

                if (unlikely(aac_pci_offline(dev)))
                        return -EFAULT;

                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
                if (wait_for_completion_interruptible(&fibptr->event_wait))
                        fibptr->done = 2;
                fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

                spin_lock_irqsave(&fibptr->event_lock, flags);
                if ((fibptr->done == 0) || (fibptr->done == 2)) {
                        fibptr->done = 2; /* Tell interrupt we aborted */
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                        return -ERESTARTSYS;
                }
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
                WARN_ON(fibptr->done == 0);

                if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
                        return -ETIMEDOUT;

                return 0;
        }

        return -EINPROGRESS;
}
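
/*
 * Call sketch (illustrative, not driver code): the native path takes an
 * IU type rather than a FIB command; an asynchronous SCSI request would
 * go out roughly as below, where my_done is a hypothetical fib_callback
 * that completes the scsi_cmnd.
 *
 *      hbacmd = (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
 *      // ... fill in the CDB, SGL and target address fields ...
 *      status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fibptr,
 *                            my_done, (void *)scmd);
 */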

/**
 *      aac_consumer_get        -       get the top of the queue
 *      @dev: Adapter
 *      @q: Queue
 *      @entry: Return entry
 *
 *      Returns, via @entry, a pointer to the entry at the top of the
 *      requested queue that we are a consumer of. It does not change the
 *      state of the queue.
 */

int aac_consumer_get(struct aac_dev *dev, struct aac_queue *q, struct aac_entry **entry)
{
        u32 index;
        int status;

        if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
                status = 0;
        } else {
                /*
                 *      The consumer index must be wrapped if we have reached
                 *      the end of the queue, else we just use the entry
                 *      pointed to by the header index
                 */
                if (le32_to_cpu(*q->headers.consumer) >= q->entries)
                        index = 0;
                else
                        index = le32_to_cpu(*q->headers.consumer);
                *entry = q->base + index;
                status = 1;
        }
        return status;
}

/**
 *      aac_consumer_free       -       free consumer entry
 *      @dev: Adapter
 *      @q: Queue
 *      @qid: Queue ident
 *
 *      Frees up the current top of the queue we are a consumer of. If the
 *      queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev *dev, struct aac_queue *q, u32 qid)
{
        int wasfull = 0;
        u32 notify;

        if ((le32_to_cpu(*q->headers.producer) + 1) == le32_to_cpu(*q->headers.consumer))
                wasfull = 1;

        if (le32_to_cpu(*q->headers.consumer) >= q->entries)
                *q->headers.consumer = cpu_to_le32(1);
        else
                le32_add_cpu(q->headers.consumer, 1);

        if (wasfull) {
                switch (qid) {

                case HostNormCmdQueue:
                        notify = HostNormCmdNotFull;
                        break;
                case HostNormRespQueue:
                        notify = HostNormRespNotFull;
                        break;
                default:
                        BUG();
                        return;
                }
                aac_adapter_notify(dev, notify);
        }
}
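
/*
 * Drain sketch (illustrative, not driver code): a host-side queue is
 * consumed by pairing the two helpers above, peeking at the head entry
 * and then advancing the consumer index.
 *
 *      while (aac_consumer_get(dev, q, &entry)) {
 *              // ... process entry->addr / entry->size ...
 *              aac_consumer_free(dev, q, HostNormCmdQueue);
 *      }
 */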

/**
 *      aac_fib_adapter_complete        -       complete adapter issued fib
 *      @fibptr: fib to complete
 *      @size: size of fib
 *
 *      Will do all necessary work to complete a FIB that was sent from
 *      the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;
        struct aac_dev *dev = fibptr->dev;
        struct aac_queue *q;
        unsigned long nointr = 0;
        unsigned long qflags;

        if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
                dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
                dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
                kfree(hw_fib);
                return 0;
        }

        if (hw_fib->header.XferState == 0) {
                if (dev->comm_interface == AAC_COMM_MESSAGE)
                        kfree(hw_fib);
                return 0;
        }
        /*
         *      If we plan to do anything check the structure type first.
         */
        if (hw_fib->header.StructType != FIB_MAGIC &&
            hw_fib->header.StructType != FIB_MAGIC2 &&
            hw_fib->header.StructType != FIB_MAGIC2_64) {
                if (dev->comm_interface == AAC_COMM_MESSAGE)
                        kfree(hw_fib);
                return -EINVAL;
        }
        /*
         *      This block handles the case where the adapter had sent us a
         *      command and we have finished processing the command. We
         *      call completeFib when we are done processing the command
         *      and want to send a response back to the adapter. This will
         *      send the completed cdb to the adapter.
         */
        if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
                if (dev->comm_interface == AAC_COMM_MESSAGE) {
                        kfree(hw_fib);
                } else {
                        u32 index;

                        hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
                        if (size) {
                                size += sizeof(struct aac_fibhdr);
                                if (size > le16_to_cpu(hw_fib->header.SenderSize))
                                        return -EMSGSIZE;
                                hw_fib->header.Size = cpu_to_le16(size);
                        }
                        q = &dev->queues->queue[AdapNormRespQueue];
                        spin_lock_irqsave(q->lock, qflags);
                        aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
                        *(q->headers.producer) = cpu_to_le32(index + 1);
                        spin_unlock_irqrestore(q->lock, qflags);
                        if (!(nointr & (int)aac_config.irq_mod))
                                aac_adapter_notify(dev, AdapNormRespQueue);
                }
        } else {
                printk(KERN_WARNING "aac_fib_adapter_complete: "
                        "Unknown xferstate detected.\n");
                BUG();
        }
        return 0;
}

/**
 *      aac_fib_complete        -       fib completion handler
 *      @fibptr: FIB to complete
 *
 *      Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;

        if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
                fib_dealloc(fibptr);
                return 0;
        }

        /*
         *      Check for a fib which has already been completed or with a
         *      status wait timeout
         */

        if (hw_fib->header.XferState == 0 || fibptr->done == 2)
                return 0;
        /*
         *      If we plan to do anything check the structure type first.
         */

        if (hw_fib->header.StructType != FIB_MAGIC &&
            hw_fib->header.StructType != FIB_MAGIC2 &&
            hw_fib->header.StructType != FIB_MAGIC2_64)
                return -EINVAL;
        /*
         *      This block completes a cdb which originated on the host and we
         *      just need to deallocate the cdb or reinit it. At this point the
         *      command is complete that we had sent to the adapter and this
         *      cdb could be reused.
         */

        if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
                (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
                fib_dealloc(fibptr);
        } else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
                /*
                 *      This handles the case when the host has aborted the I/O
                 *      to the adapter because the adapter is not responding
                 */
                fib_dealloc(fibptr);
        } else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
                fib_dealloc(fibptr);
        } else {
                BUG();
        }
        return 0;
}

/**
 *      aac_printf      -       handle printf from firmware
 *      @dev: Adapter
 *      @val: Message info
 *
 *      Print a message passed to us by the controller firmware on the
 *      Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
        char *cp = dev->printfbuf;

        if (dev->printf_enabled) {
                int length = val & 0xffff;
                int level = (val >> 16) & 0xffff;

                /*
                 *      The size of the printfbuf is set in port.c.
                 *      There is no variable or define for it.
                 */
                if (length > 255)
                        length = 255;
                if (cp[length] != 0)
                        cp[length] = 0;
                if (level == LOG_AAC_HIGH_ERROR)
                        printk(KERN_WARNING "%s:%s", dev->name, cp);
                else
                        printk(KERN_INFO "%s:%s", dev->name, cp);
        }
        memset(cp, 0, 256);
}
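
/*
 * Encoding sketch (illustrative, not driver code), matching the
 * unpacking above: the firmware packs the severity into the high 16 bits
 * of val and the string length into the low 16 bits, so a 32-byte
 * high-severity message would arrive as
 *
 *      val = (LOG_AAC_HIGH_ERROR << 16) | 32;
 */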

static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
{
        return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
}

static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
{
        switch (aac_aif_data(aifcmd, 1)) {
        case AifBuCacheDataLoss:
                if (aac_aif_data(aifcmd, 2))
                        dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
                                 aac_aif_data(aifcmd, 2));
                else
                        dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
                break;
        case AifBuCacheDataRecover:
                if (aac_aif_data(aifcmd, 2))
                        dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
                                 aac_aif_data(aifcmd, 2));
                else
                        dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
                break;
        }
}

/**
 *      aac_handle_aif          -       Handle a message from the firmware
 *      @dev: Which adapter this fib is from
 *      @fibptr: Pointer to fibptr from adapter
 *
 *      This routine handles a driver notify fib from the adapter and
 *      dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT       (500*HZ)
static void aac_handle_aif(struct aac_dev *dev, struct fib *fibptr)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;
        struct aac_aifcmd *aifcmd = (struct aac_aifcmd *)hw_fib->data;
        u32 channel, id, lun, container;
        struct scsi_device *device;
        enum {
                NOTHING,
                DELETE,
                ADD,
                CHANGE
        } device_config_needed = NOTHING;

        /* Sniff for container changes */

        if (!dev || !dev->fsa_dev)
                return;
        container = channel = id = lun = (u32)-1;

        /*
         *      We have set this up to try and minimize the number of
         * re-configures that take place. As a result of this when
         * certain AIF's come in we will set a flag waiting for another
         * type of AIF before setting the re-config flag.
         */
        switch (le32_to_cpu(aifcmd->command)) {
        case AifCmdDriverNotify:
                switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
                case AifRawDeviceRemove:
                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
                        if ((container >> 28)) {
                                container = (u32)-1;
                                break;
                        }
                        channel = (container >> 24) & 0xF;
                        if (channel >= dev->maximum_num_channels) {
                                container = (u32)-1;
                                break;
                        }
                        id = container & 0xFFFF;
                        if (id >= dev->maximum_num_physicals) {
                                container = (u32)-1;
                                break;
                        }
                        lun = (container >> 16) & 0xFF;
                        container = (u32)-1;
                        channel = aac_phys_to_logical(channel);
                        device_config_needed = DELETE;
                        break;

                /*
                 *      Morph or Expand complete
                 */
                case AifDenMorphComplete:
                case AifDenVolumeExtendComplete:
                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
                        if (container >= dev->maximum_num_containers)
                                break;

                        /*
                         *      Find the scsi_device associated with the SCSI
                         * address. Make sure we have the right array, and if
                         * so set the flag to initiate a new re-config once we
                         * see an AifEnConfigChange AIF come through.
                         */

                        if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
                                device = scsi_device_lookup(dev->scsi_host_ptr,
                                        CONTAINER_TO_CHANNEL(container),
                                        CONTAINER_TO_ID(container),
                                        CONTAINER_TO_LUN(container));
                                if (device) {
                                        dev->fsa_dev[container].config_needed = CHANGE;
                                        dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
                                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
                                        scsi_device_put(device);
                                }
                        }
                }

                /*
                 *      If we are waiting on something and this happens to be
                 * that thing then set the re-configure flag.
                 */
                if (container != (u32)-1) {
                        if (container >= dev->maximum_num_containers)
                                break;
                        if ((dev->fsa_dev[container].config_waiting_on ==
                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
                                dev->fsa_dev[container].config_waiting_on = 0;
                } else for (container = 0;
                    container < dev->maximum_num_containers; ++container) {
                        if ((dev->fsa_dev[container].config_waiting_on ==
                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
                                dev->fsa_dev[container].config_waiting_on = 0;
                }
                break;

1161        case AifCmdEventNotify:
1162                switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
1163                case AifEnBatteryEvent:
1164                        dev->cache_protected =
1165                                (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
1166                        break;
1167                /*
1168                 *      Add an Array.
1169                 */
1170                case AifEnAddContainer:
1171                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1172                        if (container >= dev->maximum_num_containers)
1173                                break;
1174                        dev->fsa_dev[container].config_needed = ADD;
1175                        dev->fsa_dev[container].config_waiting_on =
1176                                AifEnConfigChange;
1177                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1178                        break;
1179
1180                /*
1181                 *      Delete an Array.
1182                 */
1183                case AifEnDeleteContainer:
1184                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1185                        if (container >= dev->maximum_num_containers)
1186                                break;
1187                        dev->fsa_dev[container].config_needed = DELETE;
1188                        dev->fsa_dev[container].config_waiting_on =
1189                                AifEnConfigChange;
1190                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1191                        break;
1192
1193                /*
1194                 *      Container change detected. If we currently are not
1195                 * waiting on something else, setup to wait on a Config Change.
1196                 */
1197                case AifEnContainerChange:
1198                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1199                        if (container >= dev->maximum_num_containers)
1200                                break;
1201                        if (dev->fsa_dev[container].config_waiting_on &&
1202                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1203                                break;
1204                        dev->fsa_dev[container].config_needed = CHANGE;
1205                        dev->fsa_dev[container].config_waiting_on =
1206                                AifEnConfigChange;
1207                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1208                        break;
1209
1210                case AifEnConfigChange:
1211                        break;
1212
1213                case AifEnAddJBOD:
1214                case AifEnDeleteJBOD:
1215                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1216                        if ((container >> 28)) {
1217                                container = (u32)-1;
1218                                break;
1219                        }
1220                        channel = (container >> 24) & 0xF;
1221                        if (channel >= dev->maximum_num_channels) {
1222                                container = (u32)-1;
1223                                break;
1224                        }
1225                        id = container & 0xFFFF;
1226                        if (id >= dev->maximum_num_physicals) {
1227                                container = (u32)-1;
1228                                break;
1229                        }
1230                        lun = (container >> 16) & 0xFF;
1231                        container = (u32)-1;
1232                        channel = aac_phys_to_logical(channel);
1233                        device_config_needed =
1234                          (((__le32 *)aifcmd->data)[0] ==
1235                            cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
1236                        if (device_config_needed == ADD) {
1237                                device = scsi_device_lookup(dev->scsi_host_ptr,
1238                                        channel,
1239                                        id,
1240                                        lun);
1241                                if (device) {
1242                                        scsi_remove_device(device);
1243                                        scsi_device_put(device);
1244                                }
1245                        }
1246                        break;
1247
1248                case AifEnEnclosureManagement:
1249                        /*
1250                         * If in JBOD mode, automatic exposure of new
1251                         * physical target to be suppressed until configured.
1252                         */
1253                        if (dev->jbod)
1254                                break;
1255                        switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
1256                        case EM_DRIVE_INSERTION:
1257                        case EM_DRIVE_REMOVAL:
1258                        case EM_SES_DRIVE_INSERTION:
1259                        case EM_SES_DRIVE_REMOVAL:
1260                                container = le32_to_cpu(
1261                                        ((__le32 *)aifcmd->data)[2]);
1262                                if ((container >> 28)) {
1263                                        container = (u32)-1;
1264                                        break;
1265                                }
1266                                channel = (container >> 24) & 0xF;
1267                                if (channel >= dev->maximum_num_channels) {
1268                                        container = (u32)-1;
1269                                        break;
1270                                }
1271                                id = container & 0xFFFF;
1272                                lun = (container >> 16) & 0xFF;
1273                                container = (u32)-1;
1274                                if (id >= dev->maximum_num_physicals) {
1275                                        /* legacy dev_t ? */
1276                                        if ((0x2000 <= id) || lun || channel ||
1277                                          ((channel = (id >> 7) & 0x3F) >=
1278                                          dev->maximum_num_channels))
1279                                                break;
1280                                        lun = (id >> 4) & 7;
1281                                        id &= 0xF;
1282                                }
1283                                channel = aac_phys_to_logical(channel);
1284                                device_config_needed =
1285                                  ((((__le32 *)aifcmd->data)[3]
1286                                    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
1287                                    (((__le32 *)aifcmd->data)[3]
1288                                    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
1289                                  ADD : DELETE;
1290                                break;
1291                        }
1292                        break;
1293                case AifBuManagerEvent:
1294                        aac_handle_aif_bu(dev, aifcmd);
1295                        break;
1296                }
1297
1298                /*
1299                 *      If we are waiting on something and this happens to be
1300                 * that thing then set the re-configure flag.
1301                 */
1302                if (container != (u32)-1) {
1303                        if (container >= dev->maximum_num_containers)
1304                                break;
1305                        if ((dev->fsa_dev[container].config_waiting_on ==
1306                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1307                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1308                                dev->fsa_dev[container].config_waiting_on = 0;
1309                } else for (container = 0;
1310                    container < dev->maximum_num_containers; ++container) {
1311                        if ((dev->fsa_dev[container].config_waiting_on ==
1312                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1313                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1314                                dev->fsa_dev[container].config_waiting_on = 0;
1315                }
1316                break;
1317
1318        case AifCmdJobProgress:
1319                /*
1320                 *      These are job progress AIFs. When a Clear is being
1321                 * done on a container, it is initially created and then hidden
1322                 * from the OS. Because the clear's completion does not generate
1323                 * a config change, we monitor for job-status completion on a
1324                 * clear and then wait for a container change.
1325                 */
1326
1327                if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1328                    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
1329                     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
1330                        for (container = 0;
1331                            container < dev->maximum_num_containers;
1332                            ++container) {
1333                                /*
1334                                 * Stomp on all config sequencing for all
1335                                 * containers?
1336                                 */
1337                                dev->fsa_dev[container].config_waiting_on =
1338                                        AifEnContainerChange;
1339                                dev->fsa_dev[container].config_needed = ADD;
1340                                dev->fsa_dev[container].config_waiting_stamp =
1341                                        jiffies;
1342                        }
1343                }
1344                if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1345                    ((__le32 *)aifcmd->data)[6] == 0 &&
1346                    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
1347                        for (container = 0;
1348                            container < dev->maximum_num_containers;
1349                            ++container) {
1350                                /*
1351                                 * Stomp on all config sequencing for all
1352                                 * containers?
1353                                 */
1354                                dev->fsa_dev[container].config_waiting_on =
1355                                        AifEnContainerChange;
1356                                dev->fsa_dev[container].config_needed = DELETE;
1357                                dev->fsa_dev[container].config_waiting_stamp =
1358                                        jiffies;
1359                        }
1360                }
1361                break;
1362        }
1363
1364        container = 0;
1365retry_next:
1366        if (device_config_needed == NOTHING) {
1367                for (; container < dev->maximum_num_containers; ++container) {
1368                        if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1369                            (dev->fsa_dev[container].config_needed != NOTHING) &&
1370                            time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1371                                device_config_needed =
1372                                        dev->fsa_dev[container].config_needed;
1373                                dev->fsa_dev[container].config_needed = NOTHING;
1374                                channel = CONTAINER_TO_CHANNEL(container);
1375                                id = CONTAINER_TO_ID(container);
1376                                lun = CONTAINER_TO_LUN(container);
1377                                break;
1378                        }
1379                }
1380        }
1381        if (device_config_needed == NOTHING)
1382                return;
1383
1384        /*
1385         *      If we decided that a re-configuration needs to be done,
1386         * schedule it here on the way out the door; please close the door
1387         * behind you.
1388         */
1389
1390        /*
1391         *      Find the scsi_device associated with the SCSI address,
1392         * and mark it as changed, invalidating the cache. This deals
1393         * with changes to existing device IDs.
1394         */
1395
1396        if (!dev || !dev->scsi_host_ptr)
1397                return;
1398        /*
1399         * force reload of disk info via aac_probe_container
1400         */
1401        if ((channel == CONTAINER_CHANNEL) &&
1402          (device_config_needed != NOTHING)) {
1403                if (dev->fsa_dev[container].valid == 1)
1404                        dev->fsa_dev[container].valid = 2;
1405                aac_probe_container(dev, container);
1406        }
1407        device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
1408        if (device) {
1409                switch (device_config_needed) {
1410                case DELETE:
1411#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1412                        scsi_remove_device(device);
1413#else
1414                        if (scsi_device_online(device)) {
1415                                scsi_device_set_state(device, SDEV_OFFLINE);
1416                                sdev_printk(KERN_INFO, device,
1417                                        "Device offlined - %s\n",
1418                                        (channel == CONTAINER_CHANNEL) ?
1419                                                "array deleted" :
1420                                                "enclosure services event");
1421                        }
1422#endif
1423                        break;
1424                case ADD:
1425                        if (!scsi_device_online(device)) {
1426                                sdev_printk(KERN_INFO, device,
1427                                        "Device online - %s\n",
1428                                        (channel == CONTAINER_CHANNEL) ?
1429                                                "array created" :
1430                                                "enclosure services event");
1431                                scsi_device_set_state(device, SDEV_RUNNING);
1432                        }
1433                        /* FALLTHRU */
1434                case CHANGE:
1435                        if ((channel == CONTAINER_CHANNEL)
1436                         && (!dev->fsa_dev[container].valid)) {
1437#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1438                                scsi_remove_device(device);
1439#else
1440                                if (!scsi_device_online(device))
1441                                        break;
1442                                scsi_device_set_state(device, SDEV_OFFLINE);
1443                                sdev_printk(KERN_INFO, device,
1444                                        "Device offlined - %s\n",
1445                                        "array failed");
1446#endif
1447                                break;
1448                        }
1449                        scsi_rescan_device(&device->sdev_gendev);
1450
1451                default:
1452                        break;
1453                }
1454                scsi_device_put(device);
1455                device_config_needed = NOTHING;
1456        }
1457        if (device_config_needed == ADD)
1458                scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
1459        if (channel == CONTAINER_CHANNEL) {
1460                container++;
1461                device_config_needed = NOTHING;
1462                goto retry_next;
1463        }
1464}
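
/*
 * Illustrative sketch (not upstream code): the enclosure-management
 * events handled above carry a packed 32-bit "container" address.
 * From the decode in aac_handle_aif(), the layout is:
 *
 *	bits 31-28: must be zero, or the event is ignored
 *	bits 27-24: channel
 *	bits 23-16: lun
 *	bits 15-0 : target id
 *
 * A hypothetical helper making the unpacking explicit:
 */
#if 0
static void aac_decode_em_container(u32 container,
				    u32 *channel, u32 *id, u32 *lun)
{
	*channel = (container >> 24) & 0xF;
	*lun = (container >> 16) & 0xFF;
	*id = container & 0xFFFF;
}
#endif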
1465
1466static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1467{
1468        int index, quirks;
1469        int retval;
1470        struct Scsi_Host *host;
1471        struct scsi_device *dev;
1472        struct scsi_cmnd *command;
1473        struct scsi_cmnd *command_list;
1474        int jafo = 0;
1475        int bled;
1476        u64 dmamask;
1477        int num_of_fibs = 0;
1478
1479        /*
1480         * Assumptions:
1481         *      - host is locked, unless called by the aacraid thread.
1482         *        (a matter of convenience, due to legacy issues surrounding
1483         *        eh_host_adapter_reset).
1484         *      - in_reset is asserted, so no new i/o is getting to the
1485         *        card.
1486         *      - The card is dead, or will be very shortly ;-/ so no new
1487         *        commands are completing in the interrupt service.
1488         */
1489        host = aac->scsi_host_ptr;
1490        scsi_block_requests(host);
1491        aac_adapter_disable_int(aac);
1492        if (aac->thread && aac->thread->pid != current->pid) {
1493                spin_unlock_irq(host->host_lock);
1494                kthread_stop(aac->thread);
1495                aac->thread = NULL;
1496                jafo = 1;
1497        }
1498
1499        /*
1500         *      A positive health value means the adapter is in a known
1501         * DEAD PANIC state and can be reset to `try again'.
1502         */
1503        bled = forced ? 0 : aac_adapter_check_health(aac);
1504        retval = aac_adapter_restart(aac, bled, reset_type);
1505
1506        if (retval)
1507                goto out;
1508
1509        /*
1510         *      Loop through the fibs, close the synchronous FIBS
1511         */
1512        retval = 1;
1513        num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
1514        for (index = 0; index < num_of_fibs; index++) {
1515
1516                struct fib *fib = &aac->fibs[index];
1517                __le32 XferState = fib->hw_fib_va->header.XferState;
1518                bool is_response_expected = false;
1519
1520                if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1521                   (XferState & cpu_to_le32(ResponseExpected)))
1522                        is_response_expected = true;
1523
1524                if (is_response_expected
1525                  || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
1526                        unsigned long flagv;
1527                        spin_lock_irqsave(&fib->event_lock, flagv);
1528                        complete(&fib->event_wait);
1529                        spin_unlock_irqrestore(&fib->event_lock, flagv);
1530                        schedule();
1531                        retval = 0;
1532                }
1533        }
1534        /* Give some extra time for ioctls to complete. */
1535        if (retval == 0)
1536                ssleep(2);
1537        index = aac->cardtype;
1538
1539        /*
1540         * Re-initialize the adapter: first free resources, then carefully
1541         * apply the initialization sequence to come back again. The only
1542         * risk is a change in firmware dropping cache; it is assumed the
1543         * caller will ensure that i/o is quiesced and the card is flushed
1544         * in that case.
1545         */
1546        aac_free_irq(aac);
1547        aac_fib_map_free(aac);
1548        dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
1549                          aac->comm_phys);
1550        aac->comm_addr = NULL;
1551        aac->comm_phys = 0;
1552        kfree(aac->queues);
1553        aac->queues = NULL;
1554        kfree(aac->fsa_dev);
1555        aac->fsa_dev = NULL;
1556
1557        dmamask = DMA_BIT_MASK(32);
1558        quirks = aac_get_driver_ident(index)->quirks;
1559        if ((quirks & AAC_QUIRK_31BIT) || !(quirks & AAC_QUIRK_SRC))
1560                retval = pci_set_dma_mask(aac->pdev, dmamask);
1561        else
1562                retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
1565
1566        if ((quirks & AAC_QUIRK_31BIT) && !retval) {
1567                dmamask = DMA_BIT_MASK(31);
1568                retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
1569        }
1570
1571        if (retval)
1572                goto out;
1573
1574        if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1575                goto out;
1576
1577        if (jafo) {
1578                aac->thread = kthread_run(aac_command_thread, aac, "%s",
1579                                          aac->name);
1580                if (IS_ERR(aac->thread)) {
1581                        retval = PTR_ERR(aac->thread);
1582                        aac->thread = NULL;
1583                        goto out;
1584                }
1585        }
1586        (void)aac_get_adapter_info(aac);
1587        if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1588                host->sg_tablesize = 34;
1589                host->max_sectors = (host->sg_tablesize * 8) + 112;
1590        }
1591        if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1592                host->sg_tablesize = 17;
1593                host->max_sectors = (host->sg_tablesize * 8) + 112;
1594        }
1595        aac_get_config_status(aac, 1);
1596        aac_get_containers(aac);
1597        /*
1598         * This is where the assumption that the Adapter is quiesced
1599         * is important.
1600         */
1601        command_list = NULL;
1602        __shost_for_each_device(dev, host) {
1603                unsigned long flags;
1604                spin_lock_irqsave(&dev->list_lock, flags);
1605                list_for_each_entry(command, &dev->cmd_list, list)
1606                        if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1607                                command->SCp.buffer = (struct scatterlist *)command_list;
1608                                command_list = command;
1609                        }
1610                spin_unlock_irqrestore(&dev->list_lock, flags);
1611        }
1612        while ((command = command_list)) {
1613                command_list = (struct scsi_cmnd *)command->SCp.buffer;
1614                command->SCp.buffer = NULL;
1615                command->result = DID_OK << 16
1616                  | COMMAND_COMPLETE << 8
1617                  | SAM_STAT_TASK_SET_FULL;
1618                command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1619                command->scsi_done(command);
1620        }
1621        /*
1622         * Any Device that was already marked offline needs to be marked
1623         * running
1624         */
1625        __shost_for_each_device(dev, host) {
1626                if (!scsi_device_online(dev))
1627                        scsi_device_set_state(dev, SDEV_RUNNING);
1628        }
1629        retval = 0;
1630
1631out:
1632        aac->in_reset = 0;
1633        scsi_unblock_requests(host);
1634
1635        /*
1636         * Issue bus rescan to catch any configuration that might have
1637         * occurred
1638         */
1639        if (!retval && !is_kdump_kernel()) {
1640                dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
1641                aac_schedule_safw_scan_worker(aac);
1642        }
1643
1644        if (jafo) {
1645                spin_lock_irq(host->host_lock);
1646        }
1647        return retval;
1648}
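
/*
 * Sketch (a modernization, not the driver's code as-is): the
 * pci_set_dma_mask()/pci_set_consistent_dma_mask() calls above are thin
 * wrappers around the generic DMA API, so the mask setup is equivalent
 * to:
 */
#if 0
	if ((quirks & AAC_QUIRK_31BIT) || !(quirks & AAC_QUIRK_SRC))
		retval = dma_set_mask(&aac->pdev->dev, DMA_BIT_MASK(32));
	else
		retval = dma_set_coherent_mask(&aac->pdev->dev, DMA_BIT_MASK(32));

	if ((quirks & AAC_QUIRK_31BIT) && !retval)
		retval = dma_set_coherent_mask(&aac->pdev->dev, DMA_BIT_MASK(31));
#endif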
1649
1650int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1651{
1652        unsigned long flagv = 0;
1653        int retval;
1654        struct Scsi_Host *host;
1655        int bled;
1656
1657        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1658                return -EBUSY;
1659
1660        if (aac->in_reset) {
1661                spin_unlock_irqrestore(&aac->fib_lock, flagv);
1662                return -EBUSY;
1663        }
1664        aac->in_reset = 1;
1665        spin_unlock_irqrestore(&aac->fib_lock, flagv);
1666
1667        /*
1668         * Wait for all commands to complete to this specific
1669         * target (block maximum 60 seconds). Although not necessary,
1670         * it does make us a good storage citizen.
1671         */
1672        host = aac->scsi_host_ptr;
1673        scsi_block_requests(host);
1674
1675        /* Quiesce build, flush cache, write through mode */
1676        if (forced < 2)
1677                aac_send_shutdown(aac);
1678        spin_lock_irqsave(host->host_lock, flagv);
1679        bled = forced ? forced :
1680                        (aac_check_reset != 0 && aac_check_reset != 1);
1681        retval = _aac_reset_adapter(aac, bled, reset_type);
1682        spin_unlock_irqrestore(host->host_lock, flagv);
1683
1684        if ((forced < 2) && (retval == -ENODEV)) {
1685                /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1686                struct fib *fibctx = aac_fib_alloc(aac);
1687                if (fibctx) {
1688                        struct aac_pause *cmd;
1689                        int status;
1690
1691                        aac_fib_init(fibctx);
1692
1693                        cmd = (struct aac_pause *) fib_data(fibctx);
1694
1695                        cmd->command = cpu_to_le32(VM_ContainerConfig);
1696                        cmd->type = cpu_to_le32(CT_PAUSE_IO);
1697                        cmd->timeout = cpu_to_le32(1);
1698                        cmd->min = cpu_to_le32(1);
1699                        cmd->noRescan = cpu_to_le32(1);
1700                        cmd->count = cpu_to_le32(0);
1701
1702                        status = aac_fib_send(ContainerCommand,
1703                          fibctx,
1704                          sizeof(struct aac_pause),
1705                          FsaNormal,
1706                          -2 /* Timeout silently */, 1,
1707                          NULL, NULL);
1708
1709                        if (status >= 0)
1710                                aac_fib_complete(fibctx);
1711                        /* FIB should be freed only after getting
1712                         * the response from the F/W */
1713                        if (status != -ERESTARTSYS)
1714                                aac_fib_free(fibctx);
1715                }
1716        }
1717
1718        return retval;
1719}
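
/*
 * Sketch (hypothetical helper, not in the driver): the trylock sequence
 * at the top of aac_reset_adapter() is a "claim reset ownership"
 * pattern - exactly one caller may move in_reset from 0 to 1, and
 * everyone else backs off with -EBUSY.
 */
#if 0
static bool aac_claim_reset(struct aac_dev *aac)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&aac->fib_lock, flags))
		return false;			/* lock contended */
	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flags);
		return false;			/* reset already in flight */
	}
	aac->in_reset = 1;			/* we own the reset now */
	spin_unlock_irqrestore(&aac->fib_lock, flags);
	return true;
}
#endif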
1720
1721int aac_check_health(struct aac_dev *aac)
1722{
1723        int BlinkLED;
1724        unsigned long time_now, flagv = 0;
1725        struct list_head *entry;
1726
1727        /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1728        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1729                return 0;
1730
1731        if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1732                spin_unlock_irqrestore(&aac->fib_lock, flagv);
1733                return 0; /* OK */
1734        }
1735
1736        aac->in_reset = 1;
1737
1738        /* Fake up an AIF:
1739         *      aac_aifcmd.command = AifCmdEventNotify = 1
1740         *      aac_aifcmd.seqnum = 0xFFFFFFFF
1741         *      aac_aifcmd.data[0] = AifEnExpEvent = 23
1742         *      aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1743         *      aac_aifcmd.data[2] = AifHighPriority = 3
1744         *      aac_aifcmd.data[3] = BlinkLED
1745         */
1746
1747        time_now = jiffies/HZ;
1748        entry = aac->fib_list.next;
1749
1750        /*
1751         * For each Context that is on the
1752         * fibctxList, make a copy of the
1753         * fib, and then set the event to wake up the
1754         * thread that is waiting for it.
1755         */
1756        while (entry != &aac->fib_list) {
1757                /*
1758                 * Extract the fibctx
1759                 */
1760                struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1761                struct hw_fib *hw_fib;
1762                struct fib *fib;
1763                /*
1764                 * Check if the queue is getting
1765                 * backlogged
1766                 */
1767                if (fibctx->count > 20) {
1768                        /*
1769                         * It's *not* jiffies folks,
1770                         * but jiffies / HZ, so do not
1771                         * panic ...
1772                         */
1773                        u32 time_last = fibctx->jiffies;
1774                        /*
1775                         * Has it been > 2 minutes
1776                         * since the last read off
1777                         * the queue?
1778                         */
1779                        if ((time_now - time_last) > aif_timeout) {
1780                                entry = entry->next;
1781                                aac_close_fib_context(aac, fibctx);
1782                                continue;
1783                        }
1784                }
1785                /*
1786                 * Warning: no sleep allowed while
1787                 * holding spinlock
1788                 */
1789                hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1790                fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1791                if (fib && hw_fib) {
1792                        struct aac_aifcmd *aif;
1793
1794                        fib->hw_fib_va = hw_fib;
1795                        fib->dev = aac;
1796                        aac_fib_init(fib);
1797                        fib->type = FSAFS_NTC_FIB_CONTEXT;
1798                        fib->size = sizeof(struct fib);
1799                        fib->data = hw_fib->data;
1800                        aif = (struct aac_aifcmd *)hw_fib->data;
1801                        aif->command = cpu_to_le32(AifCmdEventNotify);
1802                        aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1803                        ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1804                        ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1805                        ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1806                        ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1807
1808                        /*
1809                         * Put the FIB onto the
1810                         * fibctx's fibs
1811                         */
1812                        list_add_tail(&fib->fiblink, &fibctx->fib_list);
1813                        fibctx->count++;
1814                        /*
1815                         * Set the event to wake up the
1816                         * thread that is waiting.
1817                         */
1818                        complete(&fibctx->completion);
1819                } else {
1820                        printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1821                        kfree(fib);
1822                        kfree(hw_fib);
1823                }
1824                entry = entry->next;
1825        }
1826
1827        spin_unlock_irqrestore(&aac->fib_lock, flagv);
1828
1829        if (BlinkLED < 0) {
1830                printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
1831                                aac->name, BlinkLED);
1832                goto out;
1833        }
1834
1835        printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1836
1837out:
1838        aac->in_reset = 0;
1839        return BlinkLED;
1840}
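
/*
 * Sketch (hypothetical helper): the backlog test above, repeated in
 * wakeup_fibctx_threads(), boils down to "more than 20 queued AIFs and
 * no read for over aif_timeout seconds". Note that fibctx->jiffies is
 * stored in seconds (jiffies / HZ), not in raw jiffies.
 */
#if 0
static bool aac_fibctx_is_stale(struct aac_fib_context *fibctx,
				u32 time_now_seconds)
{
	return fibctx->count > 20 &&
	       (time_now_seconds - fibctx->jiffies) > aif_timeout;
}
#endif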
1841
1842static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
1843{
1844        return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
1845}
1846
1847static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
1848                                                                int bus,
1849                                                                int target)
1850{
1851        if (bus != CONTAINER_CHANNEL)
1852                bus = aac_phys_to_logical(bus);
1853
1854        return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
1855}
1856
1857static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
1858{
1859        if (bus != CONTAINER_CHANNEL)
1860                bus = aac_phys_to_logical(bus);
1861
1862        return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
1863}
1864
1865static void aac_put_safw_scsi_device(struct scsi_device *sdev)
1866{
1867        if (sdev)
1868                scsi_device_put(sdev);
1869}
1870
1871static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
1872{
1873        struct scsi_device *sdev;
1874
1875        sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1876        scsi_remove_device(sdev);
1877        aac_put_safw_scsi_device(sdev);
1878}
1879
1880static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
1881        int bus, int target)
1882{
1883        return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
1884}
1885
1886static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
1887{
1888        if (is_safw_raid_volume(dev, bus, target))
1889                return dev->fsa_dev[target].valid;
1890        else
1891                return aac_is_safw_scan_count_equal(dev, bus, target);
1892}
1893
1894static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
1895{
1896        int is_exposed = 0;
1897        struct scsi_device *sdev;
1898
1899        sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1900        if (sdev)
1901                is_exposed = 1;
1902        aac_put_safw_scsi_device(sdev);
1903
1904        return is_exposed;
1905}
1906
1907static int aac_update_safw_host_devices(struct aac_dev *dev)
1908{
1909        int i;
1910        int bus;
1911        int target;
1912        int is_exposed = 0;
1913        int rcode = 0;
1914
1915        rcode = aac_setup_safw_adapter(dev);
1916        if (unlikely(rcode < 0))
1917                goto out;
1919
1920        for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
1921
1922                bus = get_bus_number(i);
1923                target = get_target_number(i);
1924
1925                is_exposed = aac_is_safw_device_exposed(dev, bus, target);
1926
1927                if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
1928                        aac_add_safw_device(dev, bus, target);
1929                else if (!aac_is_safw_target_valid(dev, bus, target) &&
1930                                                                is_exposed)
1931                        aac_remove_safw_device(dev, bus, target);
1932        }
1933out:
1934        return rcode;
1935}
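
/*
 * The reconcile rule applied per (bus, target) pair above:
 *
 *	valid  exposed  action
 *	-----  -------  ------------------------
 *	yes    no       aac_add_safw_device()
 *	no     yes      aac_remove_safw_device()
 *	other  -        nothing to do
 *
 * get_bus_number()/get_target_number() are assumed to decompose the
 * flat loop index into a (bus, target) pair.
 */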
1936
1937static int aac_scan_safw_host(struct aac_dev *dev)
1938{
1939        int rcode = 0;
1940
1941        rcode = aac_update_safw_host_devices(dev);
1942        if (rcode)
1943                aac_schedule_safw_scan_worker(dev);
1944
1945        return rcode;
1946}
1947
1948int aac_scan_host(struct aac_dev *dev)
1949{
1950        int rcode = 0;
1951
1952        mutex_lock(&dev->scan_mutex);
1953        if (dev->sa_firmware)
1954                rcode = aac_scan_safw_host(dev);
1955        else
1956                scsi_scan_host(dev->scsi_host_ptr);
1957        mutex_unlock(&dev->scan_mutex);
1958
1959        return rcode;
1960}
1961
1962/**
1963 *      aac_handle_sa_aif       -       Handle a message from the firmware
1964 *      @dev: Which adapter this fib is from
1965 *      @fibptr: Pointer to the fib from the adapter
1966 *
1967 *      This routine handles a driver notify fib from the adapter and
1968 *      dispatches it to the appropriate routine for handling.
1969 */
1970static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
1971{
1972        int i;
1973        u32 events = 0;
1974
1975        if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
1976                events = SA_AIF_HOTPLUG;
1977        else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
1978                events = SA_AIF_HARDWARE;
1979        else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
1980                events = SA_AIF_PDEV_CHANGE;
1981        else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
1982                events = SA_AIF_LDEV_CHANGE;
1983        else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
1984                events = SA_AIF_BPSTAT_CHANGE;
1985        else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
1986                events = SA_AIF_BPCFG_CHANGE;
1987
1988        switch (events) {
1989        case SA_AIF_HOTPLUG:
1990        case SA_AIF_HARDWARE:
1991        case SA_AIF_PDEV_CHANGE:
1992        case SA_AIF_LDEV_CHANGE:
1993        case SA_AIF_BPCFG_CHANGE:
1994
1995                aac_scan_host(dev);
1996
1997                break;
1998
1999        case SA_AIF_BPSTAT_CHANGE:
2000                /* currently do nothing */
2001                break;
2002        }
2003
2004        for (i = 1; i <= 10; ++i) {
2005                events = src_readl(dev, MUnit.IDR);
2006                if (events & (1<<23)) {
2007                        pr_warn("AIF not cleared by firmware - %d/%d\n",
2008                                i, 10);
2009                        ssleep(1);
2010                }
2011        }
2012}
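
/*
 * Sketch of the poll above: bit 23 of the IDR doorbell register is
 * treated as "AIF pending", and the loop gives the firmware up to ten
 * seconds to clear it. A variant that stops as soon as the bit clears
 * (an assumption - the code above runs all ten reads regardless):
 */
#if 0
	for (i = 1; i <= 10; ++i) {
		if (!(src_readl(dev, MUnit.IDR) & (1 << 23)))
			break;
		pr_warn("AIF not cleared by firmware - %d/%d\n", i, 10);
		ssleep(1);
	}
#endif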
2013
2014static int get_fib_count(struct aac_dev *dev)
2015{
2016        unsigned int num = 0;
2017        struct list_head *entry;
2018        unsigned long flagv;
2019
2020        /*
2021         * Warning: no sleep allowed while
2022         * holding spinlock. We take the estimate
2023         * and pre-allocate a set of fibs outside the
2024         * lock.
2025         */
2026        num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
2027                        / sizeof(struct hw_fib); /* some extra */
2028        spin_lock_irqsave(&dev->fib_lock, flagv);
2029        entry = dev->fib_list.next;
2030        while (entry != &dev->fib_list) {
2031                entry = entry->next;
2032                ++num;
2033        }
2034        spin_unlock_irqrestore(&dev->fib_lock, flagv);
2035
2036        return num;
2037}
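
/*
 * Sketch: the open-coded traversal in get_fib_count() is equivalent to
 * the standard list_for_each() iterator from <linux/list.h>:
 */
#if 0
	struct list_head *entry;

	list_for_each(entry, &dev->fib_list)
		++num;
#endif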
2038
2039static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2040                                                struct fib **fib_pool,
2041                                                unsigned int num)
2042{
2043        struct hw_fib **hw_fib_p;
2044        struct fib **fib_p;
2045
2046        hw_fib_p = hw_fib_pool;
2047        fib_p = fib_pool;
2048        while (hw_fib_p < &hw_fib_pool[num]) {
2049                *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
2050                if (!(*(hw_fib_p++))) {
2051                        --hw_fib_p;
2052                        break;
2053                }
2054
2055                *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
2056                if (!(*(fib_p++))) {
2057                        kfree(*(--hw_fib_p));
2058                        break;
2059                }
2060        }
2061
2062        /*
2063         * Get the actual number of allocated fibs
2064         */
2065        num = hw_fib_p - hw_fib_pool;
2066        return num;
2067}
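
/*
 * Illustrative alternative (not upstream code): the pointer arithmetic
 * in fillup_pools() can be written with an explicit index. Allocation
 * stops at the first failure, and the return value is the number of
 * complete hw_fib/fib pairs.
 */
#if 0
static unsigned int fillup_pools_indexed(struct hw_fib **hw_fib_pool,
					 struct fib **fib_pool,
					 unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		hw_fib_pool[i] = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
		if (!hw_fib_pool[i])
			break;
		fib_pool[i] = kmalloc(sizeof(struct fib), GFP_KERNEL);
		if (!fib_pool[i]) {
			kfree(hw_fib_pool[i]);
			break;
		}
	}
	return i;
}
#endif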
2068
2069static void wakeup_fibctx_threads(struct aac_dev *dev,
2070                                                struct hw_fib **hw_fib_pool,
2071                                                struct fib **fib_pool,
2072                                                struct fib *fib,
2073                                                struct hw_fib *hw_fib,
2074                                                unsigned int num)
2075{
2076        unsigned long flagv;
2077        struct list_head *entry;
2078        struct hw_fib **hw_fib_p;
2079        struct fib **fib_p;
2080        u32 time_now, time_last;
2081        struct hw_fib *hw_newfib;
2082        struct fib *newfib;
2083        struct aac_fib_context *fibctx;
2084
2085        time_now = jiffies/HZ;
2086        spin_lock_irqsave(&dev->fib_lock, flagv);
2087        entry = dev->fib_list.next;
2088        /*
2089         * For each Context that is on the
2090         * fibctxList, make a copy of the
2091         * fib, and then set the event to wake up the
2092         * thread that is waiting for it.
2093         */
2094
2095        hw_fib_p = hw_fib_pool;
2096        fib_p = fib_pool;
2097        while (entry != &dev->fib_list) {
2098                /*
2099                 * Extract the fibctx
2100                 */
2101                fibctx = list_entry(entry, struct aac_fib_context,
2102                                next);
2103                /*
2104                 * Check if the queue is getting
2105                 * backlogged
2106                 */
2107                if (fibctx->count > 20) {
2108                        /*
2109                         * It's *not* jiffies folks,
2110                         * but jiffies / HZ so do not
2111                         * panic ...
2112                         */
2113                        time_last = fibctx->jiffies;
2114                        /*
2115                         * Has it been > 2 minutes
2116                         * since the last read off
2117                         * the queue?
2118                         */
2119                        if ((time_now - time_last) > aif_timeout) {
2120                                entry = entry->next;
2121                                aac_close_fib_context(dev, fibctx);
2122                                continue;
2123                        }
2124                }
2125                /*
2126                 * Warning: no sleep allowed while
2127                 * holding spinlock
2128                 */
2129                if (hw_fib_p >= &hw_fib_pool[num]) {
2130                        pr_warn("aifd: didn't allocate NewFib\n");
2131                        entry = entry->next;
2132                        continue;
2133                }
2134
2135                hw_newfib = *hw_fib_p;
2136                *(hw_fib_p++) = NULL;
2137                newfib = *fib_p;
2138                *(fib_p++) = NULL;
2139                /*
2140                 * Make the copy of the FIB
2141                 */
2142                memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
2143                memcpy(newfib, fib, sizeof(struct fib));
2144                newfib->hw_fib_va = hw_newfib;
2145                /*
2146                 * Put the FIB onto the
2147                 * fibctx's fibs
2148                 */
2149                list_add_tail(&newfib->fiblink, &fibctx->fib_list);
2150                fibctx->count++;
2151                /*
2152                 * Set the event to wake up the
2153                 * thread that is waiting.
2154                 */
2155                complete(&fibctx->completion);
2156
2157                entry = entry->next;
2158        }
2159        /*
2160         *      Set the status of this FIB
2161         */
2162        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2163        aac_fib_adapter_complete(fib, sizeof(u32));
2164        spin_unlock_irqrestore(&dev->fib_lock, flagv);
2165
2166}
2167
2168static void aac_process_events(struct aac_dev *dev)
2169{
2170        struct hw_fib *hw_fib;
2171        struct fib *fib;
2172        unsigned long flags;
2173        spinlock_t *t_lock;
2174
2175        t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2176        spin_lock_irqsave(t_lock, flags);
2177
2178        while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
2179                struct list_head *entry;
2180                struct aac_aifcmd *aifcmd;
2181                unsigned int  num;
2182                struct hw_fib **hw_fib_pool, **hw_fib_p;
2183                struct fib **fib_pool, **fib_p;
2184
2185                set_current_state(TASK_RUNNING);
2186
2187                entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
2188                list_del(entry);
2189
2190                t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2191                spin_unlock_irqrestore(t_lock, flags);
2192
2193                fib = list_entry(entry, struct fib, fiblink);
2194                hw_fib = fib->hw_fib_va;
2195                if (dev->sa_firmware) {
2196                        /* Thor AIF */
2197                        aac_handle_sa_aif(dev, fib);
2198                        aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2199                        goto free_fib;
2200                }
2201                /*
2202                 *      We will process the FIB here or pass it to a
2203                 *      worker thread that is TBD. We really can't
2204                 *      do anything at this point since we don't have
2205                 *      anything defined for this thread to do.
2206                 */
2207                memset(fib, 0, sizeof(struct fib));
2208                fib->type = FSAFS_NTC_FIB_CONTEXT;
2209                fib->size = sizeof(struct fib);
2210                fib->hw_fib_va = hw_fib;
2211                fib->data = hw_fib->data;
2212                fib->dev = dev;
2213                /*
2214                 *      We only handle AifRequest fibs from the adapter.
2215                 */
2216
2217                aifcmd = (struct aac_aifcmd *) hw_fib->data;
2218                if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
2219                        /* Handle Driver Notify Events */
2220                        aac_handle_aif(dev, fib);
2221                        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2222                        aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2223                        goto free_fib;
2224                }
2225                /*
2226                 * The u32 here is important and intended. We are using
2227                 * 32-bit wrapping time to fit the adapter field.
2228                 */
2229
2230                /* Sniff events */
2231                if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
2232                 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
2233                        aac_handle_aif(dev, fib);
2234                }
2235
2236                /*
2237                 * get number of fibs to process
2238                 */
2239                num = get_fib_count(dev);
2240                if (!num)
2241                        goto free_fib;
2242
2243                hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
2244                                                GFP_KERNEL);
2245                if (!hw_fib_pool)
2246                        goto free_fib;
2247
2248                fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
2249                if (!fib_pool)
2250                        goto free_hw_fib_pool;
2251
2252                /*
2253                 * Fill up fib pointer pools with actual fibs
2254                 * and hw_fibs
2255                 */
2256                num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
2257                if (!num)
2258                        goto free_mem;
2259
2260                /*
2261                 * wakeup the thread that is waiting for
2262                 * the response from fw (ioctl)
2263                 */
2264                wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
2265                                                            fib, hw_fib, num);
2266
2267free_mem:
2268                /* Free up the remaining resources */
2269                hw_fib_p = hw_fib_pool;
2270                fib_p = fib_pool;
2271                while (hw_fib_p < &hw_fib_pool[num]) {
2272                        kfree(*hw_fib_p);
2273                        kfree(*fib_p);
2274                        ++fib_p;
2275                        ++hw_fib_p;
2276                }
2277                kfree(fib_pool);
2278free_hw_fib_pool:
2279                kfree(hw_fib_pool);
2280free_fib:
2281                kfree(fib);
2282                t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2283                spin_lock_irqsave(t_lock, flags);
2284        }
2285        /*
2286         *      There are no more AIFs.
2287         */
2288        t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2289        spin_unlock_irqrestore(t_lock, flags);
2290}
2291
2292static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
2293                                                        u32 datasize)
2294{
2295        struct aac_srb *srbcmd;
2296        struct sgmap64 *sg64;
2297        dma_addr_t addr;
2298        char *dma_buf;
2299        struct fib *fibptr;
2300        int ret = -ENOMEM;
2301        u32 vbus, vid;
2302
2303        fibptr = aac_fib_alloc(dev);
2304        if (!fibptr)
2305                goto out;
2306
2307        dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
2308                                     GFP_KERNEL);
2309        if (!dma_buf)
2310                goto fib_free_out;
2311
2312        aac_fib_init(fibptr);
2313
2314        vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
2315        vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
2316
2317        srbcmd = (struct aac_srb *)fib_data(fibptr);
2318
2319        srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2320        srbcmd->channel = cpu_to_le32(vbus);
2321        srbcmd->id = cpu_to_le32(vid);
2322        srbcmd->lun = 0;
2323        srbcmd->flags = cpu_to_le32(SRB_DataOut);
2324        srbcmd->timeout = cpu_to_le32(10);
2325        srbcmd->retry_limit = 0;
2326        srbcmd->cdb_size = cpu_to_le32(12);
2327        srbcmd->count = cpu_to_le32(datasize);
2328
2329        memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2330        srbcmd->cdb[0] = BMIC_OUT;
2331        srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
2332        memcpy(dma_buf, (char *)wellness_str, datasize);
2333
2334        sg64 = (struct sgmap64 *)&srbcmd->sg;
2335        sg64->count = cpu_to_le32(1);
2336        sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
2337        sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2338        sg64->sg[0].count = cpu_to_le32(datasize);
2339
2340        ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
2341                                FsaNormal, 1, 1, NULL, NULL);
2342
2343        dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
2344
2345        /*
2346         * Do not set XferState to zero unless we
2347         * receive a response from the F/W.
2348         */
2349        if (ret >= 0)
2350                aac_fib_complete(fibptr);
2351
2352        /*
2353         * FIB should be freed only after
2354         * getting the response from the F/W
2355         */
2356        if (ret != -ERESTARTSYS)
2357                goto fib_free_out;
2358
2359out:
2360        return ret;
2361fib_free_out:
2362        aac_fib_free(fibptr);
2363        goto out;
2364}
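
/*
 * Sketch: the manual 64-bit address split in aac_send_wellness_command()
 * is equivalent to the kernel's upper_32_bits()/lower_32_bits() helpers,
 * which use the same double-shift trick to stay safe on 32-bit builds:
 */
#if 0
	sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
	sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
#endif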
2365
2366int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
2367{
2368        struct tm cur_tm;
2369        char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
2370        u32 datasize = sizeof(wellness_str);
2371        time64_t local_time;
2372        int ret = -ENODEV;
2373
2374        if (!dev->sa_firmware)
2375                goto out;
2376
2377        local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
2378        time64_to_tm(local_time, 0, &cur_tm);
2379        cur_tm.tm_mon += 1;
2380        cur_tm.tm_year += 1900;
2381        wellness_str[8] = bin2bcd(cur_tm.tm_hour);
2382        wellness_str[9] = bin2bcd(cur_tm.tm_min);
2383        wellness_str[10] = bin2bcd(cur_tm.tm_sec);
2384        wellness_str[12] = bin2bcd(cur_tm.tm_mon);
2385        wellness_str[13] = bin2bcd(cur_tm.tm_mday);
2386        wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
2387        wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
2388
2389        ret = aac_send_wellness_command(dev, wellness_str, datasize);
2390
2391out:
2392        return ret;
2393}
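
/*
 * Worked example: bin2bcd() (from <linux/bcd.h>) packs two decimal
 * digits into one byte. For a local time of 2024-06-07 14:35:09 the
 * wellness string bytes set above become:
 */
#if 0
	wellness_str[8]  = bin2bcd(14);	/* hour    -> 0x14 */
	wellness_str[9]  = bin2bcd(35);	/* minute  -> 0x35 */
	wellness_str[10] = bin2bcd(9);	/* second  -> 0x09 */
	wellness_str[12] = bin2bcd(6);	/* month   -> 0x06 */
	wellness_str[13] = bin2bcd(7);	/* day     -> 0x07 */
	wellness_str[14] = bin2bcd(20);	/* century -> 0x20 */
	wellness_str[15] = bin2bcd(24);	/* year    -> 0x24 */
#endif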
2394
2395int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
2396{
2397        int ret = -ENOMEM;
2398        struct fib *fibptr;
2399        __le32 *info;
2400
2401        fibptr = aac_fib_alloc(dev);
2402        if (!fibptr)
2403                goto out;
2404
2405        aac_fib_init(fibptr);
2406        info = (__le32 *)fib_data(fibptr);
2407        *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
2408        ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
2409                                        1, 1, NULL, NULL);
2410
2411        /*
2412         * Do not set XferState to zero unless we
2413         * receive a response from the F/W.
2414         */
2415        if (ret >= 0)
2416                aac_fib_complete(fibptr);
2417
2418        /*
2419         * FIB should be freed only after
2420         * getting the response from the F/W
2421         */
2422        if (ret != -ERESTARTSYS)
2423                aac_fib_free(fibptr);
2424
2425out:
2426        return ret;
2427}
2428
2429/**
2430 *      aac_command_thread      -       command processing thread
2431 *      @dev: Adapter to monitor
2432 *
2433 *      Waits on the commandready event in its queue. When the event gets set
2434 *      it will pull FIBs off its queue. It will continue to pull FIBs off
2435 *      until the queue is empty. When the queue is empty it will wait for
2436 *      more FIBs.
2437 */
2438
2439int aac_command_thread(void *data)
2440{
2441        struct aac_dev *dev = data;
2442        DECLARE_WAITQUEUE(wait, current);
2443        unsigned long next_jiffies = jiffies + HZ;
2444        unsigned long next_check_jiffies = next_jiffies;
2445        long difference = HZ;
2446
2447        /*
2448         *      We can only have one thread per adapter for AIFs.
2449         */
2450        if (dev->aif_thread)
2451                return -EINVAL;
2452
2453        /*
2454         *      Let the DPC know it has a place to send the AIF's to.
2455         */
2456        dev->aif_thread = 1;
2457        add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2458        set_current_state(TASK_INTERRUPTIBLE);
2459        dprintk((KERN_INFO "aac_command_thread start\n"));
2460        while (1) {
2461
2462                aac_process_events(dev);
2463
2464                /*
2465                 *      Background activity
2466                 */
2467                if ((time_before(next_check_jiffies, next_jiffies))
2468                 && ((difference = next_check_jiffies - jiffies) <= 0)) {
2469                        next_check_jiffies = next_jiffies;
2470                        if (aac_adapter_check_health(dev) == 0) {
2471                                difference = ((long)(unsigned)check_interval)
2472                                           * HZ;
2473                                next_check_jiffies = jiffies + difference;
2474                        } else if (!dev->queues)
2475                                break;
2476                }
2477                if (!time_before(next_check_jiffies, next_jiffies)
2478                 && ((difference = next_jiffies - jiffies) <= 0)) {
2479                        struct timespec64 now;
2480                        int ret;
2481
2482                        /* Don't even try to talk to the adapter if it's sick */
2483                        ret = aac_adapter_check_health(dev);
2484                        if (ret || !dev->queues)
2485                                break;
2486                        next_check_jiffies = jiffies
2487                                           + ((long)(unsigned)check_interval)
2488                                           * HZ;
2489                        ktime_get_real_ts64(&now);
2490
2491                        /* Synchronize our watches */
2492                        if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
2493                         && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
2494                                difference = HZ + HZ / 2 -
2495                                             now.tv_nsec / (NSEC_PER_SEC / HZ);
2496                        else {
2497                                if (now.tv_nsec > NSEC_PER_SEC / 2)
2498                                        ++now.tv_sec;
2499
2500                                if (dev->sa_firmware)
2501                                        ret =
2502                                        aac_send_safw_hostttime(dev, &now);
2503                                else
2504                                        ret = aac_send_hosttime(dev, &now);
2505
2506                                difference = (long)(unsigned)update_interval * HZ;
2507                        }
2508                        next_jiffies = jiffies + difference;
2509                        if (time_before(next_check_jiffies, next_jiffies))
2510                                difference = next_check_jiffies - jiffies;
2511                }
2512                if (difference <= 0)
2513                        difference = 1;
2514                set_current_state(TASK_INTERRUPTIBLE);
2515
2516                if (kthread_should_stop())
2517                        break;
2518
2519                /*
2520                 * We probably want usleep_range() here instead of the
2521                 * jiffies computation
2522                 */
2523                schedule_timeout(difference);
2524
2525                if (kthread_should_stop())
2526                        break;
2527        }
2528        if (dev->queues)
2529                remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2530        dev->aif_thread = 0;
2531        return 0;
2532}
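
/*
 * Worked example of the "synchronize our watches" arithmetic above:
 * with HZ = 250 and now.tv_nsec = 250000000 (a quarter past the
 * second),
 *
 *	difference = HZ + HZ / 2 - tv_nsec / (NSEC_PER_SEC / HZ)
 *	           = 250 + 125 - 250000000 / 4000000
 *	           = 375 - 62 = 313 jiffies, about 1.25 s
 *
 * so the next wakeup lands near the middle of the next second,
 * regardless of where in the current second the thread runs now.
 */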
2533
2534int aac_acquire_irq(struct aac_dev *dev)
2535{
2536        int i;
2537        int j;
2538        int ret = 0;
2539
2540        if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
2541                for (i = 0; i < dev->max_msix; i++) {
2542                        dev->aac_msix[i].vector_no = i;
2543                        dev->aac_msix[i].dev = dev;
2544                        if (request_irq(pci_irq_vector(dev->pdev, i),
2545                                        dev->a_ops.adapter_intr,
2546                                        0, "aacraid", &(dev->aac_msix[i]))) {
2547                                printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
2548                                                dev->name, dev->id, i);
2549                                for (j = 0 ; j < i ; j++)
2550                                        free_irq(pci_irq_vector(dev->pdev, j),
2551                                                 &(dev->aac_msix[j]));
2552                                pci_disable_msix(dev->pdev);
2553                                ret = -1;
2554                        }
2555                }
2556        } else {
2557                dev->aac_msix[0].vector_no = 0;
2558                dev->aac_msix[0].dev = dev;
2559
2560                if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
2561                        IRQF_SHARED, "aacraid",
2562                        &(dev->aac_msix[0])) < 0) {
2563                        if (dev->msi)
2564                                pci_disable_msi(dev->pdev);
2565                        printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
2566                                        dev->name, dev->id);
2567                        ret = -1;
2568                }
2569        }
2570        return ret;
2571}
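
/*
 * Sketch (an assumption about setup done elsewhere, not this function):
 * the pci_irq_vector() calls above belong to the managed IRQ-vector API,
 * whose allocation side is pci_alloc_irq_vectors(). A typical pairing
 * looks like:
 */
#if 0
	int nvec = pci_alloc_irq_vectors(dev->pdev, 1, dev->max_msix,
					 PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;		/* no interrupt vectors available */
#endif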
2572
2573void aac_free_irq(struct aac_dev *dev)
2574{
2575        int i;
2576
2577        if (aac_is_src(dev)) {
2578                if (dev->max_msix > 1) {
2579                        for (i = 0; i < dev->max_msix; i++)
2580                                free_irq(pci_irq_vector(dev->pdev, i),
2581                                         &(dev->aac_msix[i]));
2582                } else {
2583                        free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
2584                }
2585        } else {
2586                free_irq(dev->pdev->irq, dev);
2587        }
2588        if (dev->msi)
2589                pci_disable_msi(dev->pdev);
2590        else if (dev->max_msix > 1)
2591                pci_disable_msix(dev->pdev);
2592}
2593