linux/drivers/scsi/aacraid/commsup.c
/*
 *      Adaptec AAC series RAID controller driver
 *      (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"

/**
 *      fib_map_alloc           -       allocate the fib objects
 *      @dev: Adapter to allocate for
 *
 *      Allocate and map the shared PCI space for the FIB blocks used to
 *      talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
        dprintk((KERN_INFO
          "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
          dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
          AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
        dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
                (dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
                * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
                &dev->hw_fib_pa);
        if (dev->hw_fib_va == NULL)
                return -ENOMEM;
        return 0;
}

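/*
 * Sizing note (illustrative, with assumed example values): each slot in the
 * shared region holds one transport header plus one hardware FIB, and one
 * slot is needed per outstanding SCSI command plus AAC_NUM_MGT_FIB
 * management fibs. With a hypothetical max_fib_size of 512, a 24-byte
 * aac_fib_xporthdr, can_queue of 256 and AAC_NUM_MGT_FIB of 8, this would
 * allocate (512 + 24) * (256 + 8) + (ALIGN32 - 1) bytes; the ALIGN32 - 1
 * slack lets aac_fib_setup() round the base up to a 32-byte boundary for
 * PMC hardware.
 */
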
/**
 *      aac_fib_map_free                -       free the fib objects
 *      @dev: Adapter to free
 *
 *      Free the PCI mappings and the memory allocated for FIB blocks
 *      on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
        pci_free_consistent(dev->pdev,
          dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
          dev->hw_fib_va, dev->hw_fib_pa);
        dev->hw_fib_va = NULL;
        dev->hw_fib_pa = 0;
}

/**
 *      aac_fib_setup   -       setup the fibs
 *      @dev: Adapter to set up
 *
 *      Allocate the PCI space for the fibs, map it and then initialise the
 *      fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
        struct fib *fibptr;
        struct hw_fib *hw_fib;
        dma_addr_t hw_fib_pa;
        int i;

        while (((i = fib_map_alloc(dev)) == -ENOMEM)
         && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
                dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
                dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
        }
        if (i < 0)
                return -ENOMEM;

        /* 32 byte alignment for PMC */
        hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
        dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
                (hw_fib_pa - dev->hw_fib_pa));
        dev->hw_fib_pa = hw_fib_pa;
        memset(dev->hw_fib_va, 0,
                (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
                (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

        /* add Xport header */
        dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
                sizeof(struct aac_fib_xporthdr));
        dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);

        hw_fib = dev->hw_fib_va;
        hw_fib_pa = dev->hw_fib_pa;
        /*
         *      Initialise the fibs
         */
        for (i = 0, fibptr = &dev->fibs[i];
                i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
                i++, fibptr++)
        {
                fibptr->flags = 0;
                fibptr->dev = dev;
                fibptr->hw_fib_va = hw_fib;
                fibptr->data = (void *) fibptr->hw_fib_va->data;
                fibptr->next = fibptr+1;        /* Forward chain the fibs */
                sema_init(&fibptr->event_wait, 0);
                spin_lock_init(&fibptr->event_lock);
                hw_fib->header.XferState = cpu_to_le32(0xffffffff);
                hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
                fibptr->hw_fib_pa = hw_fib_pa;
                hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
                        dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
                hw_fib_pa = hw_fib_pa +
                        dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
        }
        /*
         *      Add the fib chain to the free list
         */
        dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
        /*
         *      Enable this to debug out of queue space
         */
        dev->free_fib = &dev->fibs[0];
        return 0;
}

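/*
 * Resulting layout (a sketch inferred from the code above, not from a
 * firmware specification):
 *
 *   [pad to ALIGN32][xporthdr][hw_fib 0][xporthdr][hw_fib 1] ...
 *
 * After aac_fib_setup() returns, dev->hw_fib_va and dev->hw_fib_pa point
 * just past the first transport header, at hw_fib 0, and each dev->fibs[i]
 * records the virtual and physical address of its own slot, stepped by
 * max_fib_size + sizeof(struct aac_fib_xporthdr).
 */
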
/**
 *      aac_fib_alloc   -       allocate a fib
 *      @dev: Adapter to allocate the fib for
 *
 *      Allocate a fib from the adapter fib pool. If the pool is empty we
 *      return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
        struct fib * fibptr;
        unsigned long flags;
        spin_lock_irqsave(&dev->fib_lock, flags);
        fibptr = dev->free_fib;
        if (!fibptr) {
                spin_unlock_irqrestore(&dev->fib_lock, flags);
                return fibptr;
        }
        dev->free_fib = fibptr->next;
        spin_unlock_irqrestore(&dev->fib_lock, flags);
        /*
         *      Set the proper node type code and node byte size
         */
        fibptr->type = FSAFS_NTC_FIB_CONTEXT;
        fibptr->size = sizeof(struct fib);
        /*
         *      Null out fields that depend on being zero at the start of
         *      each I/O
         */
        fibptr->hw_fib_va->header.XferState = 0;
        fibptr->flags = 0;
        fibptr->callback = NULL;
        fibptr->callback_data = NULL;

        return fibptr;
}

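/*
 * Typical synchronous lifecycle (an illustrative sketch; the command value
 * and payload size are assumptions for the example, modelled on callers
 * elsewhere in the driver):
 *
 *      struct fib *fibptr = aac_fib_alloc(dev);
 *      if (!fibptr)
 *              return -ENOMEM;
 *      aac_fib_init(fibptr);
 *      // build the request in fib_data(fibptr) here
 *      status = aac_fib_send(ContainerCommand, fibptr, datasize,
 *                      FsaNormal, 1, 1, NULL, NULL);
 *      if (status >= 0)
 *              aac_fib_complete(fibptr);
 *      aac_fib_free(fibptr);
 */
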
/**
 *      aac_fib_free    -       free a fib
 *      @fibptr: fib to free up
 *
 *      Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
        unsigned long flags;

        if (fibptr->done == 2)
                return;

        spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
        if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
                aac_config.fib_timeouts++;
        if (fibptr->hw_fib_va->header.XferState != 0) {
                printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
                         (void *)fibptr,
                         le32_to_cpu(fibptr->hw_fib_va->header.XferState));
        }
        fibptr->next = fibptr->dev->free_fib;
        fibptr->dev->free_fib = fibptr;
        spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *      aac_fib_init    -       initialise a fib
 *      @fibptr: The fib to initialize
 *
 *      Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;

        memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
        hw_fib->header.StructType = FIB_MAGIC;
        hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
        hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
        hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
        hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *      fib_dealloc             -       deallocate a fib
 *      @fibptr: fib to deallocate
 *
 *      Will deallocate and return to the free pool the FIB pointed to by the
 *      caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;
        hw_fib->header.XferState = 0;
}

/*
 *      Communication primitives define and support the queuing method we use
 *      to support host to adapter communication. All queue accesses happen
 *      through these routines and they are the only routines which have
 *      knowledge of how these queues are implemented.
 */

/**
 *      aac_get_entry           -       get a queue entry
 *      @dev: Adapter
 *      @qid: Queue Number
 *      @entry: Entry return
 *      @index: Index return
 *      @nonotify: notification control
 *
 *      With a priority the routine returns a queue entry if the queue has
 *      free entries. If the queue is full (no free entries) then no entry is
 *      returned and the function returns 0, otherwise 1 is returned.
 */

static int aac_get_entry(struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
        struct aac_queue * q;
        unsigned long idx;

        /*
         *      All of the queues wrap when they reach the end, so we check
         *      to see if they have reached the end and if they have we just
         *      set the index back to zero. This is a wrap. You could or off
         *      the high bits in all updates but this is a bit faster I think.
         */

        q = &dev->queues->queue[qid];

        idx = *index = le32_to_cpu(*(q->headers.producer));
        /* Interrupt Moderation, only interrupt for first two entries */
        if (idx != le32_to_cpu(*(q->headers.consumer))) {
                if (--idx == 0) {
                        if (qid == AdapNormCmdQueue)
                                idx = ADAP_NORM_CMD_ENTRIES;
                        else
                                idx = ADAP_NORM_RESP_ENTRIES;
                }
                if (idx != le32_to_cpu(*(q->headers.consumer)))
                        *nonotify = 1;
        }

        if (qid == AdapNormCmdQueue) {
                if (*index >= ADAP_NORM_CMD_ENTRIES)
                        *index = 0; /* Wrap to front of the Producer Queue. */
        } else {
                if (*index >= ADAP_NORM_RESP_ENTRIES)
                        *index = 0; /* Wrap to front of the Producer Queue. */
        }

        /* Queue is full */
        if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
                printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
                                qid, atomic_read(&q->numpending));
                return 0;
        } else {
                *entry = q->base + *index;
                return 1;
        }
}

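/*
 * Ring arithmetic note (derived from the checks above, not from firmware
 * documentation): the producer and consumer indices live in shared memory
 * and the ring counts as full when advancing the producer would make it
 * equal the consumer, i.e. when (*index + 1) == consumer. The nonotify
 * hint is set once more than one entry is already outstanding, so the
 * adapter is only interrupted for the first couple of postings.
 */
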
/**
 *      aac_queue_get           -       get the next free QE
 *      @dev: Adapter
 *      @index: Returned index
 *      @qid: Queue number
 *      @hw_fib: Fib to associate with the queue entry
 *      @wait: Wait if queue full
 *      @fibptr: Driver fib object to go with fib
 *      @nonotify: Don't notify the adapter
 *
 *      Gets the next free QE off the requested priority adapter command
 *      queue and associates the Fib with the QE. The QE represented by
 *      index is ready to insert on the queue when this routine returns
 *      success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
        struct aac_entry * entry = NULL;
        int map = 0;

        if (qid == AdapNormCmdQueue) {
                /* if no entries wait for some if caller wants to */
                while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
                        printk(KERN_ERR "GetEntries failed\n");
                }
                /*
                 *      Setup queue entry with a command, status and fib mapped
                 */
                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
                map = 1;
        } else {
                while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
                        /* if no entries wait for some if caller wants to */
                }
                /*
                 *      Setup queue entry with command, status and fib mapped
                 */
                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
                entry->addr = hw_fib->header.SenderFibAddress;
                /* Restore adapter's pointer to the FIB */
                hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;  /* Let the adapter know where to find its data */
                map = 0;
        }
        /*
         *      If MapFib is true then we need to map the Fib and put pointers
         *      in the queue entry.
         */
        if (map)
                entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
        return 0;
}

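/*
 * Posting pattern (a sketch of how this file itself drives the routine;
 * see aac_fib_adapter_complete() below): the caller takes the queue lock,
 * claims an entry, advances the shared producer index and, unless the
 * nonotify hint suppressed it, rings the adapter doorbell:
 *
 *      spin_lock_irqsave(q->lock, qflags);
 *      aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
 *      *(q->headers.producer) = cpu_to_le32(index + 1);
 *      spin_unlock_irqrestore(q->lock, qflags);
 *      if (!(nointr & (int)aac_config.irq_mod))
 *              aac_adapter_notify(dev, AdapNormRespQueue);
 */
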
/*
 *      Define the highest level of host to adapter communication routines.
 *      These routines will support host to adapter FS communication. These
 *      routines have no knowledge of the communication method used. This
 *      level sends and receives FIBs. This level has no knowledge of how
 *      these FIBs get passed back and forth.
 */

/**
 *      aac_fib_send    -       send a fib to the adapter
 *      @command: Command to send
 *      @fibptr: The fib
 *      @size: Size of fib data area
 *      @priority: Priority of Fib
 *      @wait: Async/sync select
 *      @reply: True if a reply is wanted
 *      @callback: Called with reply
 *      @callback_data: Passed to callback
 *
 *      Sends the requested FIB to the adapter and optionally will wait for a
 *      response FIB. If the caller does not wish to wait for a response then
 *      an event to wait on must be supplied. This event will be set when a
 *      response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                int priority, int wait, int reply, fib_callback callback,
                void *callback_data)
{
        struct aac_dev * dev = fibptr->dev;
        struct hw_fib * hw_fib = fibptr->hw_fib_va;
        unsigned long flags = 0;
        unsigned long mflags = 0;
        unsigned long sflags = 0;

        if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
                return -EBUSY;
        /*
         *      There are 5 cases with the wait and response requested flags.
         *      The only invalid cases are if the caller requests to wait and
         *      does not request a response and if the caller does not want a
         *      response and the Fib is not allocated from pool. If a response
         *      is not requested the Fib will just be deallocated by the DPC
         *      routine when the response comes back from the adapter. No
         *      further processing will be done besides deleting the Fib. We
         *      will have a debug mode where the adapter can notify the host
         *      it had a problem and the host can log that fact.
         */
        fibptr->flags = 0;
        if (wait && !reply) {
                return -EINVAL;
        } else if (!wait && reply) {
                hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
        } else if (!wait && !reply) {
                hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
        } else if (wait && reply) {
                hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.NormalSent);
        }
        /*
         *      Map the fib into 32bits by using the fib number
         */

        hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
        hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
        /*
         *      Set FIB state to indicate where it came from and if we want a
         *      response from the adapter. Also load the command from the
         *      caller.
         *
         *      Map the hw fib pointer as a 32bit value
         */
        hw_fib->header.Command = cpu_to_le16(command);
        hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
        /*
         *      Set the size of the Fib we want to send to the adapter
         */
        hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
        if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
                return -EMSGSIZE;
        }
        /*
         *      Get a queue entry, connect the FIB to it and then notify
         *      the adapter that a command is ready.
         */
        hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

        /*
         *      Fill in the Callback and CallbackContext if we are not
         *      going to wait.
         */
        if (!wait) {
                fibptr->callback = callback;
                fibptr->callback_data = callback_data;
                fibptr->flags = FIB_CONTEXT_FLAG;
        }

        fibptr->done = 0;

        FIB_COUNTER_INCREMENT(aac_config.FibsSent);

        dprintk((KERN_DEBUG "Fib contents:.\n"));
        dprintk((KERN_DEBUG "  Command =               %d.\n", le16_to_cpu(hw_fib->header.Command)));
        dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
        dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
        dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
        dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
        dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

        if (!dev->queues)
                return -EBUSY;

        if (wait) {

                spin_lock_irqsave(&dev->manage_lock, mflags);
                if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
                        printk(KERN_INFO "No management Fibs Available:%d\n",
                                                dev->management_fib_count);
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
                        return -EBUSY;
                }
                dev->management_fib_count++;
                spin_unlock_irqrestore(&dev->manage_lock, mflags);
                spin_lock_irqsave(&fibptr->event_lock, flags);
        }

        if (dev->sync_mode) {
                if (wait)
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                spin_lock_irqsave(&dev->sync_lock, sflags);
                if (dev->sync_fib) {
                        list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
                        spin_unlock_irqrestore(&dev->sync_lock, sflags);
                } else {
                        dev->sync_fib = fibptr;
                        spin_unlock_irqrestore(&dev->sync_lock, sflags);
                        aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
                                (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
                                NULL, NULL, NULL, NULL, NULL);
                }
                if (wait) {
                        fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
                        if (down_interruptible(&fibptr->event_wait)) {
                                fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
                                return -EFAULT;
                        }
                        return 0;
                }
                return -EINPROGRESS;
        }

        if (aac_adapter_deliver(fibptr) != 0) {
                printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
                if (wait) {
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
                }
                return -EBUSY;
        }

        /*
         *      If the caller wanted us to wait for response wait now.
         */

        if (wait) {
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
                /* Only set for first known interruptible command */
                if (wait < 0) {
                        /*
                         * *VERY* Dangerous to time out a command, the
                         * assumption is made that we have no hope of
                         * functioning because an interrupt routing or other
                         * hardware failure has occurred.
                         */
                        unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
                        while (down_trylock(&fibptr->event_wait)) {
                                int blink;
                                if (time_is_before_eq_jiffies(timeout)) {
                                        struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
                                        atomic_dec(&q->numpending);
                                        if (wait == -1) {
                                                printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
                                                  "Usually a result of a PCI interrupt routing problem;\n"
                                                  "update mother board BIOS or consider utilizing one of\n"
                                                  "the SAFE mode kernel options (acpi, apic etc)\n");
                                        }
                                        return -ETIMEDOUT;
                                }
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
                                        if (wait == -1) {
                                                printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
                                                  "Usually a result of a serious unrecoverable hardware problem\n",
                                                  blink);
                                        }
                                        return -EFAULT;
                                }
                                /* We used to udelay() here but that absorbed
                                 * a CPU when a timeout occurred. Not very
                                 * useful. */
                                cpu_relax();
                        }
                } else if (down_interruptible(&fibptr->event_wait)) {
                        /* Do nothing ... satisfy
                         * down_interruptible must_check */
                }

                spin_lock_irqsave(&fibptr->event_lock, flags);
                if (fibptr->done == 0) {
                        fibptr->done = 2; /* Tell interrupt we aborted */
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                        return -ERESTARTSYS;
                }
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
                BUG_ON(fibptr->done == 0);

                if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
                        return -ETIMEDOUT;
                return 0;
        }
        /*
         *      If the user does not want a response then return success,
         *      otherwise return pending
         */
        if (reply)
                return -EINPROGRESS;
        else
                return 0;
}

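/*
 * Asynchronous use (an illustrative sketch; the callback and context names
 * are hypothetical): with wait == 0 and reply != 0 the function returns
 * -EINPROGRESS once the fib is queued, and the callback runs from the
 * response path.
 *
 *      static void my_callback(void *context, struct fib *fibptr)
 *      {
 *              aac_fib_complete(fibptr);
 *              aac_fib_free(fibptr);
 *      }
 *
 *      status = aac_fib_send(ContainerCommand, fibptr, datasize,
 *                      FsaNormal, 0, 1, my_callback, my_context);
 */
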
/**
 *      aac_consumer_get        -       get the top of the queue
 *      @dev: Adapter
 *      @q: Queue
 *      @entry: Return entry
 *
 *      Returns a pointer to the entry on the top of the requested queue
 *      that we are a consumer of. It does not change the state of the
 *      queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
        u32 index;
        int status;
        if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
                status = 0;
        } else {
                /*
                 *      The consumer index must be wrapped if we have reached
                 *      the end of the queue, else we just use the entry
                 *      pointed to by the header index
                 */
                if (le32_to_cpu(*q->headers.consumer) >= q->entries)
                        index = 0;
                else
                        index = le32_to_cpu(*q->headers.consumer);
                *entry = q->base + index;
                status = 1;
        }
        return status;
}

/**
 *      aac_consumer_free       -       free consumer entry
 *      @dev: Adapter
 *      @q: Queue
 *      @qid: Queue ident
 *
 *      Frees up the current top of the queue we are a consumer of. If the
 *      queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
        int wasfull = 0;
        u32 notify;

        if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
                wasfull = 1;

        if (le32_to_cpu(*q->headers.consumer) >= q->entries)
                *q->headers.consumer = cpu_to_le32(1);
        else
                le32_add_cpu(q->headers.consumer, 1);

        if (wasfull) {
                switch (qid) {

                case HostNormCmdQueue:
                        notify = HostNormCmdNotFull;
                        break;
                case HostNormRespQueue:
                        notify = HostNormRespNotFull;
                        break;
                default:
                        BUG();
                        return;
                }
                aac_adapter_notify(dev, notify);
        }
}

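/*
 * Consumer pattern (a sketch built from the two routines above): a
 * response handler peeks at the queue head without consuming it, processes
 * the entry, then retires it so a blocked producer can be re-notified:
 *
 *      while (aac_consumer_get(dev, q, &entry)) {
 *              // process entry->addr / entry->size here
 *              aac_consumer_free(dev, q, HostNormRespQueue);
 *      }
 */
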
/**
 *      aac_fib_adapter_complete        -       complete adapter issued fib
 *      @fibptr: fib to complete
 *      @size: size of fib
 *
 *      Will do all necessary work to complete a FIB that was sent from
 *      the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
        struct hw_fib * hw_fib = fibptr->hw_fib_va;
        struct aac_dev * dev = fibptr->dev;
        struct aac_queue * q;
        unsigned long nointr = 0;
        unsigned long qflags;

        if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
            dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
                kfree(hw_fib);
                return 0;
        }

        if (hw_fib->header.XferState == 0) {
                if (dev->comm_interface == AAC_COMM_MESSAGE)
                        kfree(hw_fib);
                return 0;
        }
        /*
         *      If we plan to do anything check the structure type first.
         */
        if (hw_fib->header.StructType != FIB_MAGIC &&
            hw_fib->header.StructType != FIB_MAGIC2 &&
            hw_fib->header.StructType != FIB_MAGIC2_64) {
                if (dev->comm_interface == AAC_COMM_MESSAGE)
                        kfree(hw_fib);
                return -EINVAL;
        }
        /*
         *      This block handles the case where the adapter had sent us a
         *      command and we have finished processing the command. We
         *      call completeFib when we are done processing the command
         *      and want to send a response back to the adapter. This will
         *      send the completed cdb to the adapter.
         */
        if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
                if (dev->comm_interface == AAC_COMM_MESSAGE) {
                        kfree(hw_fib);
                } else {
                        u32 index;
                        hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
                        if (size) {
                                size += sizeof(struct aac_fibhdr);
                                if (size > le16_to_cpu(hw_fib->header.SenderSize))
                                        return -EMSGSIZE;
                                hw_fib->header.Size = cpu_to_le16(size);
                        }
                        q = &dev->queues->queue[AdapNormRespQueue];
                        spin_lock_irqsave(q->lock, qflags);
                        aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
                        *(q->headers.producer) = cpu_to_le32(index + 1);
                        spin_unlock_irqrestore(q->lock, qflags);
                        if (!(nointr & (int)aac_config.irq_mod))
                                aac_adapter_notify(dev, AdapNormRespQueue);
                }
        } else {
                printk(KERN_WARNING "aac_fib_adapter_complete: "
                        "Unknown xferstate detected.\n");
                BUG();
        }
        return 0;
}

/**
 *      aac_fib_complete        -       fib completion handler
 *      @fibptr: FIB to complete
 *
 *      Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
        struct hw_fib * hw_fib = fibptr->hw_fib_va;

        /*
         *      Check for a fib which has already been completed
         */

        if (hw_fib->header.XferState == 0)
                return 0;
        /*
         *      If we plan to do anything check the structure type first.
         */

        if (hw_fib->header.StructType != FIB_MAGIC &&
            hw_fib->header.StructType != FIB_MAGIC2 &&
            hw_fib->header.StructType != FIB_MAGIC2_64)
                return -EINVAL;
        /*
         *      This block completes a cdb which originated on the host and we
         *      just need to deallocate the cdb or reinit it. At this point the
         *      command is complete that we had sent to the adapter and this
         *      cdb could be reused.
         */

        if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
                (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
                fib_dealloc(fibptr);
        } else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
                /*
                 *      This handles the case when the host has aborted the I/O
                 *      to the adapter because the adapter is not responding
                 */
                fib_dealloc(fibptr);
        } else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
                fib_dealloc(fibptr);
        } else {
                BUG();
        }
        return 0;
}

/**
 *      aac_printf      -       handle printf from firmware
 *      @dev: Adapter
 *      @val: Message info
 *
 *      Print a message passed to us by the controller firmware on the
 *      Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
        char *cp = dev->printfbuf;
        if (dev->printf_enabled) {
                int length = val & 0xffff;
                int level = (val >> 16) & 0xffff;

                /*
                 *      The size of the printfbuf is set in port.c
                 *      There is no variable or define for it
                 */
                if (length > 255)
                        length = 255;
                if (cp[length] != 0)
                        cp[length] = 0;
                if (level == LOG_AAC_HIGH_ERROR)
                        printk(KERN_WARNING "%s:%s", dev->name, cp);
                else
                        printk(KERN_INFO "%s:%s", dev->name, cp);
        }
        memset(cp, 0, 256);
}

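/*
 * Encoding note (derived from the unpacking above): the low 16 bits of val
 * carry the message length and the high 16 bits the log level, so a
 * hypothetical 32-byte firmware message at LOG_AAC_HIGH_ERROR would arrive
 * as val = (LOG_AAC_HIGH_ERROR << 16) | 32, with the text itself already
 * deposited in dev->printfbuf by the adapter.
 */
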
/**
 *      aac_handle_aif          -       Handle a message from the firmware
 *      @dev: Which adapter this fib is from
 *      @fibptr: Pointer to fibptr from adapter
 *
 *      This routine handles a driver notify fib from the adapter and
 *      dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT       (500*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
        struct hw_fib * hw_fib = fibptr->hw_fib_va;
        struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
        u32 channel, id, lun, container;
        struct scsi_device *device;
        enum {
                NOTHING,
                DELETE,
                ADD,
                CHANGE
        } device_config_needed = NOTHING;

        /* Sniff for container changes */

        if (!dev || !dev->fsa_dev)
                return;
        container = channel = id = lun = (u32)-1;

        /*
         *      We have set this up to try and minimize the number of
         * re-configures that take place. As a result of this when
         * certain AIF's come in we will set a flag waiting for another
         * type of AIF before setting the re-config flag.
         */
        switch (le32_to_cpu(aifcmd->command)) {
        case AifCmdDriverNotify:
                switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
                case AifRawDeviceRemove:
                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
                        if ((container >> 28)) {
                                container = (u32)-1;
                                break;
                        }
                        channel = (container >> 24) & 0xF;
                        if (channel >= dev->maximum_num_channels) {
                                container = (u32)-1;
                                break;
                        }
                        id = container & 0xFFFF;
                        if (id >= dev->maximum_num_physicals) {
                                container = (u32)-1;
                                break;
                        }
                        lun = (container >> 16) & 0xFF;
                        container = (u32)-1;
                        channel = aac_phys_to_logical(channel);
                        device_config_needed =
                          (((__le32 *)aifcmd->data)[0] ==
                            cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;

                        if (device_config_needed == ADD) {
                                device = scsi_device_lookup(
                                        dev->scsi_host_ptr,
                                        channel, id, lun);
                                if (device) {
                                        scsi_remove_device(device);
                                        scsi_device_put(device);
                                }
                        }
                        break;
                /*
                 *      Morph or Expand complete
                 */
                case AifDenMorphComplete:
                case AifDenVolumeExtendComplete:
                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
                        if (container >= dev->maximum_num_containers)
                                break;

                        /*
                         *      Find the scsi_device associated with the SCSI
                         * address. Make sure we have the right array, and if
                         * so set the flag to initiate a new re-config once we
                         * see an AifEnConfigChange AIF come through.
                         */

                        if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
                                device = scsi_device_lookup(dev->scsi_host_ptr,
                                        CONTAINER_TO_CHANNEL(container),
                                        CONTAINER_TO_ID(container),
                                        CONTAINER_TO_LUN(container));
                                if (device) {
                                        dev->fsa_dev[container].config_needed = CHANGE;
                                        dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
                                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
                                        scsi_device_put(device);
                                }
                        }
                }

                /*
                 *      If we are waiting on something and this happens to be
                 * that thing then set the re-configure flag.
                 */
                if (container != (u32)-1) {
                        if (container >= dev->maximum_num_containers)
                                break;
                        if ((dev->fsa_dev[container].config_waiting_on ==
                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
                                dev->fsa_dev[container].config_waiting_on = 0;
                } else for (container = 0;
                    container < dev->maximum_num_containers; ++container) {
                        if ((dev->fsa_dev[container].config_waiting_on ==
                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
                                dev->fsa_dev[container].config_waiting_on = 0;
                }
                break;

        case AifCmdEventNotify:
                switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
                case AifEnBatteryEvent:
                        dev->cache_protected =
                                (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
                        break;
                /*
                 *      Add an Array.
                 */
                case AifEnAddContainer:
                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
                        if (container >= dev->maximum_num_containers)
                                break;
                        dev->fsa_dev[container].config_needed = ADD;
                        dev->fsa_dev[container].config_waiting_on =
                                AifEnConfigChange;
                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
                        break;

                /*
                 *      Delete an Array.
                 */
                case AifEnDeleteContainer:
                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
                        if (container >= dev->maximum_num_containers)
                                break;
                        dev->fsa_dev[container].config_needed = DELETE;
                        dev->fsa_dev[container].config_waiting_on =
                                AifEnConfigChange;
                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
                        break;

                /*
                 *      Container change detected. If we currently are not
                 * waiting on something else, setup to wait on a Config Change.
                 */
                case AifEnContainerChange:
                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
                        if (container >= dev->maximum_num_containers)
                                break;
                        if (dev->fsa_dev[container].config_waiting_on &&
                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
                                break;
                        dev->fsa_dev[container].config_needed = CHANGE;
                        dev->fsa_dev[container].config_waiting_on =
                                AifEnConfigChange;
                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
                        break;

                case AifEnConfigChange:
                        break;

                case AifEnAddJBOD:
                case AifEnDeleteJBOD:
                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
                        if ((container >> 28)) {
                                container = (u32)-1;
                                break;
                        }
                        channel = (container >> 24) & 0xF;
                        if (channel >= dev->maximum_num_channels) {
                                container = (u32)-1;
                                break;
                        }
                        id = container & 0xFFFF;
                        if (id >= dev->maximum_num_physicals) {
                                container = (u32)-1;
                                break;
                        }
                        lun = (container >> 16) & 0xFF;
                        container = (u32)-1;
                        channel = aac_phys_to_logical(channel);
                        device_config_needed =
                          (((__le32 *)aifcmd->data)[0] ==
                            cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
                        if (device_config_needed == ADD) {
                                device = scsi_device_lookup(dev->scsi_host_ptr,
                                        channel,
                                        id,
                                        lun);
                                if (device) {
                                        scsi_remove_device(device);
                                        scsi_device_put(device);
                                }
                        }
                        break;

                case AifEnEnclosureManagement:
                        /*
                         * If in JBOD mode, automatic exposure of new
                         * physical target to be suppressed until configured.
                         */
                        if (dev->jbod)
                                break;
                        switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
                        case EM_DRIVE_INSERTION:
                        case EM_DRIVE_REMOVAL:
                        case EM_SES_DRIVE_INSERTION:
                        case EM_SES_DRIVE_REMOVAL:
                                container = le32_to_cpu(
                                        ((__le32 *)aifcmd->data)[2]);
                                if ((container >> 28)) {
                                        container = (u32)-1;
                                        break;
                                }
                                channel = (container >> 24) & 0xF;
                                if (channel >= dev->maximum_num_channels) {
                                        container = (u32)-1;
                                        break;
                                }
                                id = container & 0xFFFF;
                                lun = (container >> 16) & 0xFF;
                                container = (u32)-1;
                                if (id >= dev->maximum_num_physicals) {
                                        /* legacy dev_t ? */
                                        if ((0x2000 <= id) || lun || channel ||
                                          ((channel = (id >> 7) & 0x3F) >=
                                          dev->maximum_num_channels))
                                                break;
                                        lun = (id >> 4) & 7;
                                        id &= 0xF;
                                }
                                channel = aac_phys_to_logical(channel);
                                device_config_needed =
                                  ((((__le32 *)aifcmd->data)[3]
                                    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
                                    (((__le32 *)aifcmd->data)[3]
                                    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
                                  ADD : DELETE;
                                break;
                        }
                        break;
                }

                /*
                 *      If we are waiting on something and this happens to be
                 * that thing then set the re-configure flag.
                 */
                if (container != (u32)-1) {
                        if (container >= dev->maximum_num_containers)
                                break;
                        if ((dev->fsa_dev[container].config_waiting_on ==
                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
                                dev->fsa_dev[container].config_waiting_on = 0;
                } else for (container = 0;
                    container < dev->maximum_num_containers; ++container) {
                        if ((dev->fsa_dev[container].config_waiting_on ==
                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
                                dev->fsa_dev[container].config_waiting_on = 0;
                }
                break;

        case AifCmdJobProgress:
                /*
                 *      These are job progress AIF's. When a Clear is being
                 * done on a container it is initially created then hidden from
                 * the OS. When the clear completes we don't get a config
                 * change so we monitor the job status complete on a clear then
                 * wait for a container change.
                 */

                if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
                    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
                     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
                        for (container = 0;
                            container < dev->maximum_num_containers;
                            ++container) {
                                /*
                                 * Stomp on all config sequencing for all
                                 * containers?
                                 */
                                dev->fsa_dev[container].config_waiting_on =
                                        AifEnContainerChange;
                                dev->fsa_dev[container].config_needed = ADD;
                                dev->fsa_dev[container].config_waiting_stamp =
                                        jiffies;
                        }
                }
                if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
                    ((__le32 *)aifcmd->data)[6] == 0 &&
                    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
                        for (container = 0;
                            container < dev->maximum_num_containers;
                            ++container) {
                                /*
                                 * Stomp on all config sequencing for all
                                 * containers?
                                 */
                                dev->fsa_dev[container].config_waiting_on =
                                        AifEnContainerChange;
                                dev->fsa_dev[container].config_needed = DELETE;
                                dev->fsa_dev[container].config_waiting_stamp =
                                        jiffies;
                        }
                }
                break;
        }

1169        container = 0;
1170retry_next:
1171        if (device_config_needed == NOTHING)
1172        for (; container < dev->maximum_num_containers; ++container) {
1173                if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1174                        (dev->fsa_dev[container].config_needed != NOTHING) &&
1175                        time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1176                        device_config_needed =
1177                                dev->fsa_dev[container].config_needed;
1178                        dev->fsa_dev[container].config_needed = NOTHING;
1179                        channel = CONTAINER_TO_CHANNEL(container);
1180                        id = CONTAINER_TO_ID(container);
1181                        lun = CONTAINER_TO_LUN(container);
1182                        break;
1183                }
1184        }
1185        if (device_config_needed == NOTHING)
1186                return;
1187
1188        /*
1189         *      If we decided that a re-configuration needs to be done,
1190         * schedule it here on the way out the door, please close the door
1191         * behind you.
1192         */
1193
1194        /*
1195         *      Find the scsi_device associated with the SCSI address,
1196         * and mark it as changed, invalidating the cache. This deals
1197         * with changes to existing device IDs.
1198         */
1199
1200        if (!dev || !dev->scsi_host_ptr)
1201                return;
1202        /*
1203         * force reload of disk info via aac_probe_container
1204         */
1205        if ((channel == CONTAINER_CHANNEL) &&
1206          (device_config_needed != NOTHING)) {
1207                if (dev->fsa_dev[container].valid == 1)
1208                        dev->fsa_dev[container].valid = 2;
1209                aac_probe_container(dev, container);
1210        }
1211        device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
1212        if (device) {
1213                switch (device_config_needed) {
1214                case DELETE:
1215#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1216                        scsi_remove_device(device);
1217#else
1218                        if (scsi_device_online(device)) {
1219                                scsi_device_set_state(device, SDEV_OFFLINE);
1220                                sdev_printk(KERN_INFO, device,
1221                                        "Device offlined - %s\n",
1222                                        (channel == CONTAINER_CHANNEL) ?
1223                                                "array deleted" :
1224                                                "enclosure services event");
1225                        }
1226#endif
1227                        break;
1228                case ADD:
1229                        if (!scsi_device_online(device)) {
1230                                sdev_printk(KERN_INFO, device,
1231                                        "Device online - %s\n",
1232                                        (channel == CONTAINER_CHANNEL) ?
1233                                                "array created" :
1234                                                "enclosure services event");
1235                                scsi_device_set_state(device, SDEV_RUNNING);
1236                        }
1237                        /* FALLTHRU */
1238                case CHANGE:
1239                        if ((channel == CONTAINER_CHANNEL)
1240                         && (!dev->fsa_dev[container].valid)) {
1241#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1242                                scsi_remove_device(device);
1243#else
1244                                if (!scsi_device_online(device))
1245                                        break;
1246                                scsi_device_set_state(device, SDEV_OFFLINE);
1247                                sdev_printk(KERN_INFO, device,
1248                                        "Device offlined - %s\n",
1249                                        "array failed");
1250#endif
1251                                break;
1252                        }
1253                        scsi_rescan_device(&device->sdev_gendev);
1254
1255                default:
1256                        break;
1257                }
1258                scsi_device_put(device);
1259                device_config_needed = NOTHING;
1260        }
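            /*
             * If scsi_device_lookup() found nothing, the midlayer does not
             * know this address yet; device_config_needed is still ADD in
             * that case, so register the brand new device here.
             */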
1261        if (device_config_needed == ADD)
1262                scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
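            /*
             * Container events can affect more than one array, so on the
             * container channel resume the sniff loop at the next
             * container rather than returning.
             */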
1263        if (channel == CONTAINER_CHANNEL) {
1264                container++;
1265                device_config_needed = NOTHING;
1266                goto retry_next;
1267        }
1268}
1269
1270static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1271{
1272        int index, quirks;
1273        int retval, i;
1274        struct Scsi_Host *host;
1275        struct scsi_device *dev;
1276        struct scsi_cmnd *command;
1277        struct scsi_cmnd *command_list;
1278        int jafo = 0;
1279        int cpu;
1280
1281        /*
1282         * Assumptions:
1283         *      - host is locked, unless called by the aacraid thread.
1284         *        (a matter of convenience, due to legacy issues surrounding
1285         *        eh_host_adapter_reset).
1286         *      - in_reset is asserted, so no new i/o is getting to the
1287         *        card.
1288         *      - The card is dead, or will be very shortly ;-/ so no new
1289         *        commands are completing in the interrupt service.
1290         */
1291        host = aac->scsi_host_ptr;
1292        scsi_block_requests(host);
1293        aac_adapter_disable_int(aac);
1294        if (aac->thread->pid != current->pid) {
1295                spin_unlock_irq(host->host_lock);
1296                kthread_stop(aac->thread);
1297                jafo = 1;
1298        }
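            /*
             * jafo flags that the reset came from outside the aacraid
             * command thread; the thread was stopped above and is
             * restarted once the adapter has been re-initialized.
             */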
1299
1300        /*
1301         *      A positive health value means the adapter is in a known
1302         * DEAD PANIC state and can be reset to `try again'.
1303         */
1304        retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
1305
1306        if (retval)
1307                goto out;
1308
1309        /*
1310         *      Loop through the fibs, waking any waiters blocked on outstanding synchronous FIBs
1311         */
1312        for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1313                struct fib *fib = &aac->fibs[index];
1314                if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1315                  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
1316                        unsigned long flagv;
1317                        spin_lock_irqsave(&fib->event_lock, flagv);
1318                        up(&fib->event_wait);
1319                        spin_unlock_irqrestore(&fib->event_lock, flagv);
1320                        schedule();
1321                        retval = 0;
1322                }
1323        }
1324        /* Give some extra time for ioctls to complete. */
1325        if (retval == 0)
1326                ssleep(2);
1327        index = aac->cardtype;
1328
1329        /*
1330         * Re-initialize the adapter: first free resources, then carefully
1331         * apply the initialization sequence to come back again. The only
1332         * risk is a change in firmware dropping cache; it is assumed the
1333         * caller will ensure that i/o is quiesced and the card is flushed
1334         * in that case.
1335         */
1336        aac_fib_map_free(aac);
1337        pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1338        aac->comm_addr = NULL;
1339        aac->comm_phys = 0;
1340        kfree(aac->queues);
1341        aac->queues = NULL;
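            /*
             * Tear down interrupt delivery: on PMC-Sierra parts using
             * MSI-X, clear each vector's affinity hint and free it before
             * disabling MSI-X; legacy INTx/MSI setups free the single IRQ.
             */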
1342        cpu = cpumask_first(cpu_online_mask);
1343        if (aac->pdev->device == PMC_DEVICE_S6 ||
1344            aac->pdev->device == PMC_DEVICE_S7 ||
1345            aac->pdev->device == PMC_DEVICE_S8 ||
1346            aac->pdev->device == PMC_DEVICE_S9) {
1347                if (aac->max_msix > 1) {
1348                        for (i = 0; i < aac->max_msix; i++) {
1349                                if (irq_set_affinity_hint(
1350                                    aac->msixentry[i].vector,
1351                                    NULL)) {
1352                                        printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
1353                                                aac->name,
1354                                                aac->id,
1355                                                cpu);
1356                                }
1357                                cpu = cpumask_next(cpu,
1358                                                cpu_online_mask);
1359                                free_irq(aac->msixentry[i].vector,
1360                                         &(aac->aac_msix[i]));
1361                        }
1362                        pci_disable_msix(aac->pdev);
1363                } else {
1364                        free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
1365                }
1366        } else {
1367                free_irq(aac->pdev->irq, aac);
1368        }
1369        if (aac->msi)
1370                pci_disable_msi(aac->pdev);
1371        kfree(aac->fsa_dev);
1372        aac->fsa_dev = NULL;
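            /*
             * AAC_QUIRK_31BIT cards must be re-initialized under a 31-bit
             * DMA mask; the mask is widened back to 32 bits once the init
             * routine below has run.
             */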
1373        quirks = aac_get_driver_ident(index)->quirks;
1374        if (quirks & AAC_QUIRK_31BIT) {
1375                if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
1376                  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
1377                        goto out;
1378        } else {
1379                if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
1380                  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
1381                        goto out;
1382        }
1383        if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1384                goto out;
1385        if (quirks & AAC_QUIRK_31BIT)
1386                if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
1387                        goto out;
1388        if (jafo) {
1389                aac->thread = kthread_run(aac_command_thread, aac, "%s",
1390                                          aac->name);
1391                if (IS_ERR(aac->thread)) {
1392                        retval = PTR_ERR(aac->thread);
1393                        goto out;
1394                }
1395        }
1396        (void)aac_get_adapter_info(aac);
1397        if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1398                host->sg_tablesize = 34;
1399                host->max_sectors = (host->sg_tablesize * 8) + 112;
1400        }
1401        if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1402                host->sg_tablesize = 17;
1403                host->max_sectors = (host->sg_tablesize * 8) + 112;
1404        }
1405        aac_get_config_status(aac, 1);
1406        aac_get_containers(aac);
1407        /*
1408         * This is where the assumption that the Adapter is quiesced
1409         * is important.
1410         */
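            /*
             * Chain every command still owned by the firmware through its
             * SCp.buffer pointer, then complete each one with TASK SET
             * FULL status so the midlayer requeues it.
             */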
1411        command_list = NULL;
1412        __shost_for_each_device(dev, host) {
1413                unsigned long flags;
1414                spin_lock_irqsave(&dev->list_lock, flags);
1415                list_for_each_entry(command, &dev->cmd_list, list)
1416                        if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1417                                command->SCp.buffer = (struct scatterlist *)command_list;
1418                                command_list = command;
1419                        }
1420                spin_unlock_irqrestore(&dev->list_lock, flags);
1421        }
1422        while ((command = command_list)) {
1423                command_list = (struct scsi_cmnd *)command->SCp.buffer;
1424                command->SCp.buffer = NULL;
1425                command->result = DID_OK << 16
1426                  | COMMAND_COMPLETE << 8
1427                  | SAM_STAT_TASK_SET_FULL;
1428                command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1429                command->scsi_done(command);
1430        }
1431        retval = 0;
1432
1433out:
1434        aac->in_reset = 0;
1435        scsi_unblock_requests(host);
1436        if (jafo) {
1437                spin_lock_irq(host->host_lock);
1438        }
1439        return retval;
1440}
1441
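    /**
     *      aac_reset_adapter       -       quiesce and reset the adapter
     *      @aac: Adapter to reset
     *      @forced: if below 2, drain outstanding commands and shut the
     *               firmware down cleanly before resetting
     *
     *      Marks the adapter as being reset, waits up to a minute for
     *      outstanding i/o to drain, then performs the actual reset under
     *      the host lock.
     */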
1442int aac_reset_adapter(struct aac_dev * aac, int forced)
1443{
1444        unsigned long flagv = 0;
1445        int retval;
1446        struct Scsi_Host * host;
1447
1448        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1449                return -EBUSY;
1450
1451        if (aac->in_reset) {
1452                spin_unlock_irqrestore(&aac->fib_lock, flagv);
1453                return -EBUSY;
1454        }
1455        aac->in_reset = 1;
1456        spin_unlock_irqrestore(&aac->fib_lock, flagv);
1457
1458        /*
1459         * Wait for all commands to complete to this specific
1460         * target (block maximum 60 seconds). Although not necessary,
1461         * it does make us a good storage citizen.
1462         */
1463        host = aac->scsi_host_ptr;
1464        scsi_block_requests(host);
1465        if (forced < 2) for (retval = 60; retval; --retval) {
1466                struct scsi_device * dev;
1467                struct scsi_cmnd * command;
1468                int active = 0;
1469
1470                __shost_for_each_device(dev, host) {
1471                        spin_lock_irqsave(&dev->list_lock, flagv);
1472                        list_for_each_entry(command, &dev->cmd_list, list) {
1473                                if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1474                                        active++;
1475                                        break;
1476                                }
1477                        }
1478                        spin_unlock_irqrestore(&dev->list_lock, flagv);
1479                        if (active)
1480                                break;
1481
1482                }
1483                /*
1484                 * We can exit if all the commands are complete
1485                 */
1486                if (active == 0)
1487                        break;
1488                ssleep(1);
1489        }
1490
1491        /* Quiesce build, flush cache, write through mode */
1492        if (forced < 2)
1493                aac_send_shutdown(aac);
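            /*
             * An aac_check_reset value other than 0 or 1 asks for a forced
             * reset even when the caller did not request one.
             */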
1494        spin_lock_irqsave(host->host_lock, flagv);
1495        retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
1496        spin_unlock_irqrestore(host->host_lock, flagv);
1497
1498        if ((forced < 2) && (retval == -ENODEV)) {
1499                /* IOP_RESET is unsupported or disabled; unwind the aac_send_shutdown() above */
1500                struct fib * fibctx = aac_fib_alloc(aac);
1501                if (fibctx) {
1502                        struct aac_pause *cmd;
1503                        int status;
1504
1505                        aac_fib_init(fibctx);
1506
1507                        cmd = (struct aac_pause *) fib_data(fibctx);
1508
1509                        cmd->command = cpu_to_le32(VM_ContainerConfig);
1510                        cmd->type = cpu_to_le32(CT_PAUSE_IO);
1511                        cmd->timeout = cpu_to_le32(1);
1512                        cmd->min = cpu_to_le32(1);
1513                        cmd->noRescan = cpu_to_le32(1);
1514                        cmd->count = cpu_to_le32(0);
1515
1516                        status = aac_fib_send(ContainerCommand,
1517                          fibctx,
1518                          sizeof(struct aac_pause),
1519                          FsaNormal,
1520                          -2 /* Timeout silently */, 1,
1521                          NULL, NULL);
1522
1523                        if (status >= 0)
1524                                aac_fib_complete(fibctx);
1525                        /* FIB should be freed only after getting
1526                         * the response from the F/W */
1527                        if (status != -ERESTARTSYS)
1528                                aac_fib_free(fibctx);
1529                }
1530        }
1531
1532        return retval;
1533}
1534
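    /**
     *      aac_check_health        -       check adapter health
     *      @aac: Adapter to check
     *
     *      Polls the adapter health indication. On a firmware panic
     *      (BlinkLED) a fake AIF is queued to every listening management
     *      context and, when permitted by aac_check_reset, the adapter is
     *      reset. Returns 0 while healthy, otherwise the health code.
     */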
1535int aac_check_health(struct aac_dev * aac)
1536{
1537        int BlinkLED;
1538        unsigned long time_now, flagv = 0;
1539        struct list_head * entry;
1540        struct Scsi_Host * host;
1541
1542        /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1543        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1544                return 0;
1545
1546        if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1547                spin_unlock_irqrestore(&aac->fib_lock, flagv);
1548                return 0; /* OK */
1549        }
1550
1551        aac->in_reset = 1;
1552
1553        /* Fake up an AIF:
1554         *      aac_aifcmd.command = AifCmdEventNotify = 1
1555         *      aac_aifcmd.seqnum = 0xFFFFFFFF
1556         *      aac_aifcmd.data[0] = AifEnExpEvent = 23
1557         *      aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1558         *      aac_aifcmd.data[2] = AifHighPriority = 3
1559         *      aac_aifcmd.data[3] = BlinkLED
1560         */
1561
1562        time_now = jiffies/HZ;
1563        entry = aac->fib_list.next;
1564
1565        /*
1566         * For each Context that is on the
1567         * fibctxList, make a copy of the
1568         * fib, and then set the event to wake up the
1569         * thread that is waiting for it.
1570         */
1571        while (entry != &aac->fib_list) {
1572                /*
1573                 * Extract the fibctx
1574                 */
1575                struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1576                struct hw_fib * hw_fib;
1577                struct fib * fib;
1578                /*
1579                 * Check if the queue is getting
1580                 * backlogged
1581                 */
1582                if (fibctx->count > 20) {
1583                        /*
1584                         * It's *not* jiffies folks,
1585                         * but jiffies / HZ, so do not
1586                         * panic ...
1587                         */
1588                        u32 time_last = fibctx->jiffies;
1589                        /*
1590                         * Has it been > 2 minutes
1591                         * since the last read off
1592                         * the queue?
1593                         */
1594                        if ((time_now - time_last) > aif_timeout) {
1595                                entry = entry->next;
1596                                aac_close_fib_context(aac, fibctx);
1597                                continue;
1598                        }
1599                }
1600                /*
1601                 * Warning: no sleep allowed while
1602                 * holding spinlock
1603                 */
1604                hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1605                fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1606                if (fib && hw_fib) {
1607                        struct aac_aifcmd * aif;
1608
1609                        fib->hw_fib_va = hw_fib;
1610                        fib->dev = aac;
1611                        aac_fib_init(fib);
1612                        fib->type = FSAFS_NTC_FIB_CONTEXT;
1613                        fib->size = sizeof (struct fib);
1614                        fib->data = hw_fib->data;
1615                        aif = (struct aac_aifcmd *)hw_fib->data;
1616                        aif->command = cpu_to_le32(AifCmdEventNotify);
1617                        aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1618                        ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1619                        ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1620                        ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1621                        ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1622
1623                        /*
1624                         * Put the FIB onto the
1625                         * fibctx's fibs
1626                         */
1627                        list_add_tail(&fib->fiblink, &fibctx->fib_list);
1628                        fibctx->count++;
1629                        /*
1630                         * Set the event to wake up the
1631                         * thread that is waiting.
1632                         */
1633                        up(&fibctx->wait_sem);
1634                } else {
1635                        printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1636                        kfree(fib);
1637                        kfree(hw_fib);
1638                }
1639                entry = entry->next;
1640        }
1641
1642        spin_unlock_irqrestore(&aac->fib_lock, flagv);
1643
1644        if (BlinkLED < 0) {
1645                printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1646                goto out;
1647        }
1648
1649        printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1650
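            /*
             * Honour aac_check_reset: 0 never resets here, and 1 defers to
             * firmware that advertises AAC_OPTION_IGNORE_RESET.
             */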
1651        if (!aac_check_reset || ((aac_check_reset == 1) &&
1652                (aac->supplement_adapter_info.SupportedOptions2 &
1653                        AAC_OPTION_IGNORE_RESET)))
1654                goto out;
1655        host = aac->scsi_host_ptr;
1656        if (aac->thread->pid != current->pid)
1657                spin_lock_irqsave(host->host_lock, flagv);
1658        BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
1659        if (aac->thread->pid != current->pid)
1660                spin_unlock_irqrestore(host->host_lock, flagv);
1661        return BlinkLED;
1662
1663out:
1664        aac->in_reset = 0;
1665        return BlinkLED;
1666}
1667
1668
1669/**
1670 *      aac_command_thread      -       command processing thread
1671 *      @dev: Adapter to monitor
1672 *
1673 *      Waits on the commandready event in its queue. When the event gets set
1674 *      it will pull FIBs off its queue. It will continue to pull FIBs off
1675 *      until the queue is empty. When the queue is empty it will wait for
1676 *      more FIBs.
1677 */
1678
1679int aac_command_thread(void *data)
1680{
1681        struct aac_dev *dev = data;
1682        struct hw_fib *hw_fib, *hw_newfib;
1683        struct fib *fib, *newfib;
1684        struct aac_fib_context *fibctx;
1685        unsigned long flags;
1686        DECLARE_WAITQUEUE(wait, current);
1687        unsigned long next_jiffies = jiffies + HZ;
1688        unsigned long next_check_jiffies = next_jiffies;
1689        long difference = HZ;
1690
1691        /*
1692         *      We can only have one thread per adapter for AIF's.
1693         */
1694        if (dev->aif_thread)
1695                return -EINVAL;
1696
1697        /*
1698         *      Let the DPC know it has a place to send the AIF's to.
1699         */
1700        dev->aif_thread = 1;
1701        add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1702        set_current_state(TASK_INTERRUPTIBLE);
1703        dprintk ((KERN_INFO "aac_command_thread start\n"));
1704        while (1) {
1705                spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1706                while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1707                        struct list_head *entry;
1708                        struct aac_aifcmd * aifcmd;
1709
1710                        set_current_state(TASK_RUNNING);
1711
1712                        entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
1713                        list_del(entry);
1714
1715                        spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1716                        fib = list_entry(entry, struct fib, fiblink);
1717                        /*
1718                         *      We will process the FIB here or pass it to a
1719                         *      worker thread that is TBD. We really can't
1720                         *      do anything at this point since we don't have
1721                         *      anything defined for this thread to do.
1722                         */
1723                        hw_fib = fib->hw_fib_va;
1724                        memset(fib, 0, sizeof(struct fib));
1725                        fib->type = FSAFS_NTC_FIB_CONTEXT;
1726                        fib->size = sizeof(struct fib);
1727                        fib->hw_fib_va = hw_fib;
1728                        fib->data = hw_fib->data;
1729                        fib->dev = dev;
1730                        /*
1731                         *      We only handle AifRequest fibs from the adapter.
1732                         */
1733                        aifcmd = (struct aac_aifcmd *) hw_fib->data;
1734                        if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1735                                /* Handle Driver Notify Events */
1736                                aac_handle_aif(dev, fib);
1737                                *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1738                                aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1739                        } else {
1740                                /* The u32 here is important and intended. We are using
1741                                   32-bit wrapping time to fit the adapter field */
1742
1743                                u32 time_now, time_last;
1744                                unsigned long flagv;
1745                                unsigned num;
1746                                struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1747                                struct fib ** fib_pool, ** fib_p;
1748
1749                                /* Sniff events */
1750                                if ((aifcmd->command ==
1751                                     cpu_to_le32(AifCmdEventNotify)) ||
1752                                    (aifcmd->command ==
1753                                     cpu_to_le32(AifCmdJobProgress))) {
1754                                        aac_handle_aif(dev, fib);
1755                                }
1756
1757                                time_now = jiffies/HZ;
1758
1759                                /*
1760                                 * Warning: no sleep allowed while
1761                                 * holding spinlock. We take the estimate
1762                                 * and pre-allocate a set of fibs outside the
1763                                 * lock.
1764                                 */
1765                                num = le32_to_cpu(dev->init->AdapterFibsSize)
1766                                    / sizeof(struct hw_fib); /* some extra */
1767                                spin_lock_irqsave(&dev->fib_lock, flagv);
1768                                entry = dev->fib_list.next;
1769                                while (entry != &dev->fib_list) {
1770                                        entry = entry->next;
1771                                        ++num;
1772                                }
1773                                spin_unlock_irqrestore(&dev->fib_lock, flagv);
1774                                hw_fib_pool = NULL;
1775                                fib_pool = NULL;
1776                                if (num
1777                                 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1778                                 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1779                                        hw_fib_p = hw_fib_pool;
1780                                        fib_p = fib_pool;
1781                                        while (hw_fib_p < &hw_fib_pool[num]) {
1782                                                if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1783                                                        --hw_fib_p;
1784                                                        break;
1785                                                }
1786                                                if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1787                                                        kfree(*(--hw_fib_p));
1788                                                        break;
1789                                                }
1790                                        }
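                                            /*
                                             * Recompute num as the number of
                                             * fib/hw_fib pairs actually
                                             * allocated; a partial pool is
                                             * still usable below.
                                             */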
1791                                        if ((num = hw_fib_p - hw_fib_pool) == 0) {
1792                                                kfree(fib_pool);
1793                                                fib_pool = NULL;
1794                                                kfree(hw_fib_pool);
1795                                                hw_fib_pool = NULL;
1796                                        }
1797                                } else {
1798                                        kfree(hw_fib_pool);
1799                                        hw_fib_pool = NULL;
1800                                }
1801                                spin_lock_irqsave(&dev->fib_lock, flagv);
1802                                entry = dev->fib_list.next;
1803                                /*
1804                                 * For each Context that is on the
1805                                 * fibctxList, make a copy of the
1806                                 * fib, and then set the event to wake up the
1807                                 * thread that is waiting for it.
1808                                 */
1809                                hw_fib_p = hw_fib_pool;
1810                                fib_p = fib_pool;
1811                                while (entry != &dev->fib_list) {
1812                                        /*
1813                                         * Extract the fibctx
1814                                         */
1815                                        fibctx = list_entry(entry, struct aac_fib_context, next);
1816                                        /*
1817                                         * Check if the queue is getting
1818                                         * backlogged
1819                                         */
1820                                        if (fibctx->count > 20)
1821                                        {
1822                                                /*
1823                                                 * It's *not* jiffies folks,
1824                                                 * but jiffies / HZ so do not
1825                                                 * panic ...
1826                                                 */
1827                                                time_last = fibctx->jiffies;
1828                                                /*
1829                                                 * Has it been > 2 minutes
1830                                                 * since the last read off
1831                                                 * the queue?
1832                                                 */
1833                                                if ((time_now - time_last) > aif_timeout) {
1834                                                        entry = entry->next;
1835                                                        aac_close_fib_context(dev, fibctx);
1836                                                        continue;
1837                                                }
1838                                        }
1839                                        /*
1840                                         * Warning: no sleep allowed while
1841                                         * holding spinlock
1842                                         */
1843                                        if (hw_fib_p < &hw_fib_pool[num]) {
1844                                                hw_newfib = *hw_fib_p;
1845                                                *(hw_fib_p++) = NULL;
1846                                                newfib = *fib_p;
1847                                                *(fib_p++) = NULL;
1848                                                /*
1849                                                 * Make the copy of the FIB
1850                                                 */
1851                                                memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1852                                                memcpy(newfib, fib, sizeof(struct fib));
1853                                                newfib->hw_fib_va = hw_newfib;
1854                                                /*
1855                                                 * Put the FIB onto the
1856                                                 * fibctx's fibs
1857                                                 */
1858                                                list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1859                                                fibctx->count++;
1860                                                /*
1861                                                 * Set the event to wake up the
1862                                                 * thread that is waiting.
1863                                                 */
1864                                                up(&fibctx->wait_sem);
1865                                        } else {
1866                                                printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1867                                        }
1868                                        entry = entry->next;
1869                                }
1870                                /*
1871                                 *      Set the status of this FIB
1872                                 */
1873                                *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1874                                aac_fib_adapter_complete(fib, sizeof(u32));
1875                                spin_unlock_irqrestore(&dev->fib_lock, flagv);
1876                                /* Free up the remaining resources */
1877                                hw_fib_p = hw_fib_pool;
1878                                fib_p = fib_pool;
1879                                while (hw_fib_p < &hw_fib_pool[num]) {
1880                                        kfree(*hw_fib_p);
1881                                        kfree(*fib_p);
1882                                        ++fib_p;
1883                                        ++hw_fib_p;
1884                                }
1885                                kfree(hw_fib_pool);
1886                                kfree(fib_pool);
1887                        }
1888                        kfree(fib);
1889                        spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1890                }
1891                /*
1892                 *      There are no more AIF's
1893                 */
1894                spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1895
1896                /*
1897                 *      Background activity
1898                 */
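                    /*
                     * Two timers interleave here: next_check_jiffies paces
                     * the periodic health check (check_interval seconds)
                     * and next_jiffies paces the host-time update sent to
                     * the firmware (update_interval seconds).
                     */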
1899                if ((time_before(next_check_jiffies,next_jiffies))
1900                 && ((difference = next_check_jiffies - jiffies) <= 0)) {
1901                        next_check_jiffies = next_jiffies;
1902                        if (aac_check_health(dev) == 0) {
1903                                difference = ((long)(unsigned)check_interval)
1904                                           * HZ;
1905                                next_check_jiffies = jiffies + difference;
1906                        } else if (!dev->queues)
1907                                break;
1908                }
1909                if (!time_before(next_check_jiffies,next_jiffies)
1910                 && ((difference = next_jiffies - jiffies) <= 0)) {
1911                        struct timeval now;
1912                        int ret;
1913
1914                        /* Don't even try to talk to the adapter if it's sick */
1915                        ret = aac_check_health(dev);
1916                        if (!ret && !dev->queues)
1917                                break;
1918                        next_check_jiffies = jiffies
1919                                           + ((long)(unsigned)check_interval)
1920                                           * HZ;
1921                        do_gettimeofday(&now);
1922
1923                        /* Synchronize our watches: if off a second boundary, sleep until the next one and send the host time on a later pass */
1924                        if (((1000000 - (1000000 / HZ)) > now.tv_usec)
1925                         && (now.tv_usec > (1000000 / HZ)))
1926                                difference = (((1000000 - now.tv_usec) * HZ)
1927                                  + 500000) / 1000000;
1928                        else if (ret == 0) {
1929                                struct fib *fibptr;
1930
1931                                if ((fibptr = aac_fib_alloc(dev))) {
1932                                        int status;
1933                                        __le32 *info;
1934
1935                                        aac_fib_init(fibptr);
1936
1937                                        info = (__le32 *) fib_data(fibptr);
1938                                        if (now.tv_usec > 500000)
1939                                                ++now.tv_sec;
1940
1941                                        *info = cpu_to_le32(now.tv_sec);
1942
1943                                        status = aac_fib_send(SendHostTime,
1944                                                fibptr,
1945                                                sizeof(*info),
1946                                                FsaNormal,
1947                                                1, 1,
1948                                                NULL,
1949                                                NULL);
1950                                        /* Do not set XferState to zero unless
1951                                         * receives a response from F/W */
1952                                        if (status >= 0)
1953                                                aac_fib_complete(fibptr);
1954                                        /* FIB should be freed only after
1955                                         * getting the response from the F/W */
1956                                        if (status != -ERESTARTSYS)
1957                                                aac_fib_free(fibptr);
1958                                }
1959                                difference = (long)(unsigned)update_interval*HZ;
1960                        } else {
1961                                /* retry shortly */
1962                                difference = 10 * HZ;
1963                        }
1964                        next_jiffies = jiffies + difference;
1965                        if (time_before(next_check_jiffies,next_jiffies))
1966                                difference = next_check_jiffies - jiffies;
1967                }
1968                if (difference <= 0)
1969                        difference = 1;
1970                set_current_state(TASK_INTERRUPTIBLE);
1971                schedule_timeout(difference);
1972
1973                if (kthread_should_stop())
1974                        break;
1975        }
1976        if (dev->queues)
1977                remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1978        dev->aif_thread = 0;
1979        return 0;
1980}
1981