linux/drivers/scsi/aacraid/commsup.c
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
        dprintk((KERN_INFO
          "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
          dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
          AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
        dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
                (dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
                * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
                &dev->hw_fib_pa);
        if (dev->hw_fib_va == NULL)
                return -ENOMEM;
        return 0;
}
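/*
 * Note: pci_alloc_consistent() is the legacy PCI DMA API. On kernels where
 * that wrapper has been removed, the equivalent allocation (a sketch, not
 * part of this driver) would use the generic DMA API directly; GFP_ATOMIC
 * matches what the old wrapper implied:
 *
 *	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
 *		(dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
 *		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)
 *		+ (ALIGN32 - 1),
 *		&dev->hw_fib_pa, GFP_ATOMIC);
 */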

/**
 *	aac_fib_map_free	-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
        if (dev->hw_fib_va && dev->max_fib_size) {
                pci_free_consistent(dev->pdev,
                (dev->max_fib_size *
                (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
                dev->hw_fib_va, dev->hw_fib_pa);
        }
        dev->hw_fib_va = NULL;
        dev->hw_fib_pa = 0;
}

void aac_fib_vector_assign(struct aac_dev *dev)
{
        u32 i = 0;
        u32 vector = 1;
        struct fib *fibptr = NULL;

        for (i = 0, fibptr = &dev->fibs[i];
                i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
                i++, fibptr++) {
                if ((dev->max_msix == 1) ||
                  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
                        - dev->vector_cap))) {
                        fibptr->vector_no = 0;
                } else {
                        fibptr->vector_no = vector;
                        vector++;
                        if (vector == dev->max_msix)
                                vector = 1;
                }
        }
}
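/*
 * Illustration of the assignment above (not driver code): the last
 * dev->vector_cap fibs, and every fib when only one MSI-X vector is
 * available (max_msix == 1), are pinned to vector 0. The remaining fibs
 * are dealt vectors round-robin over 1 .. max_msix-1; with max_msix == 4
 * that is 1, 2, 3, 1, 2, 3, ...
 */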

/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
        struct fib *fibptr;
        struct hw_fib *hw_fib;
        dma_addr_t hw_fib_pa;
        int i;

        while (((i = fib_map_alloc(dev)) == -ENOMEM)
         && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
                dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
                dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
        }
        if (i < 0)
                return -ENOMEM;

        /* 32 byte alignment for PMC */
        hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
        dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
                (hw_fib_pa - dev->hw_fib_pa));
        dev->hw_fib_pa = hw_fib_pa;
        memset(dev->hw_fib_va, 0,
                (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
                (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

        /* add Xport header */
        dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
                sizeof(struct aac_fib_xporthdr));
        dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);

        hw_fib = dev->hw_fib_va;
        hw_fib_pa = dev->hw_fib_pa;
        /*
         *	Initialise the fibs
         */
        for (i = 0, fibptr = &dev->fibs[i];
                i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
                i++, fibptr++)
        {
                fibptr->flags = 0;
                fibptr->size = sizeof(struct fib);
                fibptr->dev = dev;
                fibptr->hw_fib_va = hw_fib;
                fibptr->data = (void *) fibptr->hw_fib_va->data;
                fibptr->next = fibptr+1;	/* Forward chain the fibs */
                sema_init(&fibptr->event_wait, 0);
                spin_lock_init(&fibptr->event_lock);
                hw_fib->header.XferState = cpu_to_le32(0xffffffff);
                hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
                fibptr->hw_fib_pa = hw_fib_pa;
                hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
                        dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
                hw_fib_pa = hw_fib_pa +
                        dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
        }

        /*
         *	Assign vector numbers to fibs
         */
        aac_fib_vector_assign(dev);

        /*
         *	Add the fib chain to the free list
         */
        dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
        /*
         *	Set 8 fibs aside for management tools
         */
        dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
        return 0;
}
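/*
 * Worked example of the ALIGN32 round-up above (illustrative values only):
 * with ALIGN32 == 32 and a raw hw_fib_pa of 0x1004,
 *
 *	(0x1004 + 31) & ~31  ==  0x1020
 *
 * so the bus address advances to the next 32-byte boundary and the virtual
 * address advances by the same offset (0x1c), keeping the va/pa pair
 * consistent. fib_map_alloc() over-allocates by ALIGN32 - 1 bytes precisely
 * so this round-up always stays inside the allocation.
 */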

/**
 *	aac_fib_alloc_tag - allocate a fib using tags
 *	@dev: Adapter to allocate the fib for
 *	@scmd: SCSI command whose blk-layer tag selects the fib
 *
 *	Allocate a fib from the adapter fib pool using tags
 *	from the blk layer.
 */

struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
        struct fib *fibptr;

        fibptr = &dev->fibs[scmd->request->tag];
        /*
         *	Null out fields that depend on being zero at the start of
         *	each I/O
         */
        fibptr->hw_fib_va->header.XferState = 0;
        fibptr->type = FSAFS_NTC_FIB_CONTEXT;
        fibptr->callback_data = NULL;
        fibptr->callback = NULL;

        return fibptr;
}

/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
        struct fib * fibptr;
        unsigned long flags;

        spin_lock_irqsave(&dev->fib_lock, flags);
        fibptr = dev->free_fib;
        if (!fibptr) {
                spin_unlock_irqrestore(&dev->fib_lock, flags);
                return fibptr;
        }
        dev->free_fib = fibptr->next;
        spin_unlock_irqrestore(&dev->fib_lock, flags);
        /*
         *	Set the proper node type code and node byte size
         */
        fibptr->type = FSAFS_NTC_FIB_CONTEXT;
        fibptr->size = sizeof(struct fib);
        /*
         *	Null out fields that depend on being zero at the start of
         *	each I/O
         */
        fibptr->hw_fib_va->header.XferState = 0;
        fibptr->flags = 0;
        fibptr->callback = NULL;
        fibptr->callback_data = NULL;

        return fibptr;
}

/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
        unsigned long flags;

        if (fibptr->done == 2)
                return;

        spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
        if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
                aac_config.fib_timeouts++;
        if (fibptr->hw_fib_va->header.XferState != 0) {
                printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
                         (void *)fibptr,
                         le32_to_cpu(fibptr->hw_fib_va->header.XferState));
        }
        fibptr->next = fibptr->dev->free_fib;
        fibptr->dev->free_fib = fibptr;
        spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;

        memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
        hw_fib->header.StructType = FIB_MAGIC;
        hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
        hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
        hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
        hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
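/*
 * Typical caller pattern (a sketch; the command type and payload size are
 * illustrative assumptions, not prescriptive): allocate a fib, initialise
 * it, build the request in its data area, send it synchronously, then
 * complete and free it.
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	int status;
 *
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	... build the request in fib_data(fibptr) here ...
 *	status = aac_fib_send(ContainerCommand, fibptr,
 *			sizeof(struct aac_query_mount),
 *			FsaNormal, 1, 1, NULL, NULL);
 *	if (status >= 0)
 *		aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */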

/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
        struct hw_fib *hw_fib = fibptr->hw_fib_va;

        hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	for host-to-adapter communication. All queue accesses happen through
 *	these routines, which are the only routines with knowledge of how
 *	the queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	Returns a queue entry if the requested queue has free entries. If the
 *	queue is full (no free entries), no entry is returned and the function
 *	returns 0; otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
        struct aac_queue * q;
        unsigned long idx;

        /*
         *	All of the queues wrap when they reach the end, so we check
         *	to see if they have reached the end and if they have we just
         *	set the index back to zero. This is a wrap. You could or off
         *	the high bits in all updates but this is a bit faster I think.
         */

        q = &dev->queues->queue[qid];

        idx = *index = le32_to_cpu(*(q->headers.producer));
        /* Interrupt Moderation, only interrupt for first two entries */
        if (idx != le32_to_cpu(*(q->headers.consumer))) {
                if (--idx == 0) {
                        if (qid == AdapNormCmdQueue)
                                idx = ADAP_NORM_CMD_ENTRIES;
                        else
                                idx = ADAP_NORM_RESP_ENTRIES;
                }
                if (idx != le32_to_cpu(*(q->headers.consumer)))
                        *nonotify = 1;
        }

        if (qid == AdapNormCmdQueue) {
                if (*index >= ADAP_NORM_CMD_ENTRIES)
                        *index = 0; /* Wrap to front of the Producer Queue. */
        } else {
                if (*index >= ADAP_NORM_RESP_ENTRIES)
                        *index = 0; /* Wrap to front of the Producer Queue. */
        }

        /* Queue is full */
        if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
                printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
                                qid, atomic_read(&q->numpending));
                return 0;
        } else {
                *entry = q->base + *index;
                return 1;
        }
}
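/*
 * Ring arithmetic illustration (entry counts are illustrative, not the
 * actual queue depths): with an 8-entry queue, a producer index of 8 wraps
 * to 0 before use, and a producer of 4 with the consumer at 5 means
 * (*index + 1) == consumer, so the queue reports full -- the classic
 * keep-one-slot-empty ring-buffer convention that distinguishes a full
 * ring from an empty one (producer == consumer).
 */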

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with the fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
        struct aac_entry * entry = NULL;
        int map = 0;

        if (qid == AdapNormCmdQueue) {
                /* if no entries wait for some if caller wants to */
                while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
                        printk(KERN_ERR "GetEntries failed\n");
                }
                /*
                 *	Setup queue entry with a command, status and fib mapped
                 */
                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
                map = 1;
        } else {
                while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
                        /* if no entries wait for some if caller wants to */
                }
                /*
                 *	Setup queue entry with command, status and fib mapped
                 */
                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
                entry->addr = hw_fib->header.SenderFibAddress;
                /* Restore the adapter's pointer to the FIB */
                hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
                map = 0;
        }
        /*
         *	If MapFib is true then we need to map the Fib and put pointers
         *	in the queue entry.
         */
        if (map)
                entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
        return 0;
}

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response,
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                int priority, int wait, int reply, fib_callback callback,
                void *callback_data)
{
        struct aac_dev * dev = fibptr->dev;
        struct hw_fib * hw_fib = fibptr->hw_fib_va;
        unsigned long flags = 0;
        unsigned long mflags = 0;
        unsigned long sflags = 0;

        if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
                return -EBUSY;
        /*
         *	There are 5 cases with the wait and response requested flags.
         *	The only invalid cases are if the caller requests to wait and
         *	does not request a response and if the caller does not want a
         *	response and the Fib is not allocated from pool. If a response
         *	is not requested the Fib will just be deallocated by the DPC
         *	routine when the response comes back from the adapter. No
         *	further processing will be done besides deleting the Fib. We
         *	will have a debug mode where the adapter can notify the host
         *	it had a problem and the host can log that fact.
         */
        fibptr->flags = 0;
        if (wait && !reply) {
                return -EINVAL;
        } else if (!wait && reply) {
                hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
        } else if (!wait && !reply) {
                hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
        } else if (wait && reply) {
                hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
                FIB_COUNTER_INCREMENT(aac_config.NormalSent);
        }
        /*
         *	Map the fib into 32bits by using the fib number
         */

        hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
        hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
        /*
         *	Set FIB state to indicate where it came from and if we want a
         *	response from the adapter. Also load the command from the
         *	caller.
         *
         *	Map the hw fib pointer as a 32bit value
         */
        hw_fib->header.Command = cpu_to_le16(command);
        hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
        /*
         *	Set the size of the Fib we want to send to the adapter
         */
        hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
        if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
                return -EMSGSIZE;
        }
        /*
         *	Get a queue entry, connect the FIB to it, and notify
         *	the adapter that a command is ready.
         */
        hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

        /*
         *	Fill in the Callback and CallbackContext if we are not
         *	going to wait.
         */
        if (!wait) {
                fibptr->callback = callback;
                fibptr->callback_data = callback_data;
                fibptr->flags = FIB_CONTEXT_FLAG;
        }

        fibptr->done = 0;

        FIB_COUNTER_INCREMENT(aac_config.FibsSent);

        dprintk((KERN_DEBUG "Fib contents:\n"));
        dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
        dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
        dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
        dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
        dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
        dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

        if (!dev->queues)
                return -EBUSY;

        if (wait) {

                spin_lock_irqsave(&dev->manage_lock, mflags);
                if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
                        printk(KERN_INFO "No management Fibs Available:%d\n",
                                                dev->management_fib_count);
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
                        return -EBUSY;
                }
                dev->management_fib_count++;
                spin_unlock_irqrestore(&dev->manage_lock, mflags);
                spin_lock_irqsave(&fibptr->event_lock, flags);
        }

        if (dev->sync_mode) {
                if (wait)
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                spin_lock_irqsave(&dev->sync_lock, sflags);
                if (dev->sync_fib) {
                        list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
                        spin_unlock_irqrestore(&dev->sync_lock, sflags);
                } else {
                        dev->sync_fib = fibptr;
                        spin_unlock_irqrestore(&dev->sync_lock, sflags);
                        aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
                                (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
                                NULL, NULL, NULL, NULL, NULL);
                }
                if (wait) {
                        fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
                        if (down_interruptible(&fibptr->event_wait)) {
                                fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
                                return -EFAULT;
                        }
                        return 0;
                }
                return -EINPROGRESS;
        }

        if (aac_adapter_deliver(fibptr) != 0) {
                printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
                if (wait) {
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
                }
                return -EBUSY;
        }

        /*
         *	If the caller wanted us to wait for a response, wait now.
         */

        if (wait) {
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
                /* Only set for first known interruptible command */
                if (wait < 0) {
                        /*
                         * *VERY* Dangerous to time out a command, the
                         * assumption is made that we have no hope of
                         * functioning because an interrupt routing or other
                         * hardware failure has occurred.
                         */
                        unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
                        while (down_trylock(&fibptr->event_wait)) {
                                int blink;
                                if (time_is_before_eq_jiffies(timeout)) {
                                        struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
                                        atomic_dec(&q->numpending);
                                        if (wait == -1) {
                                                printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
                                                  "Usually a result of a PCI interrupt routing problem;\n"
                                                  "update mother board BIOS or consider utilizing one of\n"
                                                  "the SAFE mode kernel options (acpi, apic etc)\n");
                                        }
                                        return -ETIMEDOUT;
                                }
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
                                        if (wait == -1) {
                                                printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
                                                  "Usually a result of a serious unrecoverable hardware problem\n",
                                                  blink);
                                        }
                                        return -EFAULT;
                                }
                                /* We used to udelay() here but that absorbed
                                 * a CPU when a timeout occurred. Not very
                                 * useful. */
                                cpu_relax();
                        }
                } else if (down_interruptible(&fibptr->event_wait)) {
                        /* Do nothing ... satisfy
                         * down_interruptible must_check */
                }

                spin_lock_irqsave(&fibptr->event_lock, flags);
                if (fibptr->done == 0) {
                        fibptr->done = 2; /* Tell interrupt we aborted */
                        spin_unlock_irqrestore(&fibptr->event_lock, flags);
                        return -ERESTARTSYS;
                }
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
                BUG_ON(fibptr->done == 0);

                if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
                        return -ETIMEDOUT;
                return 0;
        }
        /*
         *	If the user does not want a response, return success;
         *	otherwise return pending.
         */
        if (reply)
                return -EINPROGRESS;
        else
                return 0;
}
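/*
 * Asynchronous usage sketch (my_callback and my_context are hypothetical
 * names, not driver symbols): with wait == 0 and reply == 1 a successful
 * send returns -EINPROGRESS and the callback later runs from the response
 * path with the completed fib.
 *
 *	status = aac_fib_send(ContainerCommand, fibptr, size,
 *			FsaNormal, 0, 1, my_callback, my_context);
 *	if (status != -EINPROGRESS) {
 *		... delivery failed: complete and free the fib here ...
 *	}
 */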

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the requested queue
 *	that we are a consumer of, returning the address of the queue entry.
 *	It does not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
        u32 index;
        int status;

        if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
                status = 0;
        } else {
                /*
                 *	The consumer index must be wrapped if we have reached
                 *	the end of the queue, else we just use the entry
                 *	pointed to by the header index
                 */
                if (le32_to_cpu(*q->headers.consumer) >= q->entries)
                        index = 0;
                else
                        index = le32_to_cpu(*q->headers.consumer);
                *entry = q->base + index;
                status = 1;
        }
        return (status);
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full, notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
        int wasfull = 0;
        u32 notify;

        if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
                wasfull = 1;

        if (le32_to_cpu(*q->headers.consumer) >= q->entries)
                *q->headers.consumer = cpu_to_le32(1);
        else
                le32_add_cpu(q->headers.consumer, 1);

        if (wasfull) {
                switch (qid) {

                case HostNormCmdQueue:
                        notify = HostNormCmdNotFull;
                        break;
                case HostNormRespQueue:
                        notify = HostNormRespNotFull;
                        break;
                default:
                        BUG();
                        return;
                }
                aac_adapter_notify(dev, notify);
        }
}
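/*
 * Consumer-advance illustration (not driver code): when the consumer index
 * has reached q->entries it restarts at 1 rather than 0, because -- as
 * aac_consumer_get() shows -- an index of q->entries already denotes the
 * entry at slot 0, so the slot just consumed was 0 and the next one is 1.
 * In the unwrapped case the index is simply bumped in place with
 * le32_add_cpu(), which increments the little-endian value directly.
 */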

/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
        struct hw_fib * hw_fib = fibptr->hw_fib_va;
        struct aac_dev * dev = fibptr->dev;
        struct aac_queue * q;
        unsigned long nointr = 0;
        unsigned long qflags;

        if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
            dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
                kfree(hw_fib);
                return 0;
        }

        if (hw_fib->header.XferState == 0) {
                if (dev->comm_interface == AAC_COMM_MESSAGE)
                        kfree(hw_fib);
                return 0;
        }
        /*
         *	If we plan to do anything check the structure type first.
         */
        if (hw_fib->header.StructType != FIB_MAGIC &&
            hw_fib->header.StructType != FIB_MAGIC2 &&
            hw_fib->header.StructType != FIB_MAGIC2_64) {
                if (dev->comm_interface == AAC_COMM_MESSAGE)
                        kfree(hw_fib);
                return -EINVAL;
        }
        /*
         *	This block handles the case where the adapter had sent us a
         *	command and we have finished processing the command. We
         *	call completeFib when we are done processing the command
         *	and want to send a response back to the adapter. This will
         *	send the completed cdb to the adapter.
         */
        if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
                if (dev->comm_interface == AAC_COMM_MESSAGE) {
                        kfree(hw_fib);
                } else {
                        u32 index;
                        hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
                        if (size) {
                                size += sizeof(struct aac_fibhdr);
                                if (size > le16_to_cpu(hw_fib->header.SenderSize))
                                        return -EMSGSIZE;
                                hw_fib->header.Size = cpu_to_le16(size);
                        }
                        q = &dev->queues->queue[AdapNormRespQueue];
                        spin_lock_irqsave(q->lock, qflags);
                        aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
                        *(q->headers.producer) = cpu_to_le32(index + 1);
                        spin_unlock_irqrestore(q->lock, qflags);
                        if (!(nointr & (int)aac_config.irq_mod))
                                aac_adapter_notify(dev, AdapNormRespQueue);
                }
        } else {
                printk(KERN_WARNING "aac_fib_adapter_complete: "
                        "Unknown xferstate detected.\n");
                BUG();
        }
        return 0;
}

/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
        struct hw_fib * hw_fib = fibptr->hw_fib_va;

        /*
         *	Check for a fib which has already been completed
         */

        if (hw_fib->header.XferState == 0)
                return 0;
        /*
         *	If we plan to do anything check the structure type first.
         */

        if (hw_fib->header.StructType != FIB_MAGIC &&
            hw_fib->header.StructType != FIB_MAGIC2 &&
            hw_fib->header.StructType != FIB_MAGIC2_64)
                return -EINVAL;
        /*
         *	This block completes a cdb which originated on the host and we
         *	just need to deallocate the cdb or reinit it. At this point the
         *	command is complete that we had sent to the adapter and this
         *	cdb could be reused.
         */

        if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
                (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
                fib_dealloc(fibptr);
        } else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
                /*
                 *	This handles the case when the host has aborted the I/O
                 *	to the adapter because the adapter is not responding
                 */
                fib_dealloc(fibptr);
        } else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
                fib_dealloc(fibptr);
        } else {
                BUG();
        }
        return 0;
}
 870
 871/**
 872 *      aac_printf      -       handle printf from firmware
 873 *      @dev: Adapter
 874 *      @val: Message info
 875 *
 876 *      Print a message passed to us by the controller firmware on the
 877 *      Adaptec board
 878 */
 879
 880void aac_printf(struct aac_dev *dev, u32 val)
 881{
 882        char *cp = dev->printfbuf;
 883        if (dev->printf_enabled)
 884        {
 885                int length = val & 0xffff;
 886                int level = (val >> 16) & 0xffff;
 887
 888                /*
 889                 *      The size of the printfbuf is set in port.c
 890                 *      There is no variable or define for it
 891                 */
 892                if (length > 255)
 893                        length = 255;
 894                if (cp[length] != 0)
 895                        cp[length] = 0;
 896                if (level == LOG_AAC_HIGH_ERROR)
 897                        printk(KERN_WARNING "%s:%s", dev->name, cp);
 898                else
 899                        printk(KERN_INFO "%s:%s", dev->name, cp);
 900        }
 901        memset(cp, 0, 256);
 902}
 903
 904
 905/**
 906 *      aac_handle_aif          -       Handle a message from the firmware
 907 *      @dev: Which adapter this fib is from
 908 *      @fibptr: Pointer to fibptr from adapter
 909 *
 910 *      This routine handles a driver notify fib from the adapter and
 911 *      dispatches it to the appropriate routine for handling.
 912 */
 913
 914#define AIF_SNIFF_TIMEOUT       (500*HZ)
 915static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 916{
 917        struct hw_fib * hw_fib = fibptr->hw_fib_va;
 918        struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
 919        u32 channel, id, lun, container;
 920        struct scsi_device *device;
 921        enum {
 922                NOTHING,
 923                DELETE,
 924                ADD,
 925                CHANGE
 926        } device_config_needed = NOTHING;
 927
 928        /* Sniff for container changes */
 929
 930        if (!dev || !dev->fsa_dev)
 931                return;
 932        container = channel = id = lun = (u32)-1;
 933
 934        /*
 935         *      We have set this up to try and minimize the number of
 936         * re-configures that take place. As a result of this when
 937         * certain AIF's come in we will set a flag waiting for another
 938         * type of AIF before setting the re-config flag.
 939         */
 940        switch (le32_to_cpu(aifcmd->command)) {
 941        case AifCmdDriverNotify:
 942                switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
 943                case AifRawDeviceRemove:
 944                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
 945                        if ((container >> 28)) {
 946                                container = (u32)-1;
 947                                break;
 948                        }
 949                        channel = (container >> 24) & 0xF;
 950                        if (channel >= dev->maximum_num_channels) {
 951                                container = (u32)-1;
 952                                break;
 953                        }
 954                        id = container & 0xFFFF;
 955                        if (id >= dev->maximum_num_physicals) {
 956                                container = (u32)-1;
 957                                break;
 958                        }
 959                        lun = (container >> 16) & 0xFF;
 960                        container = (u32)-1;
 961                        channel = aac_phys_to_logical(channel);
 962                        device_config_needed =
 963                          (((__le32 *)aifcmd->data)[0] ==
 964                            cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;
 965
 966                        if (device_config_needed == ADD) {
 967                                device = scsi_device_lookup(
 968                                        dev->scsi_host_ptr,
 969                                        channel, id, lun);
 970                                if (device) {
 971                                        scsi_remove_device(device);
 972                                        scsi_device_put(device);
 973                                }
 974                        }
 975                        break;
 976                /*
 977                 *      Morph or Expand complete
 978                 */
 979                case AifDenMorphComplete:
 980                case AifDenVolumeExtendComplete:
 981                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
 982                        if (container >= dev->maximum_num_containers)
 983                                break;
 984
 985                        /*
 986                         *      Find the scsi_device associated with the SCSI
 987                         * address. Make sure we have the right array, and if
 988                         * so set the flag to initiate a new re-config once we
 989                         * see an AifEnConfigChange AIF come through.
 990                         */
 991
 992                        if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
 993                                device = scsi_device_lookup(dev->scsi_host_ptr,
 994                                        CONTAINER_TO_CHANNEL(container),
 995                                        CONTAINER_TO_ID(container),
 996                                        CONTAINER_TO_LUN(container));
 997                                if (device) {
 998                                        dev->fsa_dev[container].config_needed = CHANGE;
 999                                        dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
1000                                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1001                                        scsi_device_put(device);
1002                                }
1003                        }
1004                }
1005
1006                /*
1007                 *      If we are waiting on something and this happens to be
1008                 * that thing then set the re-configure flag.
1009                 */
1010                if (container != (u32)-1) {
1011                        if (container >= dev->maximum_num_containers)
1012                                break;
1013                        if ((dev->fsa_dev[container].config_waiting_on ==
1014                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1015                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1016                                dev->fsa_dev[container].config_waiting_on = 0;
1017                } else for (container = 0;
1018                    container < dev->maximum_num_containers; ++container) {
1019                        if ((dev->fsa_dev[container].config_waiting_on ==
1020                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1021                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1022                                dev->fsa_dev[container].config_waiting_on = 0;
1023                }
1024                break;
1025
1026        case AifCmdEventNotify:
1027                switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
1028                case AifEnBatteryEvent:
1029                        dev->cache_protected =
1030                                (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
1031                        break;
1032                /*
1033                 *      Add an Array.
1034                 */
1035                case AifEnAddContainer:
1036                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1037                        if (container >= dev->maximum_num_containers)
1038                                break;
1039                        dev->fsa_dev[container].config_needed = ADD;
1040                        dev->fsa_dev[container].config_waiting_on =
1041                                AifEnConfigChange;
1042                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1043                        break;
1044
1045                /*
1046                 *      Delete an Array.
1047                 */
1048                case AifEnDeleteContainer:
1049                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1050                        if (container >= dev->maximum_num_containers)
1051                                break;
1052                        dev->fsa_dev[container].config_needed = DELETE;
1053                        dev->fsa_dev[container].config_waiting_on =
1054                                AifEnConfigChange;
1055                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1056                        break;
1057
1058                /*
1059                 *      Container change detected. If we currently are not
1060                 * waiting on something else, setup to wait on a Config Change.
1061                 */
1062                case AifEnContainerChange:
1063                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1064                        if (container >= dev->maximum_num_containers)
1065                                break;
1066                        if (dev->fsa_dev[container].config_waiting_on &&
1067                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1068                                break;
1069                        dev->fsa_dev[container].config_needed = CHANGE;
1070                        dev->fsa_dev[container].config_waiting_on =
1071                                AifEnConfigChange;
1072                        dev->fsa_dev[container].config_waiting_stamp = jiffies;
1073                        break;
1074
1075                case AifEnConfigChange:
1076                        break;
1077
1078                case AifEnAddJBOD:
1079                case AifEnDeleteJBOD:
1080                        container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1081                        if ((container >> 28)) {
1082                                container = (u32)-1;
1083                                break;
1084                        }
1085                        channel = (container >> 24) & 0xF;
1086                        if (channel >= dev->maximum_num_channels) {
1087                                container = (u32)-1;
1088                                break;
1089                        }
1090                        id = container & 0xFFFF;
1091                        if (id >= dev->maximum_num_physicals) {
1092                                container = (u32)-1;
1093                                break;
1094                        }
1095                        lun = (container >> 16) & 0xFF;
1096                        container = (u32)-1;
1097                        channel = aac_phys_to_logical(channel);
1098                        device_config_needed =
1099                          (((__le32 *)aifcmd->data)[0] ==
1100                            cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
1101                        if (device_config_needed == ADD) {
1102                                device = scsi_device_lookup(dev->scsi_host_ptr,
1103                                        channel,
1104                                        id,
1105                                        lun);
1106                                if (device) {
1107                                        scsi_remove_device(device);
1108                                        scsi_device_put(device);
1109                                }
1110                        }
1111                        break;
1112
1113                case AifEnEnclosureManagement:
1114                        /*
1115                         * If in JBOD mode, automatic exposure of new
1116                         * physical target to be suppressed until configured.
1117                         */
1118                        if (dev->jbod)
1119                                break;
1120                        switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
1121                        case EM_DRIVE_INSERTION:
1122                        case EM_DRIVE_REMOVAL:
1123                        case EM_SES_DRIVE_INSERTION:
1124                        case EM_SES_DRIVE_REMOVAL:
1125                                container = le32_to_cpu(
1126                                        ((__le32 *)aifcmd->data)[2]);
1127                                if ((container >> 28)) {
1128                                        container = (u32)-1;
1129                                        break;
1130                                }
1131                                channel = (container >> 24) & 0xF;
1132                                if (channel >= dev->maximum_num_channels) {
1133                                        container = (u32)-1;
1134                                        break;
1135                                }
1136                                id = container & 0xFFFF;
1137                                lun = (container >> 16) & 0xFF;
1138                                container = (u32)-1;
1139                                if (id >= dev->maximum_num_physicals) {
1140                                        /* legacy dev_t ? */
1141                                        if ((0x2000 <= id) || lun || channel ||
1142                                          ((channel = (id >> 7) & 0x3F) >=
1143                                          dev->maximum_num_channels))
1144                                                break;
1145                                        lun = (id >> 4) & 7;
1146                                        id &= 0xF;
1147                                }
1148                                channel = aac_phys_to_logical(channel);
1149                                device_config_needed =
1150                                  ((((__le32 *)aifcmd->data)[3]
1151                                    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
1152                                    (((__le32 *)aifcmd->data)[3]
1153                                    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
1154                                  ADD : DELETE;
1155                                break;
1156                        }
1157                        break;
1158                }
1159
1160                /*
1161                 *      If we are waiting on something and this happens to be
1162                 * that thing then set the re-configure flag.
1163                 */
1164                if (container != (u32)-1) {
1165                        if (container >= dev->maximum_num_containers)
1166                                break;
1167                        if ((dev->fsa_dev[container].config_waiting_on ==
1168                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1169                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1170                                dev->fsa_dev[container].config_waiting_on = 0;
1171                } else for (container = 0;
1172                    container < dev->maximum_num_containers; ++container) {
1173                        if ((dev->fsa_dev[container].config_waiting_on ==
1174                            le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1175                         time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1176                                dev->fsa_dev[container].config_waiting_on = 0;
1177                }
1178                break;
1179
1180        case AifCmdJobProgress:
1181                /*
1182                 *      These are job progress AIF's. When a Clear is being
1183                 * done on a container it is initially created then hidden from
1184                 * the OS. When the clear completes we don't get a config
1185                 * change so we monitor the job status complete on a clear then
1186                 * wait for a container change.
1187                 */
1188
1189                if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1190                    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
1191                     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
1192                        for (container = 0;
1193                            container < dev->maximum_num_containers;
1194                            ++container) {
1195                                /*
1196                                 * Stomp on all config sequencing for all
1197                                 * containers?
1198                                 */
1199                                dev->fsa_dev[container].config_waiting_on =
1200                                        AifEnContainerChange;
1201                                dev->fsa_dev[container].config_needed = ADD;
1202                                dev->fsa_dev[container].config_waiting_stamp =
1203                                        jiffies;
1204                        }
1205                }
1206                if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1207                    ((__le32 *)aifcmd->data)[6] == 0 &&
1208                    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
1209                        for (container = 0;
1210                            container < dev->maximum_num_containers;
1211                            ++container) {
1212                                /*
1213                                 * Stomp on all config sequencing for all
1214                                 * containers?
1215                                 */
1216                                dev->fsa_dev[container].config_waiting_on =
1217                                        AifEnContainerChange;
1218                                dev->fsa_dev[container].config_needed = DELETE;
1219                                dev->fsa_dev[container].config_waiting_stamp =
1220                                        jiffies;
1221                        }
1222                }
1223                break;
1224        }
1225
1226        container = 0;
1227retry_next:
1228        if (device_config_needed == NOTHING)
1229        for (; container < dev->maximum_num_containers; ++container) {
1230                if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1231                        (dev->fsa_dev[container].config_needed != NOTHING) &&
1232                        time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1233                        device_config_needed =
1234                                dev->fsa_dev[container].config_needed;
1235                        dev->fsa_dev[container].config_needed = NOTHING;
1236                        channel = CONTAINER_TO_CHANNEL(container);
1237                        id = CONTAINER_TO_ID(container);
1238                        lun = CONTAINER_TO_LUN(container);
1239                        break;
1240                }
1241        }
1242        if (device_config_needed == NOTHING)
1243                return;
1244
1245        /*
1246         *      If we decided that a re-configuration needs to be done,
1247         * schedule it here on the way out the door, please close the door
1248         * behind you.
1249         */
1250
1251        /*
1252         *      Find the scsi_device associated with the SCSI address,
1253         * and mark it as changed, invalidating the cache. This deals
1254         * with changes to existing device IDs.
1255         */
1256
1257        if (!dev || !dev->scsi_host_ptr)
1258                return;
1259        /*
1260         * force reload of disk info via aac_probe_container
1261         */
1262        if ((channel == CONTAINER_CHANNEL) &&
1263          (device_config_needed != NOTHING)) {
1264                if (dev->fsa_dev[container].valid == 1)
1265                        dev->fsa_dev[container].valid = 2;
1266                aac_probe_container(dev, container);
1267        }
1268        device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
1269        if (device) {
1270                switch (device_config_needed) {
1271                case DELETE:
1272#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1273                        scsi_remove_device(device);
1274#else
1275                        if (scsi_device_online(device)) {
1276                                scsi_device_set_state(device, SDEV_OFFLINE);
1277                                sdev_printk(KERN_INFO, device,
1278                                        "Device offlined - %s\n",
1279                                        (channel == CONTAINER_CHANNEL) ?
1280                                                "array deleted" :
1281                                                "enclosure services event");
1282                        }
1283#endif
1284                        break;
1285                case ADD:
1286                        if (!scsi_device_online(device)) {
1287                                sdev_printk(KERN_INFO, device,
1288                                        "Device online - %s\n",
1289                                        (channel == CONTAINER_CHANNEL) ?
1290                                                "array created" :
1291                                                "enclosure services event");
1292                                scsi_device_set_state(device, SDEV_RUNNING);
1293                        }
1294                        /* FALLTHRU */
1295                case CHANGE:
1296                        if ((channel == CONTAINER_CHANNEL)
1297                         && (!dev->fsa_dev[container].valid)) {
1298#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1299                                scsi_remove_device(device);
1300#else
1301                                if (!scsi_device_online(device))
1302                                        break;
1303                                scsi_device_set_state(device, SDEV_OFFLINE);
1304                                sdev_printk(KERN_INFO, device,
1305                                        "Device offlined - %s\n",
1306                                        "array failed");
1307#endif
1308                                break;
1309                        }
1310                        scsi_rescan_device(&device->sdev_gendev);
1311
1312                default:
1313                        break;
1314                }
1315                scsi_device_put(device);
1316                device_config_needed = NOTHING;
1317        }
1318        if (device_config_needed == ADD)
1319                scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
1320        if (channel == CONTAINER_CHANNEL) {
1321                container++;
1322                device_config_needed = NOTHING;
1323                goto retry_next;
1324        }
1325}
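
/*
 * Illustrative sketch, not part of the driver: the rescan logic above
 * turns a container number into the SCSI address used for the device
 * lookup.  Assuming the conventional aacraid mapping (arrays sit on
 * CONTAINER_CHANNEL with the container number as target id and LUN 0),
 * a hypothetical helper making that explicit:
 */
static inline void aac_container_to_scsi_addr(int container, int *channel,
                                              int *id, int *lun)
{
        *channel = CONTAINER_TO_CHANNEL(container);
        *id = CONTAINER_TO_ID(container);
        *lun = CONTAINER_TO_LUN(container);
}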
1326
1327static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1328{
1329        int index, quirks;
1330        int retval;
1331        struct Scsi_Host *host;
1332        struct scsi_device *dev;
1333        struct scsi_cmnd *command;
1334        struct scsi_cmnd *command_list;
1335        int jafo = 0;
1336
1337        /*
1338         * Assumptions:
1339         *      - host is locked, unless called by the aacraid thread.
1340         *        (a matter of convenience, due to legacy issues surrounding
1341         *        eh_host_adapter_reset).
1342         *      - in_reset is asserted, so no new i/o is getting to the
1343         *        card.
1344         *      - The card is dead, or will be very shortly ;-/ so no new
1345         *        commands are completing in the interrupt service.
1346         */
1347        host = aac->scsi_host_ptr;
1348        scsi_block_requests(host);
1349        aac_adapter_disable_int(aac);
1350        if (aac->thread->pid != current->pid) {
1351                spin_unlock_irq(host->host_lock);
1352                kthread_stop(aac->thread);
1353                jafo = 1;
1354        }
1355
1356        /*
1357         *      A positive health value means the adapter is in a known
1358         * DEAD PANIC state and could be reset to `try again'.
1359         */
1360        retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
1361
1362        if (retval)
1363                goto out;
1364
1365        /*
1366         *      Loop through the fibs, waking any waiters on synchronous FIBs
1367         */
1368        for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1369                struct fib *fib = &aac->fibs[index];
1370                if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1371                  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
1372                        unsigned long flagv;
1373                        spin_lock_irqsave(&fib->event_lock, flagv);
1374                        up(&fib->event_wait);
1375                        spin_unlock_irqrestore(&fib->event_lock, flagv);
1376                        schedule();
1377                        retval = 0;
1378                }
1379        }
1380        /* Give some extra time for ioctls to complete. */
1381        if (retval == 0)
1382                ssleep(2);
1383        index = aac->cardtype;
1384
1385        /*
1386         * Re-initialize the adapter, first free resources, then carefully
1387         * apply the initialization sequence to come back again. The only
1388         * risk is a change in firmware dropping the cache; it is assumed
1389         * the caller will ensure that i/o is quiesced and the card is
1390         * flushed in that case.
1391         */
1392        aac_fib_map_free(aac);
1393        pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1394        aac->comm_addr = NULL;
1395        aac->comm_phys = 0;
1396        kfree(aac->queues);
1397        aac->queues = NULL;
1398        aac_free_irq(aac);
1399        kfree(aac->fsa_dev);
1400        aac->fsa_dev = NULL;
1401        quirks = aac_get_driver_ident(index)->quirks;
1402        if (quirks & AAC_QUIRK_31BIT) {
1403                if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
1404                  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
1405                        goto out;
1406        } else {
1407                if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
1408                  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
1409                        goto out;
1410        }
1411        if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1412                goto out;
1413        if (quirks & AAC_QUIRK_31BIT)
1414                if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
1415                        goto out;
1416        if (jafo) {
1417                aac->thread = kthread_run(aac_command_thread, aac, "%s",
1418                                          aac->name);
1419                if (IS_ERR(aac->thread)) {
1420                        retval = PTR_ERR(aac->thread);
1421                        goto out;
1422                }
1423        }
1424        (void)aac_get_adapter_info(aac);
1425        if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1426                host->sg_tablesize = 34;
1427                host->max_sectors = (host->sg_tablesize * 8) + 112;
1428        }
1429        if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1430                host->sg_tablesize = 17;
1431                host->max_sectors = (host->sg_tablesize * 8) + 112;
1432        }
1433        aac_get_config_status(aac, 1);
1434        aac_get_containers(aac);
1435        /*
1436         * This is where the assumption that the Adapter is quiesced
1437         * is important.
1438         */
1439        command_list = NULL;
1440        __shost_for_each_device(dev, host) {
1441                unsigned long flags;
1442                spin_lock_irqsave(&dev->list_lock, flags);
1443                list_for_each_entry(command, &dev->cmd_list, list)
1444                        if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1445                                command->SCp.buffer = (struct scatterlist *)command_list;
1446                                command_list = command;
1447                        }
1448                spin_unlock_irqrestore(&dev->list_lock, flags);
1449        }
1450        while ((command = command_list)) {
1451                command_list = (struct scsi_cmnd *)command->SCp.buffer;
1452                command->SCp.buffer = NULL;
1453                command->result = DID_OK << 16
1454                  | COMMAND_COMPLETE << 8
1455                  | SAM_STAT_TASK_SET_FULL;
1456                command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1457                command->scsi_done(command);
1458        }
1459        retval = 0;
1460
1461out:
1462        aac->in_reset = 0;
1463        scsi_unblock_requests(host);
1464        if (jafo) {
1465                spin_lock_irq(host->host_lock);
1466        }
1467        return retval;
1468}
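
/*
 * Illustrative sketch, not part of the driver: the scsi_cmnd result
 * word packed in the requeue loop above combines a host byte, a
 * message byte and a SCSI status byte.  A hypothetical helper showing
 * the layout:
 */
static inline unsigned int aac_pack_result(unsigned int host_byte,
                                           unsigned int msg_byte,
                                           unsigned int status_byte)
{
        return (host_byte << 16) | (msg_byte << 8) | status_byte;
}
/* e.g. aac_pack_result(DID_OK, COMMAND_COMPLETE, SAM_STAT_TASK_SET_FULL) */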
1469
1470int aac_reset_adapter(struct aac_dev * aac, int forced)
1471{
1472        unsigned long flagv = 0;
1473        int retval;
1474        struct Scsi_Host * host;
1475
1476        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1477                return -EBUSY;
1478
1479        if (aac->in_reset) {
1480                spin_unlock_irqrestore(&aac->fib_lock, flagv);
1481                return -EBUSY;
1482        }
1483        aac->in_reset = 1;
1484        spin_unlock_irqrestore(&aac->fib_lock, flagv);
1485
1486        /*
1487         * Wait for all commands to complete to this specific
1488         * target (block maximum 60 seconds). Although not necessary,
1489         * it does make us a good storage citizen.
1490         */
1491        host = aac->scsi_host_ptr;
1492        scsi_block_requests(host);
1493        if (forced < 2) for (retval = 60; retval; --retval) {
1494                struct scsi_device * dev;
1495                struct scsi_cmnd * command;
1496                int active = 0;
1497
1498                __shost_for_each_device(dev, host) {
1499                        spin_lock_irqsave(&dev->list_lock, flagv);
1500                        list_for_each_entry(command, &dev->cmd_list, list) {
1501                                if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1502                                        active++;
1503                                        break;
1504                                }
1505                        }
1506                        spin_unlock_irqrestore(&dev->list_lock, flagv);
1507                        if (active)
1508                                break;
1509
1510                }
1511                /*
1512                 * We can exit if all the commands are complete
1513                 */
1514                if (active == 0)
1515                        break;
1516                ssleep(1);
1517        }
1518
1519        /* Quiesce build, flush cache, write through mode */
1520        if (forced < 2)
1521                aac_send_shutdown(aac);
1522        spin_lock_irqsave(host->host_lock, flagv);
1523        retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
1524        spin_unlock_irqrestore(host->host_lock, flagv);
1525
1526        if ((forced < 2) && (retval == -ENODEV)) {
1527                /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1528                struct fib * fibctx = aac_fib_alloc(aac);
1529                if (fibctx) {
1530                        struct aac_pause *cmd;
1531                        int status;
1532
1533                        aac_fib_init(fibctx);
1534
1535                        cmd = (struct aac_pause *) fib_data(fibctx);
1536
1537                        cmd->command = cpu_to_le32(VM_ContainerConfig);
1538                        cmd->type = cpu_to_le32(CT_PAUSE_IO);
1539                        cmd->timeout = cpu_to_le32(1);
1540                        cmd->min = cpu_to_le32(1);
1541                        cmd->noRescan = cpu_to_le32(1);
1542                        cmd->count = cpu_to_le32(0);
1543
1544                        status = aac_fib_send(ContainerCommand,
1545                          fibctx,
1546                          sizeof(struct aac_pause),
1547                          FsaNormal,
1548                          -2 /* Timeout silently */, 1,
1549                          NULL, NULL);
1550
1551                        if (status >= 0)
1552                                aac_fib_complete(fibctx);
1553                        /* FIB should be freed only after getting
1554                         * the response from the F/W */
1555                        if (status != -ERESTARTSYS)
1556                                aac_fib_free(fibctx);
1557                }
1558        }
1559
1560        return retval;
1561}
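
/*
 * Illustrative sketch, not part of the driver: the 60 second drain in
 * aac_reset_adapter() is a bounded poll; test a condition once per
 * second and give up after a deadline.  A hypothetical generic form,
 * with the predicate type chosen here for illustration:
 */
static int aac_poll_until(int (*done)(struct aac_dev *), struct aac_dev *aac,
                          unsigned int seconds)
{
        while (seconds--) {
                if (done(aac))
                        return 0;
                ssleep(1);
        }
        return -ETIMEDOUT;
}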
1562
1563int aac_check_health(struct aac_dev * aac)
1564{
1565        int BlinkLED;
1566        unsigned long time_now, flagv = 0;
1567        struct list_head * entry;
1568        struct Scsi_Host * host;
1569
1570        /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1571        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1572                return 0;
1573
1574        if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1575                spin_unlock_irqrestore(&aac->fib_lock, flagv);
1576                return 0; /* OK */
1577        }
1578
1579        aac->in_reset = 1;
1580
1581        /* Fake up an AIF:
1582         *      aac_aifcmd.command = AifCmdEventNotify = 1
1583         *      aac_aifcmd.seqnum = 0xFFFFFFFF
1584         *      aac_aifcmd.data[0] = AifEnExpEvent = 23
1585         *      aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1586         *      aac_aifcmd.data[2] = AifHighPriority = 3
1587         *      aac_aifcmd.data[3] = BlinkLED
1588         */
1589
1590        time_now = jiffies/HZ;
1591        entry = aac->fib_list.next;
1592
1593        /*
1594         * For each Context that is on the
1595         * fibctxList, make a copy of the
1596         * fib, and then set the event to wake up the
1597         * thread that is waiting for it.
1598         */
1599        while (entry != &aac->fib_list) {
1600                /*
1601                 * Extract the fibctx
1602                 */
1603                struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1604                struct hw_fib * hw_fib;
1605                struct fib * fib;
1606                /*
1607                 * Check if the queue is getting
1608                 * backlogged
1609                 */
1610                if (fibctx->count > 20) {
1611                        /*
1612                         * It's *not* jiffies folks,
1613                         * but jiffies / HZ, so do not
1614                         * panic ...
1615                         */
1616                        u32 time_last = fibctx->jiffies;
1617                        /*
1618                         * Has it been > 2 minutes
1619                         * since the last read off
1620                         * the queue?
1621                         */
1622                        if ((time_now - time_last) > aif_timeout) {
1623                                entry = entry->next;
1624                                aac_close_fib_context(aac, fibctx);
1625                                continue;
1626                        }
1627                }
1628                /*
1629                 * Warning: no sleep allowed while
1630                 * holding spinlock
1631                 */
1632                hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1633                fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1634                if (fib && hw_fib) {
1635                        struct aac_aifcmd * aif;
1636
1637                        fib->hw_fib_va = hw_fib;
1638                        fib->dev = aac;
1639                        aac_fib_init(fib);
1640                        fib->type = FSAFS_NTC_FIB_CONTEXT;
1641                        fib->size = sizeof (struct fib);
1642                        fib->data = hw_fib->data;
1643                        aif = (struct aac_aifcmd *)hw_fib->data;
1644                        aif->command = cpu_to_le32(AifCmdEventNotify);
1645                        aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1646                        ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1647                        ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1648                        ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1649                        ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1650
1651                        /*
1652                         * Put the FIB onto the
1653                         * fibctx's fibs
1654                         */
1655                        list_add_tail(&fib->fiblink, &fibctx->fib_list);
1656                        fibctx->count++;
1657                        /*
1658                         * Set the event to wake up the
1659                         * thread that is waiting.
1660                         */
1661                        up(&fibctx->wait_sem);
1662                } else {
1663                        printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1664                        kfree(fib);
1665                        kfree(hw_fib);
1666                }
1667                entry = entry->next;
1668        }
1669
1670        spin_unlock_irqrestore(&aac->fib_lock, flagv);
1671
1672        if (BlinkLED < 0) {
1673                printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1674                goto out;
1675        }
1676
1677        printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1678
1679        if (!aac_check_reset || ((aac_check_reset == 1) &&
1680                (aac->supplement_adapter_info.SupportedOptions2 &
1681                        AAC_OPTION_IGNORE_RESET)))
1682                goto out;
1683        host = aac->scsi_host_ptr;
1684        if (aac->thread->pid != current->pid)
1685                spin_lock_irqsave(host->host_lock, flagv);
1686        BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
1687        if (aac->thread->pid != current->pid)
1688                spin_unlock_irqrestore(host->host_lock, flagv);
1689        return BlinkLED;
1690
1691out:
1692        aac->in_reset = 0;
1693        return BlinkLED;
1694}
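
/*
 * Illustrative sketch, not part of the driver: the fake AIF built in
 * the loop above could be factored into a helper; the field values
 * follow the comment block in aac_check_health():
 */
static void aac_fill_panic_aif(struct aac_aifcmd *aif, u32 blink)
{
        aif->command = cpu_to_le32(AifCmdEventNotify);
        aif->seqnum = cpu_to_le32(0xFFFFFFFF);
        ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
        ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
        ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
        ((__le32 *)aif->data)[3] = cpu_to_le32(blink);
}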
1695
1696
1697/**
1698 *      aac_command_thread      -       command processing thread
1699 *      @dev: Adapter to monitor
1700 *
1701 *      Waits on the commandready event in its queue. When the event gets set
1702 *      it will pull FIBs off its queue, and will continue to pull FIBs off
1703 *      until the queue is empty. When the queue is empty it will wait for
1704 *      more FIBs.
1705 */
1706
1707int aac_command_thread(void *data)
1708{
1709        struct aac_dev *dev = data;
1710        struct hw_fib *hw_fib, *hw_newfib;
1711        struct fib *fib, *newfib;
1712        struct aac_fib_context *fibctx;
1713        unsigned long flags;
1714        DECLARE_WAITQUEUE(wait, current);
1715        unsigned long next_jiffies = jiffies + HZ;
1716        unsigned long next_check_jiffies = next_jiffies;
1717        long difference = HZ;
1718
1719        /*
1720         *      We can only have one thread per adapter for AIFs.
1721         */
1722        if (dev->aif_thread)
1723                return -EINVAL;
1724
1725        /*
1726         *      Let the DPC know it has a place to send the AIF's to.
1727         */
1728        dev->aif_thread = 1;
1729        add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1730        set_current_state(TASK_INTERRUPTIBLE);
1731        dprintk ((KERN_INFO "aac_command_thread start\n"));
1732        while (1) {
1733                spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1734                while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1735                        struct list_head *entry;
1736                        struct aac_aifcmd * aifcmd;
1737
1738                        set_current_state(TASK_RUNNING);
1739
1740                        entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
1741                        list_del(entry);
1742
1743                        spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1744                        fib = list_entry(entry, struct fib, fiblink);
1745                        /*
1746                         *      We will process the FIB here or pass it to a
1747                         *      worker thread that is TBD. We really can't
1748                         *      do anything at this point since we don't have
1749                         *      anything defined for this thread to do.
1750                         */
1751                        hw_fib = fib->hw_fib_va;
1752                        memset(fib, 0, sizeof(struct fib));
1753                        fib->type = FSAFS_NTC_FIB_CONTEXT;
1754                        fib->size = sizeof(struct fib);
1755                        fib->hw_fib_va = hw_fib;
1756                        fib->data = hw_fib->data;
1757                        fib->dev = dev;
1758                        /*
1759                         *      We only handle AifRequest fibs from the adapter.
1760                         */
1761                        aifcmd = (struct aac_aifcmd *) hw_fib->data;
1762                        if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1763                                /* Handle Driver Notify Events */
1764                                aac_handle_aif(dev, fib);
1765                                *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1766                                aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1767                        } else {
1768                                /* The u32 here is important and intended. We are using
1769                                   32-bit wrapping time to fit the adapter field */
1770
1771                                u32 time_now, time_last;
1772                                unsigned long flagv;
1773                                unsigned num;
1774                                struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1775                                struct fib ** fib_pool, ** fib_p;
1776
1777                                /* Sniff events */
1778                                if ((aifcmd->command ==
1779                                     cpu_to_le32(AifCmdEventNotify)) ||
1780                                    (aifcmd->command ==
1781                                     cpu_to_le32(AifCmdJobProgress))) {
1782                                        aac_handle_aif(dev, fib);
1783                                }
1784
1785                                time_now = jiffies/HZ;
1786
1787                                /*
1788                                 * Warning: no sleep allowed while
1789                                 * holding spinlock. We take the estimate
1790                                 * and pre-allocate a set of fibs outside the
1791                                 * lock.
1792                                 */
1793                                num = le32_to_cpu(dev->init->AdapterFibsSize)
1794                                    / sizeof(struct hw_fib); /* some extra */
1795                                spin_lock_irqsave(&dev->fib_lock, flagv);
1796                                entry = dev->fib_list.next;
1797                                while (entry != &dev->fib_list) {
1798                                        entry = entry->next;
1799                                        ++num;
1800                                }
1801                                spin_unlock_irqrestore(&dev->fib_lock, flagv);
1802                                hw_fib_pool = NULL;
1803                                fib_pool = NULL;
1804                                if (num
1805                                 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1806                                 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1807                                        hw_fib_p = hw_fib_pool;
1808                                        fib_p = fib_pool;
1809                                        while (hw_fib_p < &hw_fib_pool[num]) {
1810                                                if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1811                                                        --hw_fib_p;
1812                                                        break;
1813                                                }
1814                                                if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1815                                                        kfree(*(--hw_fib_p));
1816                                                        break;
1817                                                }
1818                                        }
1819                                        if ((num = hw_fib_p - hw_fib_pool) == 0) {
1820                                                kfree(fib_pool);
1821                                                fib_pool = NULL;
1822                                                kfree(hw_fib_pool);
1823                                                hw_fib_pool = NULL;
1824                                        }
1825                                } else {
1826                                        kfree(hw_fib_pool);
1827                                        hw_fib_pool = NULL;
1828                                }
1829                                spin_lock_irqsave(&dev->fib_lock, flagv);
1830                                entry = dev->fib_list.next;
1831                                /*
1832                                 * For each Context that is on the
1833                                 * fibctxList, make a copy of the
1834                                 * fib, and then set the event to wake up the
1835                                 * thread that is waiting for it.
1836                                 */
1837                                hw_fib_p = hw_fib_pool;
1838                                fib_p = fib_pool;
1839                                while (entry != &dev->fib_list) {
1840                                        /*
1841                                         * Extract the fibctx
1842                                         */
1843                                        fibctx = list_entry(entry, struct aac_fib_context, next);
1844                                        /*
1845                                         * Check if the queue is getting
1846                                         * backlogged
1847                                         */
1848                                        if (fibctx->count > 20)
1849                                        {
1850                                                /*
1851                                                 * It's *not* jiffies folks,
1852                                                 * but jiffies / HZ so do not
1853                                                 * panic ...
1854                                                 */
1855                                                time_last = fibctx->jiffies;
1856                                                /*
1857                                                 * Has it been > 2 minutes
1858                                                 * since the last read off
1859                                                 * the queue?
1860                                                 */
1861                                                if ((time_now - time_last) > aif_timeout) {
1862                                                        entry = entry->next;
1863                                                        aac_close_fib_context(dev, fibctx);
1864                                                        continue;
1865                                                }
1866                                        }
1867                                        /*
1868                                         * Warning: no sleep allowed while
1869                                         * holding spinlock
1870                                         */
1871                                        if (hw_fib_p < &hw_fib_pool[num]) {
1872                                                hw_newfib = *hw_fib_p;
1873                                                *(hw_fib_p++) = NULL;
1874                                                newfib = *fib_p;
1875                                                *(fib_p++) = NULL;
1876                                                /*
1877                                                 * Make the copy of the FIB
1878                                                 */
1879                                                memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1880                                                memcpy(newfib, fib, sizeof(struct fib));
1881                                                newfib->hw_fib_va = hw_newfib;
1882                                                /*
1883                                                 * Put the FIB onto the
1884                                                 * fibctx's fibs
1885                                                 */
1886                                                list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1887                                                fibctx->count++;
1888                                                /*
1889                                                 * Set the event to wake up the
1890                                                 * thread that is waiting.
1891                                                 */
1892                                                up(&fibctx->wait_sem);
1893                                        } else {
1894                                                printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1895                                        }
1896                                        entry = entry->next;
1897                                }
1898                                /*
1899                                 *      Set the status of this FIB
1900                                 */
1901                                *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1902                                aac_fib_adapter_complete(fib, sizeof(u32));
1903                                spin_unlock_irqrestore(&dev->fib_lock, flagv);
1904                                /* Free up the remaining resources */
1905                                hw_fib_p = hw_fib_pool;
1906                                fib_p = fib_pool;
1907                                while (hw_fib_p < &hw_fib_pool[num]) {
1908                                        kfree(*hw_fib_p);
1909                                        kfree(*fib_p);
1910                                        ++fib_p;
1911                                        ++hw_fib_p;
1912                                }
1913                                kfree(hw_fib_pool);
1914                                kfree(fib_pool);
1915                        }
1916                        kfree(fib);
1917                        spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1918                }
1919                /*
1920         *      There are no more AIFs
1921                 */
1922                spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1923
1924                /*
1925                 *      Background activity
1926                 */
1927                if ((time_before(next_check_jiffies,next_jiffies))
1928                 && ((difference = next_check_jiffies - jiffies) <= 0)) {
1929                        next_check_jiffies = next_jiffies;
1930                        if (aac_check_health(dev) == 0) {
1931                                difference = ((long)(unsigned)check_interval)
1932                                           * HZ;
1933                                next_check_jiffies = jiffies + difference;
1934                        } else if (!dev->queues)
1935                                break;
1936                }
1937                if (!time_before(next_check_jiffies,next_jiffies)
1938                 && ((difference = next_jiffies - jiffies) <= 0)) {
1939                        struct timeval now;
1940                        int ret;
1941
1942                        /* Don't even try to talk to the adapter if it's sick */
1943                        ret = aac_check_health(dev);
1944                        if (!ret && !dev->queues)
1945                                break;
1946                        next_check_jiffies = jiffies
1947                                           + ((long)(unsigned)check_interval)
1948                                           * HZ;
1949                        do_gettimeofday(&now);
1950
1951                        /* Synchronize our watches */
1952                        if (((1000000 - (1000000 / HZ)) > now.tv_usec)
1953                         && (now.tv_usec > (1000000 / HZ)))
1954                                difference = (((1000000 - now.tv_usec) * HZ)
1955                                  + 500000) / 1000000;
1956                        else if (ret == 0) {
1957                                struct fib *fibptr;
1958
1959                                if ((fibptr = aac_fib_alloc(dev))) {
1960                                        int status;
1961                                        __le32 *info;
1962
1963                                        aac_fib_init(fibptr);
1964
1965                                        info = (__le32 *) fib_data(fibptr);
1966                                        if (now.tv_usec > 500000)
1967                                                ++now.tv_sec;
1968
1969                                        *info = cpu_to_le32(now.tv_sec);
1970
1971                                        status = aac_fib_send(SendHostTime,
1972                                                fibptr,
1973                                                sizeof(*info),
1974                                                FsaNormal,
1975                                                1, 1,
1976                                                NULL,
1977                                                NULL);
1978                                        /* Do not set XferState to zero unless
1979                                         * receives a response from F/W */
1980                                        if (status >= 0)
1981                                                aac_fib_complete(fibptr);
1982                                        /* FIB should be freed only after
1983                                         * getting the response from the F/W */
1984                                        if (status != -ERESTARTSYS)
1985                                                aac_fib_free(fibptr);
1986                                }
1987                                difference = (long)(unsigned)update_interval*HZ;
1988                        } else {
1989                                /* retry shortly */
1990                                difference = 10 * HZ;
1991                        }
1992                        next_jiffies = jiffies + difference;
1993                        if (time_before(next_check_jiffies,next_jiffies))
1994                                difference = next_check_jiffies - jiffies;
1995                }
1996                if (difference <= 0)
1997                        difference = 1;
1998                set_current_state(TASK_INTERRUPTIBLE);
1999                schedule_timeout(difference);
2000
2001                if (kthread_should_stop())
2002                        break;
2003        }
2004        if (dev->queues)
2005                remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2006        dev->aif_thread = 0;
2007        return 0;
2008}
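
/*
 * Illustrative sketch, not part of the driver: the "synchronize our
 * watches" arithmetic above converts the microseconds remaining until
 * the next wall-clock second into jiffies, rounded to the nearest
 * tick.  A hypothetical standalone form:
 */
static inline long aac_jiffies_to_next_second(long tv_usec)
{
        return (((1000000 - tv_usec) * HZ) + 500000) / 1000000;
}
/* e.g. with HZ == 250 and tv_usec == 300000 this yields 175 ticks */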
2009
2010int aac_acquire_irq(struct aac_dev *dev)
2011{
2012        int i;
2013        int j;
2014        int ret = 0;
2015        int cpu;
2016
2017        cpu = cpumask_first(cpu_online_mask);
2018        if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
2019                for (i = 0; i < dev->max_msix; i++) {
2020                        dev->aac_msix[i].vector_no = i;
2021                        dev->aac_msix[i].dev = dev;
2022                        if (request_irq(dev->msixentry[i].vector,
2023                                        dev->a_ops.adapter_intr,
2024                                        0, "aacraid", &(dev->aac_msix[i]))) {
2025                                printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
2026                                                dev->name, dev->id, i);
2027                                for (j = 0 ; j < i ; j++)
2028                                        free_irq(dev->msixentry[j].vector,
2029                                                 &(dev->aac_msix[j]));
2030                                pci_disable_msix(dev->pdev);
2031                                ret = -1;
2032                        }
2033                        if (irq_set_affinity_hint(dev->msixentry[i].vector,
2034                                                        get_cpu_mask(cpu))) {
2035                                printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
2036                                            dev->name, dev->id, cpu);
2037                        }
2038                        cpu = cpumask_next(cpu, cpu_online_mask);
2039                }
2040        } else {
2041                dev->aac_msix[0].vector_no = 0;
2042                dev->aac_msix[0].dev = dev;
2043
2044                if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
2045                        IRQF_SHARED, "aacraid",
2046                        &(dev->aac_msix[0])) < 0) {
2047                        if (dev->msi)
2048                                pci_disable_msi(dev->pdev);
2049                        printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
2050                                        dev->name, dev->id);
2051                        ret = -1;
2052                }
2053        }
2054        return ret;
2055}
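
/*
 * Illustrative sketch, not part of the driver: the MSI-X path above
 * spreads vectors over the online CPUs round-robin.  A hypothetical
 * helper returning the CPU for the nth vector, with a wrap added here
 * (as an illustrative assumption) in case vectors outnumber CPUs:
 */
static int aac_vector_cpu(int n)
{
        int cpu = cpumask_first(cpu_online_mask);

        while (n--) {
                cpu = cpumask_next(cpu, cpu_online_mask);
                if (cpu >= nr_cpu_ids)
                        cpu = cpumask_first(cpu_online_mask);
        }
        return cpu;
}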
2056
2057void aac_free_irq(struct aac_dev *dev)
2058{
2059        int i;
2060        int cpu;
2061
2062        cpu = cpumask_first(cpu_online_mask);
2063        if (dev->pdev->device == PMC_DEVICE_S6 ||
2064            dev->pdev->device == PMC_DEVICE_S7 ||
2065            dev->pdev->device == PMC_DEVICE_S8 ||
2066            dev->pdev->device == PMC_DEVICE_S9) {
2067                if (dev->max_msix > 1) {
2068                        for (i = 0; i < dev->max_msix; i++) {
2069                                if (irq_set_affinity_hint(
2070                                        dev->msixentry[i].vector, NULL)) {
2071                                        printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
2072                                            dev->name, dev->id, cpu);
2073                                }
2074                                cpu = cpumask_next(cpu, cpu_online_mask);
2075                                free_irq(dev->msixentry[i].vector,
2076                                                &(dev->aac_msix[i]));
2077                        }
2078                } else {
2079                        free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
2080                }
2081        } else {
2082                free_irq(dev->pdev->irq, dev);
2083        }
2084        if (dev->msi)
2085                pci_disable_msi(dev->pdev);
2086        else if (dev->max_msix > 1)
2087                pci_disable_msix(dev->pdev);
2088}
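
/*
 * Illustrative sketch, not part of the driver: free_irq() only removes
 * the handler whose dev_id cookie matches the one given to
 * request_irq(), which is why the branches above mirror the cookies
 * used at request time.  A hypothetical minimal pairing:
 */
static int aac_demo_acquire(struct aac_dev *dev)
{
        return request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
                           IRQF_SHARED, "aacraid", &dev->aac_msix[0]);
}

static void aac_demo_release(struct aac_dev *dev)
{
        free_irq(dev->pdev->irq, &dev->aac_msix[0]);    /* same cookie */
}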
2089