linux/drivers/scsi/aacraid/dpcsup.c
/*
 *      Adaptec AAC series RAID controller driver
 *      (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 *
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/semaphore.h>

#include "aacraid.h"
/**
 *      aac_response_normal     -       Handle command replies
 *      @q: Queue to read from
 *
 *      This DPC routine is run when the adapter interrupts us to let us
 *      know there is a response on our normal priority queue. We will pull
 *      off all the QEs there are and wake up all the waiters before exiting.
 *      We take a spinlock on the queue before operating on it.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
        struct aac_dev * dev = q->dev;
        struct aac_entry *entry;
        struct hw_fib * hwfib;
        struct fib * fib;
        int consumed = 0;
        unsigned long flags, mflags;

        spin_lock_irqsave(q->lock, flags);
        /*
         *      Keep pulling response QEs off the response queue and waking
         *      up the waiters until there are no more QEs. We then return
         *      to the system. If no response was requested we just
         *      deallocate the Fib here and continue.
         */
        while(aac_consumer_get(dev, q, &entry))
        {
                int fast;
                u32 index = le32_to_cpu(entry->addr);
                fast = index & 0x01;
                fib = &dev->fibs[index >> 2];
                hwfib = fib->hw_fib_va;

                aac_consumer_free(dev, q, HostNormRespQueue);
                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
                 *
                 *      If the fib has been timed out already, then just
                 *      continue. The caller has already been notified that
                 *      the fib timed out.
                 */
                dev->queues->queue[AdapNormCmdQueue].numpending--;

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        spin_unlock_irqrestore(q->lock, flags);
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        spin_lock_irqsave(q->lock, flags);
                        continue;
                }
                spin_unlock_irqrestore(q->lock, flags);

                if (fast) {
                        /*
                         *      Doctor the fib
                         */
                        *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                        hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);

                if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
                {
                        __le32 *pstatus = (__le32 *)hwfib->data;
                        if (*pstatus & cpu_to_le32(0xffff0000))
                                *pstatus = cpu_to_le32(ST_OK);
                }
                if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
                {
                        if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
                                FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
                        else
                                FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
                        /*
                         *      NOTE:  we cannot touch the fib after this
                         *          call, because it may have been deallocated.
                         */
                        fib->flags = 0;
                        fib->callback(fib->callback_data, fib);
                } else {
                        unsigned long flagv;
                        spin_lock_irqsave(&fib->event_lock, flagv);
                        if (!fib->done) {
                                fib->done = 1;
                                up(&fib->event_wait);
                        }
                        spin_unlock_irqrestore(&fib->event_lock, flagv);

                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);

                        FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                        if (fib->done == 2) {
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                fib->done = 0;
                                spin_unlock_irqrestore(&fib->event_lock, flagv);
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }
                }
                consumed++;
                spin_lock_irqsave(q->lock, flags);
        }

        if (consumed > aac_config.peak_fibs)
                aac_config.peak_fibs = consumed;
        if (consumed == 0)
                aac_config.zero_fibs++;

        spin_unlock_irqrestore(q->lock, flags);
        return 0;
}

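/*
 * Illustrative sketch, not driver code: aac_response_normal() depends on
 * the adapter packing two fields into the 32-bit addr of each response
 * queue entry.  Assuming the encoding implied by the code above (bit 0 is
 * the fast-response flag, bits 2 and up select the slot in dev->fibs), the
 * decode step reads as follows.  The helper name is hypothetical.
 */
#if 0
static struct fib *example_decode_response_entry(struct aac_dev *dev,
                                                 struct aac_entry *entry,
                                                 int *fast)
{
        u32 index = le32_to_cpu(entry->addr);

        *fast = index & 0x01;           /* bit 0: fast-path completion */
        return &dev->fibs[index >> 2];  /* bits 2..31: host fib table slot */
}
#endif
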
/**
 *      aac_command_normal      -       handle commands
 *      @q: queue to process
 *
 *      This DPC routine is queued when the adapter interrupts us to let us
 *      know there is a command on our normal priority queue. We will pull
 *      off all the QEs there are and wake up all the waiters before exiting.
 *      We take a spinlock on the queue before operating on it.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
        struct aac_dev * dev = q->dev;
        struct aac_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(q->lock, flags);

        /*
         *      Keep pulling command QEs off the command queue, handing them
         *      to the AIF thread or completing them here, until there are
         *      no more QEs. We then return to the system.
         */
        while(aac_consumer_get(dev, q, &entry))
        {
                struct fib fibctx;
                struct hw_fib * hw_fib;
                u32 index;
                struct fib *fib = &fibctx;

                index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
                hw_fib = &dev->aif_base_va[index];

                /*
                 *      Allocate a FIB at all costs. For non-queued stuff
                 *      we can just use the stack, which is fine. We need
                 *      a fib object in order to manage the linked lists.
                 */
                if (dev->aif_thread)
                        if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
                                fib = &fibctx;

                memset(fib, 0, sizeof(struct fib));
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

                if (dev->aif_thread && fib != &fibctx) {
                        list_add_tail(&fib->fiblink, &q->cmdq);
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        wake_up_interruptible(&q->cmdready);
                } else {
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        spin_unlock_irqrestore(q->lock, flags);
                        /*
                         *      Set the status of this FIB
                         */
                        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
                        aac_fib_adapter_complete(fib, sizeof(u32));
                        spin_lock_irqsave(q->lock, flags);
                }
        }
        spin_unlock_irqrestore(q->lock, flags);
        return 0;
}

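/*
 * Illustrative sketch, not driver code: when dev->aif_thread is set,
 * aac_command_normal() above only queues the fib on q->cmdq and wakes
 * q->cmdready; a sleeping thread (aac_command_thread() in commsup.c in the
 * real driver) drains the list.  A minimal consumer loop could look like
 * the following; example_handle_aif() is a hypothetical stand-in for the
 * real AIF processing.
 */
#if 0
static int example_aif_consumer(struct aac_queue *q)
{
        unsigned long flags;

        for (;;) {
                /* sleep until aac_command_normal() queues work */
                if (wait_event_interruptible(q->cmdready,
                                             !list_empty(&q->cmdq)))
                        return -ERESTARTSYS;

                spin_lock_irqsave(q->lock, flags);
                while (!list_empty(&q->cmdq)) {
                        struct fib *fib = list_first_entry(&q->cmdq,
                                                           struct fib, fiblink);

                        list_del(&fib->fiblink);
                        spin_unlock_irqrestore(q->lock, flags);
                        example_handle_aif(fib);        /* hypothetical */
                        spin_lock_irqsave(q->lock, flags);
                }
                spin_unlock_irqrestore(q->lock, flags);
        }
}
#endif
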
/*
 *
 * aac_aif_callback
 * @context: the context set in the fib - here it is the fib itself
 * @fibptr: pointer to the fib
 *
 * Handles the AIFs - new method (SRC)
 *
 */

static void aac_aif_callback(void *context, struct fib * fibptr)
{
        struct fib *fibctx;
        struct aac_dev *dev;
        struct aac_aifcmd *cmd;
        int status;

        fibctx = (struct fib *)context;
        BUG_ON(fibptr == NULL);
        dev = fibptr->dev;

        /* The adapter has no more events for us: retire this fib. */
        if (fibptr->hw_fib_va->header.XferState &
            cpu_to_le32(NoMoreAifDataAvailable)) {
                aac_fib_complete(fibptr);
                aac_fib_free(fibptr);
                return;
        }

        /* Dispatch the received AIF through the common path. */
        aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

        /*
         * Reinitialize the fib and resend it so the adapter can post the
         * next event into it, keeping the AIF channel armed.
         */
        aac_fib_init(fibctx);
        cmd = (struct aac_aifcmd *) fib_data(fibctx);
        cmd->command = cpu_to_le32(AifReqEvent);

        status = aac_fib_send(AifRequest,
                fibctx,
                sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
                FsaNormal,
                0, 1,
                (fib_callback)aac_aif_callback, fibctx);
}

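/*
 * Illustrative sketch, not driver code: aac_aif_callback() above keeps the
 * AIF channel armed by resending the same fib after each event.  The cycle
 * has to be started once, which is what the isAif == 2 branch of
 * aac_intr_normal() below does; restated here as a hypothetical helper:
 */
#if 0
static int example_aif_prime(struct aac_dev *dev)
{
        struct fib *fibctx = aac_fib_alloc(dev);
        struct aac_aifcmd *cmd;

        if (!fibctx)
                return -ENOMEM;
        aac_fib_init(fibctx);

        cmd = (struct aac_aifcmd *)fib_data(fibctx);
        cmd->command = cpu_to_le32(AifReqEvent);

        /* async send: aac_aif_callback() rearms or retires the fib */
        return aac_fib_send(AifRequest, fibctx,
                            sizeof(struct hw_fib) - sizeof(struct aac_fibhdr),
                            FsaNormal, 0, 1,
                            (fib_callback)aac_aif_callback, fibctx);
}
#endif
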
/**
 *      aac_intr_normal -       Handle command replies
 *      @dev: Device
 *      @index: completion reference
 *      @isAif: 0 for a command reply, 1 for a legacy AIF, 2 for a new (SRC) AIF
 *      @isFastResponse: non-zero if the adapter completed via the fast path
 *      @aif_fib: AIF data already copied from the adapter, or NULL
 *
 *      This DPC routine is run when the adapter interrupts us to let us
 *      know there is a response on our normal priority queue. We will pull
 *      off all the QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
                        int isAif, int isFastResponse, struct hw_fib *aif_fib)
{
        unsigned long mflags;
        dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
        if (isAif == 1) {       /* AIF - common */
                struct hw_fib * hw_fib;
                struct fib * fib;
                struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
                unsigned long flags;

                /*
                 *      Allocate a FIB.  Here we cannot use the stack: the
                 *      fib is handed off to the AIF thread and must outlive
                 *      this routine.  We need a fib object in order to
                 *      manage the linked lists.
                 */
                if ((!dev->aif_thread)
                 || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
                        return 1;
                if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
                        kfree (fib);
                        return 1;
                }
                if (aif_fib != NULL) {
                        memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
                } else {
                        memcpy(hw_fib,
                                (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
                                index), sizeof(struct hw_fib));
                }
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

                spin_lock_irqsave(q->lock, flags);
                list_add_tail(&fib->fiblink, &q->cmdq);
                wake_up_interruptible(&q->cmdready);
                spin_unlock_irqrestore(q->lock, flags);
                return 1;
        } else if (isAif == 2) {        /* AIF - new (SRC) */
                struct fib *fibctx;
                struct aac_aifcmd *cmd;

                fibctx = aac_fib_alloc(dev);
                if (!fibctx)
                        return 1;
                aac_fib_init(fibctx);

                cmd = (struct aac_aifcmd *) fib_data(fibctx);
                cmd->command = cpu_to_le32(AifReqEvent);

                return aac_fib_send(AifRequest,
                        fibctx,
                        sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
                        FsaNormal,
                        0, 1,
                        (fib_callback)aac_aif_callback, fibctx);
        } else {
                struct fib *fib = &dev->fibs[index];
                struct hw_fib * hwfib = fib->hw_fib_va;

                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
                 *
                 *      If the fib has been timed out already, then just
                 *      continue. The caller has already been notified that
                 *      the fib timed out.
                 */
                dev->queues->queue[AdapNormCmdQueue].numpending--;

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        return 0;
                }

                if (isFastResponse) {
                        /*
                         *      Doctor the fib
                         */
                        *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                        hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);

                if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
                {
                        __le32 *pstatus = (__le32 *)hwfib->data;
                        if (*pstatus & cpu_to_le32(0xffff0000))
                                *pstatus = cpu_to_le32(ST_OK);
                }
                if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
                {
                        if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
                                FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
                        else
                                FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
                        /*
                         *      NOTE:  we cannot touch the fib after this
                         *          call, because it may have been deallocated.
                         */
                        fib->flags = 0;
                        fib->callback(fib->callback_data, fib);
                } else {
                        unsigned long flagv;
                        dprintk((KERN_INFO "event_wait up\n"));
                        spin_lock_irqsave(&fib->event_lock, flagv);
                        if (!fib->done) {
                                fib->done = 1;
                                up(&fib->event_wait);
                        }
                        spin_unlock_irqrestore(&fib->event_lock, flagv);

                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);

                        FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                        if (fib->done == 2) {
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                fib->done = 0;
                                spin_unlock_irqrestore(&fib->event_lock, flagv);
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }

                }
                return 0;
        }
}

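/*
 * Illustrative sketch, not driver code: aac_intr_normal() is the common
 * completion path; each board's interrupt handler (rx.c, rkt.c, sa.c,
 * src.c in the real driver) decides the isAif and isFastResponse arguments
 * before calling it.  A hypothetical dispatcher for a board that has
 * already copied AIF data out of the adapter might look like this:
 */
#if 0
static void example_dispatch_completion(struct aac_dev *dev, u32 handle,
                                        int is_fast, struct hw_fib *aif)
{
        if (aif)
                /* adapter-initiated event, AIF data already copied */
                aac_intr_normal(dev, 0, 1, 0, aif);
        else
                /* reply to a host command; handle indexes dev->fibs */
                aac_intr_normal(dev, handle, 0, is_fast, NULL);
}
#endif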