linux/drivers/scsi/aacraid/rx.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Adaptec AAC series RAID controller driver
 *      (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  rx.c
 *
 * Abstract: Hardware miniport for Drawbridge specific hardware functions.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

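/*
 * Note: the rx_readb()/rx_readl()/rx_writeb()/rx_writel() helpers used
 * throughout this file are thin accessors for the i960 "Rx"/Drawbridge
 * register block mapped at dev->regs.rx (see aacraid.h).
 */
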
static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
        struct aac_dev *dev = dev_id;
        unsigned long bellbits;
        u8 intstat = rx_readb(dev, MUnit.OISR);

        /*
         *      Read mask and invert because drawbridge is reversed.
         *      This allows us to only service interrupts that have
         *      been enabled.
         *      Check to see if this is our interrupt.  If it isn't just return
         */
        if (likely(intstat & ~(dev->OIMR))) {
                bellbits = rx_readl(dev, OutboundDoorbellReg);
                if (unlikely(bellbits & DoorBellPrintfReady)) {
                        aac_printf(dev, readl(&dev->IndexRegs->Mailbox[5]));
                        rx_writel(dev, MUnit.ODR, DoorBellPrintfReady);
                        rx_writel(dev, InboundDoorbellReg, DoorBellPrintfDone);
                }
                else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
                        rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
                        aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
                }
                else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
                        rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
                        aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
                }
                else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
                        rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
                }
                else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
                        rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
                        rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
                }
                return IRQ_HANDLED;
        }
        return IRQ_NONE;
}

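/*
 * New-comm ("message") interrupt handling: completion handles are pulled
 * from the MUnit.OutboundQueue register.  As used below, 0xFFFFFFFF means
 * the queue is empty (the register is re-read once to ride out a transient
 * read), bit 1 marks an AIF entry (0xFFFFFFFE being a "special" AIF that is
 * simply consumed), and for ordinary responses bit 0 marks a fast response
 * while the remaining bits, shifted right by two, form the index passed to
 * aac_intr_normal().
 */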
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
        int isAif, isFastResponse, isSpecial;
        struct aac_dev *dev = dev_id;
        u32 Index = rx_readl(dev, MUnit.OutboundQueue);
        if (unlikely(Index == 0xFFFFFFFFL))
                Index = rx_readl(dev, MUnit.OutboundQueue);
        if (likely(Index != 0xFFFFFFFFL)) {
                do {
                        isAif = isFastResponse = isSpecial = 0;
                        if (Index & 0x00000002L) {
                                isAif = 1;
                                if (Index == 0xFFFFFFFEL)
                                        isSpecial = 1;
                                Index &= ~0x00000002L;
                        } else {
                                if (Index & 0x00000001L)
                                        isFastResponse = 1;
                                Index >>= 2;
                        }
                        if (!isSpecial) {
                                if (unlikely(aac_intr_normal(dev,
                                                Index, isAif,
                                                isFastResponse, NULL))) {
                                        rx_writel(dev,
                                                MUnit.OutboundQueue,
                                                Index);
                                        rx_writel(dev,
                                                MUnit.ODR,
                                                DoorBellAdapterNormRespReady);
                                }
                        }
                        Index = rx_readl(dev, MUnit.OutboundQueue);
                } while (Index != 0xFFFFFFFFL);
                return IRQ_HANDLED;
        }
        return IRQ_NONE;
}

/**
 *      aac_rx_disable_interrupt        -       Disable interrupts
 *      @dev: Adapter
 */

static void aac_rx_disable_interrupt(struct aac_dev *dev)
{
        rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}

/**
 *      aac_rx_enable_interrupt_producer        -       Enable interrupts
 *      @dev: Adapter
 */

static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
{
        rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
}

/**
 *      aac_rx_enable_interrupt_message -       Enable interrupts
 *      @dev: Adapter
 */

static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
{
        rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}
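
/*
 * OIMR (outbound interrupt mask register) values used above: 0xff masks
 * every outbound interrupt, 0xfb clears only bit 2, and 0xf7 clears only
 * bit 3.  The assumption is that bit 2 gates the doorbell interrupt used
 * by the producer/consumer interface and bit 3 gates the outbound
 * post-queue interrupt used by the new-comm ("message") interface.
 */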

/**
 *      rx_sync_cmd     -       send a command and wait
 *      @dev: Adapter
 *      @command: Command to execute
 *      @p1: first parameter
 *      @p2: second parameter
 *      @p3: third parameter
 *      @p4: fourth parameter
 *      @p5: fifth parameter
 *      @p6: sixth parameter
 *      @status: adapter status
 *      @r1: first return value
 *      @r2: second return value
 *      @r3: third return value
 *      @r4: fourth return value
 *
 *      This routine will send a synchronous command to the adapter and wait
 *      for its completion.
 */

static int rx_sync_cmd(struct aac_dev *dev, u32 command,
        u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
        u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
        unsigned long start;
        int ok;
        /*
         *      Write the command into Mailbox 0
         */
        writel(command, &dev->IndexRegs->Mailbox[0]);
        /*
         *      Write the parameters into Mailboxes 1 - 4
         */
        writel(p1, &dev->IndexRegs->Mailbox[1]);
        writel(p2, &dev->IndexRegs->Mailbox[2]);
        writel(p3, &dev->IndexRegs->Mailbox[3]);
        writel(p4, &dev->IndexRegs->Mailbox[4]);
        /*
         *      Clear the synch command doorbell to start on a clean slate.
         */
        rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
        /*
         *      Disable doorbell interrupts.  Completion of the synch command
         *      is polled for below rather than delivered through the
         *      interrupt handler.
         */
        rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
        /*
         *      Force the completion of the mask register write before issuing
         *      the interrupt.
         */
        rx_readb(dev, MUnit.OIMR);
        /*
         *      Signal that there is a new synch command
         */
        rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

        ok = 0;
        start = jiffies;

        /*
         *      Wait up to 30 seconds
         */
        while (time_before(jiffies, start + 30 * HZ))
        {
                udelay(5);      /* Delay 5 microseconds to let Mon960 get info. */
                /*
                 *      Mon960 will set doorbell0 bit when it has completed the command.
                 */
                if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
                        /*
                         *      Clear the doorbell.
                         */
                        rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
                        ok = 1;
                        break;
                }
                /*
                 *      Yield the processor in case we are slow
                 */
                msleep(1);
        }
        if (unlikely(ok != 1)) {
                /*
                 *      Restore interrupt mask even though we timed out
                 */
                aac_adapter_enable_int(dev);
                return -ETIMEDOUT;
        }
        /*
         *      Pull the synch status from Mailbox 0.
         */
        if (status)
                *status = readl(&dev->IndexRegs->Mailbox[0]);
        if (r1)
                *r1 = readl(&dev->IndexRegs->Mailbox[1]);
        if (r2)
                *r2 = readl(&dev->IndexRegs->Mailbox[2]);
        if (r3)
                *r3 = readl(&dev->IndexRegs->Mailbox[3]);
        if (r4)
                *r4 = readl(&dev->IndexRegs->Mailbox[4]);
        /*
         *      Clear the synch command doorbell.
         */
        rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
        /*
         *      Restore interrupt mask
         */
        aac_adapter_enable_int(dev);
        return 0;
}
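
/*
 * Callers in this file use rx_sync_cmd() (directly or through the
 * aac_adapter_sync_cmd() dispatch) for one-shot firmware requests, e.g.
 * aac_rx_interrupt_adapter() below breakpoints the i960 with no parameters
 * and no return mailboxes:
 *
 *      rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
 *                  NULL, NULL, NULL, NULL, NULL);
 */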

/**
 *      aac_rx_interrupt_adapter        -       interrupt adapter
 *      @dev: Adapter
 *
 *      Send an interrupt to the i960 and breakpoint it.
 */

static void aac_rx_interrupt_adapter(struct aac_dev *dev)
{
        rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 *      aac_rx_notify_adapter           -       send an event to the adapter
 *      @dev: Adapter
 *      @event: Event to send
 *
 *      Notify the i960 that something it probably cares about has
 *      happened.
 */

static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
        switch (event) {

        case AdapNormCmdQue:
                rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_1);
                break;
        case HostNormRespNotFull:
                rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_4);
                break;
        case AdapNormRespQue:
                rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_2);
                break;
        case HostNormCmdNotFull:
                rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_3);
                break;
        case HostShutdown:
                break;
        case FastIo:
                rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_6);
                break;
        case AdapPrintfDone:
                rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_5);
                break;
        default:
                BUG();
                break;
        }
}

/**
 *      aac_rx_start_adapter            -       activate adapter
 *      @dev:   Adapter
 *
 *      Start up processing on an i960 based AAC adapter
 */

static void aac_rx_start_adapter(struct aac_dev *dev)
{
        union aac_init *init;

        init = dev->init;
        init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
        // We can only use a 32 bit address here
        rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
          0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 *      aac_rx_check_health     -       check adapter health
 *      @dev: device to check if healthy
 *
 *      Will attempt to determine if the specified adapter is alive and
 *      capable of handling requests, returning 0 if alive.
 */
static int aac_rx_check_health(struct aac_dev *dev)
{
        u32 status = rx_readl(dev, MUnit.OMRx[0]);

        /*
         *      Check to see if the board failed any self tests.
         */
        if (unlikely(status & SELF_TEST_FAILED))
                return -1;
        /*
         *      Check to see if the board panic'd.
         */
        if (unlikely(status & KERNEL_PANIC)) {
                char *buffer;
                struct POSTSTATUS {
                        __le32 Post_Command;
                        __le32 Post_Address;
                } *post;
                dma_addr_t paddr, baddr;
                int ret;

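                /*
                 *      A status word of the form 0xBCxxyyxx already carries
                 *      the panic (BlinkLED) code in bits 23:16 and is
                 *      returned directly.  Otherwise the POST results are
                 *      fetched: the adapter writes an ASCII "0x.." code into
                 *      a host buffer whose address is passed in Mailbox 1,
                 *      and that code is parsed out below.
                 */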
                if (likely((status & 0xFF000000L) == 0xBC000000L))
                        return (status >> 16) & 0xFF;
                buffer = dma_alloc_coherent(&dev->pdev->dev, 512, &baddr,
                                            GFP_KERNEL);
                ret = -2;
                if (unlikely(buffer == NULL))
                        return ret;
                post = dma_alloc_coherent(&dev->pdev->dev,
                                          sizeof(struct POSTSTATUS), &paddr,
                                          GFP_KERNEL);
                if (unlikely(post == NULL)) {
                        dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
                        return ret;
                }
                memset(buffer, 0, 512);
                post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
                post->Post_Address = cpu_to_le32(baddr);
                rx_writel(dev, MUnit.IMRx[0], paddr);
                rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
                  NULL, NULL, NULL, NULL, NULL);
                dma_free_coherent(&dev->pdev->dev, sizeof(struct POSTSTATUS),
                                  post, paddr);
                if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
                        ret = (hex_to_bin(buffer[2]) << 4) +
                                hex_to_bin(buffer[3]);
                }
                dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
                return ret;
        }
        /*
         *      Wait for the adapter to be up and running.
         */
        if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
                return -3;
        /*
         *      Everything is OK
         */
        return 0;
}

/**
 *      aac_rx_deliver_producer -       deliver a fib to the adapter
 *      @fib: fib to issue
 *
 *      Will send a fib, returning 0 if successful.
 */
int aac_rx_deliver_producer(struct fib *fib)
{
        struct aac_dev *dev = fib->dev;
        struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
        u32 Index;
        unsigned long nointr = 0;

        aac_queue_get(dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);

        atomic_inc(&q->numpending);
        *(q->headers.producer) = cpu_to_le32(Index + 1);
        if (!(nointr & aac_config.irq_mod))
                aac_adapter_notify(dev, AdapNormCmdQueue);

        return 0;
}

/**
 *      aac_rx_deliver_message  -       deliver a fib to the adapter
 *      @fib: fib to issue
 *
 *      Will send a fib, returning 0 if successful.
 */
static int aac_rx_deliver_message(struct fib *fib)
{
        struct aac_dev *dev = fib->dev;
        struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
        u32 Index;
        u64 addr;
        volatile void __iomem *device;

        unsigned long count = 10000000L; /* 50 seconds */
        atomic_inc(&q->numpending);
        for (;;) {
                Index = rx_readl(dev, MUnit.InboundQueue);
                if (unlikely(Index == 0xFFFFFFFFL))
                        Index = rx_readl(dev, MUnit.InboundQueue);
                if (likely(Index != 0xFFFFFFFFL))
                        break;
                if (--count == 0) {
                        atomic_dec(&q->numpending);
                        return -ETIMEDOUT;
                }
                udelay(5);
        }
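        /*
         *      Index is a byte offset into the adapter's mapped register
         *      space (dev->base): write the 64-bit FIB address (low word
         *      first) and the FIB size into that slot, then post the offset
         *      back to the InboundQueue register to hand the entry to the
         *      adapter.
         */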
        device = dev->base + Index;
        addr = fib->hw_fib_pa;
        writel((u32)(addr & 0xffffffff), device);
        device += sizeof(u32);
        writel((u32)(addr >> 32), device);
        device += sizeof(u32);
        writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
        rx_writel(dev, MUnit.InboundQueue, Index);
        return 0;
}

/**
 *      aac_rx_ioremap  -       map or unmap the adapter register space
 *      @dev: adapter
 *      @size: mapping resize request
 *
 */
static int aac_rx_ioremap(struct aac_dev *dev, u32 size)
{
        if (!size) {
                iounmap(dev->regs.rx);
                return 0;
        }
        dev->base = dev->regs.rx = ioremap(dev->base_start, size);
        if (dev->base == NULL)
                return -1;
        dev->IndexRegs = &dev->regs.rx->IndexRegs;
        return 0;
}

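/*
 * Adapter restart, as implemented below: when the firmware does not
 * advertise AAC_OPTION_MU_RESET (or the caller passed bled >= 0 or -2),
 * a reset is requested with IOP_RESET_ALWAYS, falling back to IOP_RESET.
 * A reply of 0x3803000F ("use other method") makes the code trigger the
 * reset by writing to MUnit.reserved2 instead.  Afterwards the routine
 * sleeps, fails with -ENODEV if the firmware reports KERNEL_PANIC, and
 * raises startup_timeout so the subsequent bring-up wait is long enough.
 */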
static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
        u32 var = 0;

        if (!(dev->supplement_adapter_info.supported_options2 &
          AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
                if (bled)
                        printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
                                dev->name, dev->id, bled);
                else {
                        bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
                          0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
                        if (!bled && (var != 0x00000001) && (var != 0x3803000F))
                                bled = -EINVAL;
                }
                if (bled && (bled != -ETIMEDOUT))
                        bled = aac_adapter_sync_cmd(dev, IOP_RESET,
                          0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);

                if (bled && (bled != -ETIMEDOUT))
                        return -EINVAL;
        }
        if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
                rx_writel(dev, MUnit.reserved2, 3);
                msleep(5000); /* Delay 5 seconds */
                var = 0x00000001;
        }
        if (bled && (var != 0x00000001))
                return -EINVAL;
        ssleep(5);
        if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
                return -ENODEV;
        if (startup_timeout < 300)
                startup_timeout = 300;
        return 0;
}

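/*
 * Two communication methods are supported: AAC_COMM_PRODUCER uses the
 * host-memory producer/consumer queues and doorbell interrupts, while
 * AAC_COMM_MESSAGE ("new comm") posts FIB addresses directly through the
 * MUnit inbound/outbound queue registers.  Each method gets its own
 * interrupt-enable, interrupt and delivery handlers.
 */
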
/**
 *      aac_rx_select_comm      -       Select communications method
 *      @dev: Adapter
 *      @comm: communications method
 */

int aac_rx_select_comm(struct aac_dev *dev, int comm)
{
        switch (comm) {
        case AAC_COMM_PRODUCER:
                dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
                dev->a_ops.adapter_intr = aac_rx_intr_producer;
                dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
                break;
        case AAC_COMM_MESSAGE:
                dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
                dev->a_ops.adapter_intr = aac_rx_intr_message;
                dev->a_ops.adapter_deliver = aac_rx_deliver_message;
                break;
        default:
                return 1;
        }
        return 0;
}

/**
 *      _aac_rx_init    -       initialize an i960 based AAC card
 *      @dev: device to configure
 *
 *      Allocate and set up resources for the i960 based AAC variants. The
 *      device_interface in the commregion will be allocated and linked
 *      to the comm region.
 */

int _aac_rx_init(struct aac_dev *dev)
{
        unsigned long start;
        unsigned long status;
        int restart = 0;
        int instance = dev->id;
        const char *name = dev->name;

        if (aac_adapter_ioremap(dev, dev->base_size)) {
                printk(KERN_WARNING "%s: unable to map adapter.\n", name);
                goto error_iounmap;
        }

        /* Failure to reset here is an option ... */
        dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
        dev->OIMR = status = rx_readb(dev, MUnit.OIMR);

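        /*
         *      A mask without both bits 2 and 3 set (0x0c) is taken to mean
         *      the firmware was left in an unexpected state; in that case
         *      (or when an init-time reset was explicitly requested) the
         *      IOP is reset and any stale entries are drained from the
         *      hardware FIFO.
         */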
        if (((status & 0x0c) != 0x0c) || dev->init_reset) {
                dev->init_reset = false;
                if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
                        /* Make sure the Hardware FIFO is empty */
                        while ((++restart < 512) &&
                               (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
                }
        }

        /*
         *      Check to see if the board panic'd while booting.
         */
        status = rx_readl(dev, MUnit.OMRx[0]);
        if (status & KERNEL_PANIC) {
                if (aac_rx_restart_adapter(dev,
                        aac_rx_check_health(dev), IOP_HWSOFT_RESET))
                        goto error_iounmap;
                ++restart;
        }
        /*
         *      Check to see if the board failed any self tests.
         */
        status = rx_readl(dev, MUnit.OMRx[0]);
        if (status & SELF_TEST_FAILED) {
                printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
                goto error_iounmap;
        }
        /*
         *      Check to see if the monitor panic'd while booting.
         */
        if (status & MONITOR_PANIC) {
                printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
                goto error_iounmap;
        }
        start = jiffies;
        /*
         *      Wait for the adapter to be up and running.  Wait up to
         *      startup_timeout seconds (3 minutes by default); if the
         *      adapter faults or stalls part-way through and has not been
         *      restarted yet, restart it and start the wait over.
         */
        while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
        {
                if ((restart &&
                  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
                  time_after(jiffies, start+HZ*startup_timeout)) {
                        printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
                                        dev->name, instance, status);
                        goto error_iounmap;
                }
                if (!restart &&
                  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
                  time_after(jiffies, start + HZ *
                  ((startup_timeout > 60)
                    ? (startup_timeout - 60)
                    : (startup_timeout / 2))))) {
                        if (likely(!aac_rx_restart_adapter(dev,
                                aac_rx_check_health(dev), IOP_HWSOFT_RESET)))
                                start = jiffies;
                        ++restart;
                }
                msleep(1);
        }
        if (restart && aac_commit)
                aac_commit = 1;
        /*
         *      Fill in the common function dispatch table.
         */
        dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
        dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
        dev->a_ops.adapter_notify = aac_rx_notify_adapter;
        dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
        dev->a_ops.adapter_check_health = aac_rx_check_health;
        dev->a_ops.adapter_restart = aac_rx_restart_adapter;
        dev->a_ops.adapter_start = aac_rx_start_adapter;

        /*
         *      First clear out all interrupts.  Then enable the ones that we
         *      can handle.
         */
        aac_adapter_comm(dev, AAC_COMM_PRODUCER);
        aac_adapter_disable_int(dev);
        rx_writel(dev, MUnit.ODR, 0xffffffff);
        aac_adapter_enable_int(dev);

        if (aac_init_adapter(dev) == NULL)
                goto error_iounmap;
        aac_adapter_comm(dev, dev->comm_interface);
        dev->sync_mode = 0;     /* sync. mode not supported */
        dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
        if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
                        IRQF_SHARED, "aacraid", dev) < 0) {
                if (dev->msi)
                        pci_disable_msi(dev->pdev);
                printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
                        name, instance);
                goto error_iounmap;
        }
        dev->dbg_base = dev->base_start;
        dev->dbg_base_mapped = dev->base;
        dev->dbg_size = dev->base_size;

        aac_adapter_enable_int(dev);
        /*
         *      Tell the adapter that all is configured, and it can
         *      start accepting requests
         */
        aac_rx_start_adapter(dev);

        return 0;

error_iounmap:

        return -1;
}

int aac_rx_init(struct aac_dev *dev)
{
        /*
         *      Fill in the function dispatch table.
         */
        dev->a_ops.adapter_ioremap = aac_rx_ioremap;
        dev->a_ops.adapter_comm = aac_rx_select_comm;

        return _aac_rx_init(dev);
}

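/*
 * aac_rx_init() is the bring-up entry point for i960/Rx-class boards and is
 * referenced from the controller table in the aacraid core (linit.c).  The
 * non-static helpers above (_aac_rx_init, aac_rx_select_comm,
 * aac_rx_deliver_producer) appear to be shared with the closely related
 * board variants rather than forming a public API.
 */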