linux/drivers/ata/pdc_adma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  pdc_adma.c - Pacific Digital Corporation ADMA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *
 *  Copyright 2005 Mark Lord
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Supports ATA disks in single-packet ADMA mode.
 *  Uses PIO for everything else.
 *
 *  TODO:  Use ADMA transfers for ATAPI devices, when possible.
 *  This requires careful attention to a number of quirks of the chip.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME        "pdc_adma"
#define DRV_VERSION     "1.0"

/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base, port_no)    ((base) + ((port_no) * 0x40))

/* macro to calculate base address for ADMA regs */
#define ADMA_REGS(base, port_no)        ((base) + 0x80 + ((port_no) * 0x20))

/* macro to obtain addresses from ata_port */
#define ADMA_PORT_REGS(ap) \
        ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no)

enum {
        ADMA_MMIO_BAR           = 4,

        ADMA_PORTS              = 2,
        ADMA_CPB_BYTES          = 40,
        ADMA_PRD_BYTES          = LIBATA_MAX_PRD * 16,
        ADMA_PKT_BYTES          = ADMA_CPB_BYTES + ADMA_PRD_BYTES,

        ADMA_DMA_BOUNDARY       = 0xffffffff,

        /* global register offsets */
        ADMA_MODE_LOCK          = 0x00c7,

        /* per-channel register offsets */
        ADMA_CONTROL            = 0x0000, /* ADMA control */
        ADMA_STATUS             = 0x0002, /* ADMA status */
        ADMA_CPB_COUNT          = 0x0004, /* CPB count */
        ADMA_CPB_CURRENT        = 0x000c, /* current CPB address */
        ADMA_CPB_NEXT           = 0x000c, /* next CPB address */
        ADMA_CPB_LOOKUP         = 0x0010, /* CPB lookup table */
        ADMA_FIFO_IN            = 0x0014, /* input FIFO threshold */
        ADMA_FIFO_OUT           = 0x0016, /* output FIFO threshold */

        /* ADMA_CONTROL register bits */
        aNIEN                   = (1 << 8), /* irq mask: 1==masked */
        aGO                     = (1 << 7), /* packet trigger ("Go!") */
        aRSTADM                 = (1 << 5), /* ADMA logic reset */
        aPIOMD4                 = 0x0003,   /* PIO mode 4 */

        /* ADMA_STATUS register bits */
        aPSD                    = (1 << 6),
        aUIRQ                   = (1 << 4),
        aPERR                   = (1 << 0),

        /* CPB bits */
        cDONE                   = (1 << 0),
        cATERR                  = (1 << 3),

        cVLD                    = (1 << 0),
        cDAT                    = (1 << 2),
        cIEN                    = (1 << 3),

        /* PRD bits */
        pORD                    = (1 << 4),
        pDIRO                   = (1 << 5),
        pEND                    = (1 << 7),

        /* ATA register flags */
        rIGN                    = (1 << 5),
        rEND                    = (1 << 7),

        /* ATA register addresses */
        ADMA_REGS_CONTROL       = 0x0e,
        ADMA_REGS_SECTOR_COUNT  = 0x12,
        ADMA_REGS_LBA_LOW       = 0x13,
        ADMA_REGS_LBA_MID       = 0x14,
        ADMA_REGS_LBA_HIGH      = 0x15,
        ADMA_REGS_DEVICE        = 0x16,
        ADMA_REGS_COMMAND       = 0x17,

        /* PCI device IDs */
        board_1841_idx          = 0,    /* ADMA 2-port controller */
};

typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;

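/*
 * Per-port private data.  pkt is one DMA-coherent buffer holding the CPB
 * (command parameter block) immediately followed by its PRD table, and
 * pkt_dma is the bus address programmed into ADMA_CPB_NEXT.  state records
 * whether the port is idle, running an ADMA packet, or running a plain
 * taskfile (MMIO/PIO) command, so the interrupt handler knows which
 * completion path applies.
 */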
struct adma_port_priv {
        u8                      *pkt;
        dma_addr_t              pkt_dma;
        adma_state_t            state;
};

static int adma_ata_init_one(struct pci_dev *pdev,
                                const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_port_stop(struct ata_port *ap);
static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_freeze(struct ata_port *ap);
static void adma_thaw(struct ata_port *ap);
static int adma_prereset(struct ata_link *link, unsigned long deadline);

static struct scsi_host_template adma_ata_sht = {
        ATA_BASE_SHT(DRV_NAME),
        .sg_tablesize           = LIBATA_MAX_PRD,
        .dma_boundary           = ADMA_DMA_BOUNDARY,
};

static struct ata_port_operations adma_ata_ops = {
        .inherits               = &ata_sff_port_ops,

        .lost_interrupt         = ATA_OP_NULL,

        .check_atapi_dma        = adma_check_atapi_dma,
        .qc_prep                = adma_qc_prep,
        .qc_issue               = adma_qc_issue,

        .freeze                 = adma_freeze,
        .thaw                   = adma_thaw,
        .prereset               = adma_prereset,

        .port_start             = adma_port_start,
        .port_stop              = adma_port_stop,
};

static struct ata_port_info adma_port_info[] = {
        /* board_1841_idx */
        {
                .flags          = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
                .pio_mask       = ATA_PIO4_ONLY,
                .udma_mask      = ATA_UDMA4,
                .port_ops       = &adma_ata_ops,
        },
};

static const struct pci_device_id adma_ata_pci_tbl[] = {
        { PCI_VDEVICE(PDC, 0x1841), board_1841_idx },

        { }     /* terminate list */
};

static struct pci_driver adma_ata_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = adma_ata_pci_tbl,
        .probe                  = adma_ata_init_one,
        .remove                 = ata_pci_remove_one,
};

static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        return 1;       /* ATAPI DMA not yet supported */
}

static void adma_reset_engine(struct ata_port *ap)
{
        void __iomem *chan = ADMA_PORT_REGS(ap);

        /* reset ADMA to idle state */
        writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
        udelay(2);
        writew(aPIOMD4, chan + ADMA_CONTROL);
        udelay(2);
}

static void adma_reinit_engine(struct ata_port *ap)
{
        struct adma_port_priv *pp = ap->private_data;
        void __iomem *chan = ADMA_PORT_REGS(ap);

        /* mask/clear ATA interrupts */
        writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
        ata_sff_check_status(ap);

        /* reset the ADMA engine */
        adma_reset_engine(ap);

        /* set in-FIFO threshold to 0x100 */
        writew(0x100, chan + ADMA_FIFO_IN);

        /* set CPB pointer */
        writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);

        /* set out-FIFO threshold to 0x100 */
        writew(0x100, chan + ADMA_FIFO_OUT);

        /* set CPB count */
        writew(1, chan + ADMA_CPB_COUNT);

        /* read/discard ADMA status */
        readb(chan + ADMA_STATUS);
}

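/*
 * Return the channel to plain register mode: writing only aPIOMD4 to
 * ADMA_CONTROL clears aGO/aNIEN/aRSTADM, and the ADMA_STATUS read flushes
 * the posted write before the caller touches the classic taskfile
 * registers.
 */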
static inline void adma_enter_reg_mode(struct ata_port *ap)
{
        void __iomem *chan = ADMA_PORT_REGS(ap);

        writew(aPIOMD4, chan + ADMA_CONTROL);
        readb(chan + ADMA_STATUS);      /* flush */
}

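/*
 * libata error-handling hooks: freeze masks both the ATA interrupt
 * (ATA_NIEN) and the ADMA interrupt (aNIEN) and parks the engine,
 * thaw rebuilds the engine state via adma_reinit_engine(), and prereset
 * does the same before deferring to the standard SFF prereset.
 */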
static void adma_freeze(struct ata_port *ap)
{
        void __iomem *chan = ADMA_PORT_REGS(ap);

        /* mask/clear ATA interrupts */
        writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
        ata_sff_check_status(ap);

        /* reset ADMA to idle state */
        writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
        udelay(2);
        writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL);
        udelay(2);
}

static void adma_thaw(struct ata_port *ap)
{
        adma_reinit_engine(ap);
}

static int adma_prereset(struct ata_link *link, unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct adma_port_priv *pp = ap->private_data;

        if (pp->state != adma_state_idle) /* healthy paranoia */
                pp->state = adma_state_mmio;
        adma_reinit_engine(ap);

        return ata_sff_prereset(link, deadline);
}

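/*
 * Append the scatter/gather list to the packet as ADMA PRD entries,
 * starting right after the CPB (buf[3] holds the CPB length in 8-byte
 * units, biased by 2).  Each 16-byte entry, as built here, appears to be:
 * a 32-bit bus address, the length in 8-byte units, a flags byte
 * (direction/ordering), the DMA mode, and a 32-bit pointer chaining to
 * the next entry; pEND is OR'd into the flags of the final entry to
 * terminate the chain.
 */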
static int adma_fill_sg(struct ata_queued_cmd *qc)
{
        struct scatterlist *sg;
        struct ata_port *ap = qc->ap;
        struct adma_port_priv *pp = ap->private_data;
        u8  *buf = pp->pkt, *last_buf = NULL;
        int i = (2 + buf[3]) * 8;
        u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
        unsigned int si;

        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                u32 addr;
                u32 len;

                addr = (u32)sg_dma_address(sg);
                *(__le32 *)(buf + i) = cpu_to_le32(addr);
                i += 4;

                len = sg_dma_len(sg) >> 3;
                *(__le32 *)(buf + i) = cpu_to_le32(len);
                i += 4;

                last_buf = &buf[i];
                buf[i++] = pFLAGS;
                buf[i++] = qc->dev->dma_mode & 0xf;
                buf[i++] = 0;   /* pPKLW */
                buf[i++] = 0;   /* reserved */

                *(__le32 *)(buf + i) =
                        (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
                i += 4;

                VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
                                        (unsigned long)addr, len);
        }

        if (likely(last_buf))
                *last_buf |= pEND;

        return i;
}

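/*
 * Build the CPB for an ATA_PROT_DMA command.  As constructed below, the
 * CPB holds a response-flags byte, a reserved byte, a control byte
 * (cVLD | cDAT | cIEN), cLEN (the length of the register block in 8-byte
 * units minus 2), a 32-bit next-CPB pointer (cNCPB, pointed back at this
 * CPB), a 32-bit PRD pointer (cPRD, filled in once the register block
 * length is known), four reserved bytes, and then value/register-index
 * pairs that the engine writes to the taskfile, ending with the command
 * byte tagged rEND.  Non-DMA protocols just drop the channel into
 * register mode and are handled by the SFF path.
 */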
static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
{
        struct adma_port_priv *pp = qc->ap->private_data;
        u8  *buf = pp->pkt;
        u32 pkt_dma = (u32)pp->pkt_dma;
        int i = 0;

        VPRINTK("ENTER\n");

        adma_enter_reg_mode(qc->ap);
        if (qc->tf.protocol != ATA_PROT_DMA)
                return AC_ERR_OK;

        buf[i++] = 0;   /* Response flags */
        buf[i++] = 0;   /* reserved */
        buf[i++] = cVLD | cDAT | cIEN;
        i++;            /* cLEN, gets filled in below */

        *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma);      /* cNCPB */
        i += 4;         /* cNCPB */
        i += 4;         /* cPRD, gets filled in below */

        buf[i++] = 0;   /* reserved */
        buf[i++] = 0;   /* reserved */
        buf[i++] = 0;   /* reserved */
        buf[i++] = 0;   /* reserved */

        /* ATA registers; must be a multiple of 4 */
        buf[i++] = qc->tf.device;
        buf[i++] = ADMA_REGS_DEVICE;
        if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
                buf[i++] = qc->tf.hob_nsect;
                buf[i++] = ADMA_REGS_SECTOR_COUNT;
                buf[i++] = qc->tf.hob_lbal;
                buf[i++] = ADMA_REGS_LBA_LOW;
                buf[i++] = qc->tf.hob_lbam;
                buf[i++] = ADMA_REGS_LBA_MID;
                buf[i++] = qc->tf.hob_lbah;
                buf[i++] = ADMA_REGS_LBA_HIGH;
        }
        buf[i++] = qc->tf.nsect;
        buf[i++] = ADMA_REGS_SECTOR_COUNT;
        buf[i++] = qc->tf.lbal;
        buf[i++] = ADMA_REGS_LBA_LOW;
        buf[i++] = qc->tf.lbam;
        buf[i++] = ADMA_REGS_LBA_MID;
        buf[i++] = qc->tf.lbah;
        buf[i++] = ADMA_REGS_LBA_HIGH;
        buf[i++] = 0;
        buf[i++] = ADMA_REGS_CONTROL;
        buf[i++] = rIGN;
        buf[i++] = 0;
        buf[i++] = qc->tf.command;
        buf[i++] = ADMA_REGS_COMMAND | rEND;

        buf[3] = (i >> 3) - 2;                          /* cLEN */
        *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i);  /* cPRD */

        i = adma_fill_sg(qc);
        wmb();  /* flush PRDs and pkt to memory */
#if 0
        /* dump out CPB + PRDs for debug */
        {
                int j, len = 0;
                static char obuf[2048];
                for (j = 0; j < i; ++j) {
                        len += sprintf(obuf+len, "%02x ", buf[j]);
                        if ((j & 7) == 7) {
                                printk("%s\n", obuf);
                                len = 0;
                        }
                }
                if (len)
                        printk("%s\n", obuf);
        }
#endif
        return AC_ERR_OK;
}

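/*
 * Command issue: ATA DMA commands run through the ADMA engine (packet
 * state, started by setting aGO in ADMA_CONTROL).  ATAPI DMA can never
 * get here because adma_check_atapi_dma() rejects it, and everything
 * else falls back to the polled SFF taskfile path in MMIO state.
 */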
static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *chan = ADMA_PORT_REGS(ap);

        VPRINTK("ENTER, ap %p\n", ap);

        /* fire up the ADMA engine */
        writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
}

static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
{
        struct adma_port_priv *pp = qc->ap->private_data;

        switch (qc->tf.protocol) {
        case ATA_PROT_DMA:
                pp->state = adma_state_pkt;
                adma_packet_start(qc);
                return 0;

        case ATAPI_PROT_DMA:
                BUG();
                break;

        default:
                break;
        }

        pp->state = adma_state_mmio;
        return ata_sff_qc_issue(qc);
}

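/*
 * Service ports in packet (ADMA) state: a non-zero ADMA_STATUS means the
 * engine has something to report, so drop back to register mode, map the
 * status bits (aPERR/aPSD/aUIRQ) and the CPB response flags
 * (cATERR/cDONE) onto libata error masks, then either complete the qc or
 * hand it to EH (abort for device errors, freeze for anything else).
 */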
static inline unsigned int adma_intr_pkt(struct ata_host *host)
{
        unsigned int handled = 0, port_no;

        for (port_no = 0; port_no < host->n_ports; ++port_no) {
                struct ata_port *ap = host->ports[port_no];
                struct adma_port_priv *pp;
                struct ata_queued_cmd *qc;
                void __iomem *chan = ADMA_PORT_REGS(ap);
                u8 status = readb(chan + ADMA_STATUS);

                if (status == 0)
                        continue;
                handled = 1;
                adma_enter_reg_mode(ap);
                pp = ap->private_data;
                if (!pp || pp->state != adma_state_pkt)
                        continue;
                qc = ata_qc_from_tag(ap, ap->link.active_tag);
                if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
                        if (status & aPERR)
                                qc->err_mask |= AC_ERR_HOST_BUS;
                        else if ((status & (aPSD | aUIRQ)))
                                qc->err_mask |= AC_ERR_OTHER;

                        if (pp->pkt[0] & cATERR)
                                qc->err_mask |= AC_ERR_DEV;
                        else if (pp->pkt[0] != cDONE)
                                qc->err_mask |= AC_ERR_OTHER;

                        if (!qc->err_mask)
                                ata_qc_complete(qc);
                        else {
                                struct ata_eh_info *ehi = &ap->link.eh_info;
                                ata_ehi_clear_desc(ehi);
                                ata_ehi_push_desc(ehi,
                                        "ADMA-status 0x%02X", status);
                                ata_ehi_push_desc(ehi,
                                        "pkt[0] 0x%02X", pp->pkt[0]);

                                if (qc->err_mask == AC_ERR_DEV)
                                        ata_port_abort(ap);
                                else
                                        ata_port_freeze(ap);
                        }
                }
        }
        return handled;
}

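/*
 * Service ports in MMIO (register) state: commands issued through the
 * SFF path complete here once the taskfile status no longer shows BSY.
 * Reading the status register also clears INTRQ, which matters because
 * the interrupt line is requested shared.
 */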
static inline unsigned int adma_intr_mmio(struct ata_host *host)
{
        unsigned int handled = 0, port_no;

        for (port_no = 0; port_no < host->n_ports; ++port_no) {
                struct ata_port *ap = host->ports[port_no];
                struct adma_port_priv *pp = ap->private_data;
                struct ata_queued_cmd *qc;

                if (!pp || pp->state != adma_state_mmio)
                        continue;
                qc = ata_qc_from_tag(ap, ap->link.active_tag);
                if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {

                        /* check main status, clearing INTRQ */
                        u8 status = ata_sff_check_status(ap);
                        if ((status & ATA_BUSY))
                                continue;
                        DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
                                ap->print_id, qc->tf.protocol, status);

                        /* complete taskfile transaction */
                        pp->state = adma_state_idle;
                        qc->err_mask |= ac_err_mask(status);
                        if (!qc->err_mask)
                                ata_qc_complete(qc);
                        else {
                                struct ata_eh_info *ehi = &ap->link.eh_info;
                                ata_ehi_clear_desc(ehi);
                                ata_ehi_push_desc(ehi, "status 0x%02X", status);

                                if (qc->err_mask == AC_ERR_DEV)
                                        ata_port_abort(ap);
                                else
                                        ata_port_freeze(ap);
                        }
                        handled = 1;
                }
        }
        return handled;
}

static irqreturn_t adma_intr(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int handled = 0;

        VPRINTK("ENTER\n");

        spin_lock(&host->lock);
        handled  = adma_intr_pkt(host) | adma_intr_mmio(host);
        spin_unlock(&host->lock);

        VPRINTK("EXIT\n");

        return IRQ_RETVAL(handled);
}

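/*
 * The classic taskfile registers are memory-mapped in BAR4 with a 4-byte
 * stride (0x00 data, 0x04 error/feature, ... 0x1c status/command) and
 * the control/altstatus register at offset 0x38.
 */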
static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
        port->cmd_addr          =
        port->data_addr         = base + 0x000;
        port->error_addr        =
        port->feature_addr      = base + 0x004;
        port->nsect_addr        = base + 0x008;
        port->lbal_addr         = base + 0x00c;
        port->lbam_addr         = base + 0x010;
        port->lbah_addr         = base + 0x014;
        port->device_addr       = base + 0x018;
        port->status_addr       =
        port->command_addr      = base + 0x01c;
        port->altstatus_addr    =
        port->ctl_addr          = base + 0x038;
}

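/*
 * Per-port setup: allocate the private data and the DMA-coherent CPB+PRD
 * packet buffer (both managed allocations, so no explicit free is
 * needed), sanity-check its 8-byte alignment, and bring the ADMA engine
 * to a known state.  port_stop only has to park the engine in reset.
 */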
static int adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct adma_port_priv *pp;

        adma_enter_reg_mode(ap);
        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
                                      GFP_KERNEL);
        if (!pp->pkt)
                return -ENOMEM;
        /* paranoia? */
        if ((pp->pkt_dma & 7) != 0) {
                printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
                                                (u32)pp->pkt_dma);
                return -ENOMEM;
        }
        ap->private_data = pp;
        adma_reinit_engine(ap);
        return 0;
}

static void adma_port_stop(struct ata_port *ap)
{
        adma_reset_engine(ap);
}

static void adma_host_init(struct ata_host *host, unsigned int chip_id)
{
        unsigned int port_no;

        /* enable/lock aGO operation */
        writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK);

        /* reset the ADMA logic */
        for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
                adma_reset_engine(host->ports[port_no]);
}

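/*
 * PCI probe: allocate a two-port host, map BAR4 (the only BAR this
 * driver uses), restrict DMA to 32 bits (the CPB/PRD pointers are
 * programmed as 32-bit values), wire up the per-port taskfile addresses,
 * unlock the ADMA engines, and activate the host with a shared
 * interrupt handler.
 */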
static int adma_ata_init_one(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        unsigned int board_idx = (unsigned int) ent->driver_data;
        const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
        struct ata_host *host;
        void __iomem *mmio_base;
        int rc, port_no;

        ata_print_version_once(&pdev->dev, DRV_VERSION);

        /* alloc host */
        host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
        if (!host)
                return -ENOMEM;

        /* acquire resources and fill host */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
                return -ENODEV;

        rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);
        mmio_base = host->iomap[ADMA_MMIO_BAR];

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc) {
                dev_err(&pdev->dev, "32-bit DMA enable failed\n");
                return rc;
        }

        for (port_no = 0; port_no < ADMA_PORTS; ++port_no) {
                struct ata_port *ap = host->ports[port_no];
                void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no);
                unsigned int offset = port_base - mmio_base;

                adma_ata_setup_port(&ap->ioaddr, port_base);

                ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio");
                ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port");
        }

        /* initialize adapter */
        adma_host_init(host, board_idx);

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED,
                                 &adma_ata_sht);
}

module_pci_driver(adma_ata_pci_driver);

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
