linux/drivers/ide/ide-dma.c
/*
 *  IDE DMA support (including IDE PCI BM-DMA).
 *
 *  Copyright (C) 1995-1998   Mark Lord
 *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 *  Special thanks to Mark for his six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

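/*
 * Drives known to handle DMA reliably; consulted by ide_dma_good_drive().
 */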
static const struct drive_list_entry drive_whitelist[] = {
	{ "Micropolis 2112A",	NULL },
	{ "CONNER CTMA 4000",	NULL },
	{ "CONNER CTT8000-A",	NULL },
	{ "ST34342A",		NULL },
	{ NULL,			NULL }
};

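/*
 * Drives with known DMA problems: a model string plus an optional
 * firmware revision (NULL matches any revision).  Consulted by
 * __ide_dma_bad_drive().
 */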
static const struct drive_list_entry drive_blacklist[] = {
	{ "WDC AC11000H",			NULL },
	{ "WDC AC22100H",			NULL },
	{ "WDC AC32500H",			NULL },
	{ "WDC AC33100H",			NULL },
	{ "WDC AC31600H",			NULL },
	{ "WDC AC32100H",			"24.09P07" },
	{ "WDC AC23200L",			"21.10N21" },
	{ "Compaq CRD-8241B",			NULL },
	{ "CRD-8400B",				NULL },
	{ "CRD-8480B",				NULL },
	{ "CRD-8482B",				NULL },
	{ "CRD-84",				NULL },
	{ "SanDisk SDP3B",			NULL },
	{ "SanDisk SDP3B-64",			NULL },
	{ "SANYO CD-ROM CRD",			NULL },
	{ "HITACHI CDR-8",			NULL },
	{ "HITACHI CDR-8335",			NULL },
	{ "HITACHI CDR-8435",			NULL },
	{ "Toshiba CD-ROM XM-6202B",		NULL },
	{ "TOSHIBA CD-ROM XM-1702BC",		NULL },
	{ "CD-532E-A",				NULL },
	{ "E-IDE CD-ROM CR-840",		NULL },
	{ "CD-ROM Drive/F5A",			NULL },
	{ "WPI CDD-820",			NULL },
	{ "SAMSUNG CD-ROM SC-148C",		NULL },
	{ "SAMSUNG CD-ROM SC",			NULL },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL },
	{ "_NEC DV5800A",			NULL },
	{ "SAMSUNG CD-ROM SN-124",		"N001" },
	{ "Seagate STT20000A",			NULL },
	{ "CD-ROM CDR_U200",			"1.09" },
	{ NULL,					NULL }
};

/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an
 *	IDE device.
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	u8 stat = 0, dma_stat = 0;

	drive->waiting_for_dma = 0;
	dma_stat = hwif->dma_ops->dma_end(drive);
	ide_dma_unmap_sg(drive, cmd);
	stat = hwif->tp_ops->read_status(hwif);

	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
		if (!dma_stat) {
			if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
				ide_finish_cmd(drive, cmd, stat);
			else
				ide_complete_rq(drive, BLK_STS_OK,
						blk_rq_sectors(cmd->rq) << 9);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
			drive->name, __func__, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

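/**
 *	ide_dma_good_drive	-	check the DMA whitelist
 *	@drive: IDE device
 *
 *	Returns 1 if the drive is on the list of drives known to handle
 *	DMA reliably, 0 otherwise.
 */
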
int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 *	ide_dma_map_sg	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to map the DMA table for
 *	@cmd: command
 *
 *	Perform the DMA mapping magic necessary to access the source or
 *	target buffers of a request via DMA.  The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */

static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	int i;

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		cmd->sg_dma_direction = DMA_TO_DEVICE;
	else
		cmd->sg_dma_direction = DMA_FROM_DEVICE;

	i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
	if (i) {
		cmd->orig_sg_nents = cmd->sg_nents;
		cmd->sg_nents = i;
	}

	return i;
}

/**
 *	ide_dma_unmap_sg	-	clean up DMA mapping
 *	@drive: the drive to unmap
 *	@cmd: command
 *
 *	Tear down mappings after DMA has completed.  This must be called
 *	after the completion of each use of ide_build_dmatable and before
 *	the next use of ide_build_dmatable.  Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */

void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;

	dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
		     cmd->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_dma_unmap_sg);

/**
 *	ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off DMA for this IDE device without logging a message.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 0);

	drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 *	ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	ide_dma_off_quietly(drive);
}
EXPORT_SYMBOL(ide_dma_off);

/**
 *	ide_dma_on	-	Enable DMA on a device
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
	drive->dev_flags |= IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 1);

	drive->hwif->dma_ops->dma_host_set(drive, 1);
}

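/**
 *	__ide_dma_bad_drive	-	check the DMA blacklist
 *	@drive: IDE device
 *
 *	Returns 1 and logs a warning if the drive is on the list of drives
 *	with known DMA problems, 0 otherwise.
 */
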
int __ide_dma_bad_drive(ide_drive_t *drive)
{
	u16 *id = drive->id;

	int blacklist = ide_in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
				    drive->name, (char *)&id[ATA_ID_PROD]);
		return blacklist;
	}
	return 0;
}
EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};

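/**
 *	ide_get_mode_mask	-	build a host/device DMA mode mask
 *	@drive: IDE device
 *	@base: base transfer mode (XFER_UDMA_0, XFER_MW_DMA_0 or XFER_SW_DMA_0)
 *	@req_mode: requested transfer mode
 *
 *	Returns the bitmask of DMA modes, relative to @base, that both the
 *	device and the host support, after applying any port specific
 *	mode filter.
 */
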
static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	u16 *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch (base) {
	case XFER_UDMA_0:
		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
			break;
		mask = id[ATA_ID_UDMA_MODES];
		if (port_ops && port_ops->udma_filter)
			mask &= port_ops->udma_filter(drive);
		else
			mask &= hwif->ultra_mask;

		/*
		 * Avoid a false cable warning from eighty_ninty_three():
		 * only check the cable if a mode above UDMA2 is requested.
		 */
		if (req_mode > XFER_UDMA_2) {
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		mask = id[ATA_ID_MWDMA_MODES];

		/* Also look for the CF specific MWDMA modes... */
		if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) {
			u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1;

			mask |= ((2 << mode) - 1) << 3;
		}

		if (port_ops && port_ops->mdma_filter)
			mask &= port_ops->mdma_filter(drive);
		else
			mask &= hwif->mwdma_mask;
		break;
	case XFER_SW_DMA_0:
		mask = id[ATA_ID_SWDMA_MODES];
		if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) {
			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

			/*
			 * If the mode is valid, convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2).
			 */
			if (mode <= 2)
				mask = (2 << mode) - 1;
		}
		mask &= hwif->swdma_mask;
		break;
	default:
		BUG();
		break;
	}

	return mask;
}

/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/* is this correct? */
		if (ide_dma_good_drive(drive) &&
		    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
			mode = XFER_MW_DMA_1;
	}

	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
			  mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}

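/**
 *	ide_tune_dma	-	tune a drive for DMA
 *	@drive: IDE device
 *
 *	Check that the drive advertises DMA support, is not blacklisted and
 *	is not flagged nodma, then select and program the best DMA mode.
 *	Returns 1 on success, 0 on failure.
 */
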
static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	if (ata_id_has_dma(drive->id) == 0 ||
	    (drive->dev_flags & IDE_DFLAG_NODMA))
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);

	if (!speed)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}

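/**
 *	ide_dma_check	-	check and set up DMA for a drive
 *	@drive: IDE device
 *
 *	Try to configure DMA for the drive, falling back to the best PIO
 *	mode on failure.  Returns 0 if DMA was configured, -1 otherwise.
 */
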
static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	if (ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	ide_set_max_pio(drive);

	return -1;
}

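/**
 *	ide_set_dma	-	configure and enable DMA for a drive
 *	@drive: IDE device
 *
 *	Re-run the DMA configuration check with DMA switched off, and
 *	enable DMA if it succeeds.  Returns 0 on success.
 */
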
int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMA off at the start of the check.  Some chipsets appear
	 * to do interesting things if the DMA status is not checked and
	 * cleared first.  PARANOIA!!!
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc)
		return rc;

	ide_dma_on(drive);

	return 0;
}

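/**
 *	ide_check_dma_crc	-	drop the transfer mode after CRC errors
 *	@drive: IDE device
 *
 *	Step down to the next lower UDMA mode (or force PIO if the current
 *	mode is not UDMA), then re-enable DMA if the resulting mode is
 *	still a DMA mode.
 */
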
void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non-Ultra-DMA modes without iCRCs.  Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}

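/**
 *	ide_dma_lost_irq	-	generic lost interrupt handler
 *	@drive: IDE device
 *
 *	Default handler for a lost DMA interrupt: just log the recovery.
 */
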
void ide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}
EXPORT_SYMBOL_GPL(ide_dma_lost_irq);

/*
 * Un-busy the port, etc., and clear any pending DMA status.  We want to
 * retry the current request in PIO mode instead of risking tossing it
 * all away.
 */
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;
	struct ide_cmd *cmd = &hwif->cmd;
	ide_startstop_t ret = ide_stopped;

	/* end the current DMA transaction */
	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		drive->waiting_for_dma = 0;
		(void)dma_ops->dma_end(drive);
		ide_dma_unmap_sg(drive, cmd);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		if (dma_ops->dma_clear)
			dma_ops->dma_clear(drive);
		printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
		if (dma_ops->dma_test_irq(drive) == 0) {
			ide_dump_status(drive, "DMA timeout",
					hwif->tp_ops->read_status(hwif));
			drive->waiting_for_dma = 0;
			(void)dma_ops->dma_end(drive);
			ide_dma_unmap_sg(drive, cmd);
		}
	}

	/*
	 * Disable DMA for now, but remember that we did so because of
	 * a timeout -- we'll re-enable after we finish this next request
	 * (or rather the first chunk of it) in PIO.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/* make sure the request is sane */
	if (hwif->rq)
		scsi_req(hwif->rq)->result = 0;
	return ret;
}

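/**
 *	ide_release_dma_engine	-	release the PRD table
 *	@hwif: IDE port
 *
 *	Free the coherent PRD table allocated by ide_allocate_dma_engine(),
 *	if any.
 */
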
void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

		dma_free_coherent(hwif->dev, prd_size,
				  hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);

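/**
 *	ide_allocate_dma_engine	-	allocate the PRD table
 *	@hwif: IDE port
 *
 *	Allocate a coherent buffer for the PRD table, using the port
 *	specific geometry when set and the PRD_ENTRIES/PRD_BYTES defaults
 *	otherwise.  Returns 0 on success or -ENOMEM.
 */
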
int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	int prd_size;

	if (hwif->prd_max_nents == 0)
		hwif->prd_max_nents = PRD_ENTRIES;
	if (hwif->prd_ent_size == 0)
		hwif->prd_ent_size = PRD_BYTES;

	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
						&hwif->dmatable_dma,
						GFP_ATOMIC);
	if (hwif->dmatable_cpu == NULL) {
		printk(KERN_ERR "%s: unable to allocate PRD table\n",
			hwif->name);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);

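/**
 *	ide_dma_prepare	-	prepare a command for DMA
 *	@drive: IDE device
 *	@cmd: command
 *
 *	Map the command's scatterlist and program the DMA engine.  Returns
 *	0 when the command is set up for DMA, 1 when the caller should fall
 *	back to PIO.
 */
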
int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
{
	const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;

	if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
	    (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)))
		goto out;
	ide_map_sg(drive, cmd);
	if (ide_dma_map_sg(drive, cmd) == 0)
		goto out_map;
	if (dma_ops->dma_setup(drive, cmd))
		goto out_dma_unmap;
	drive->waiting_for_dma = 1;
	return 0;
out_dma_unmap:
	ide_dma_unmap_sg(drive, cmd);
out_map:
	ide_map_sg(drive, cmd);
out:
	return 1;
}