linux/drivers/ide/ide-taskfile.c
/*
 *  Copyright (C) 2000-2002        Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002        Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002        Klaus Smolin
 *                                      IBM Storage Technology Division
 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/nmi.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>

#include <asm/io.h>

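/*
 * Read the taskfile registers back into @cmd, also reading the HOB (high
 * order byte) registers when the command used LBA48 addressing.
 */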
void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;

        /* Be sure we're looking at the low order bytes */
        tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

        tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf);

        if (cmd->tf_flags & IDE_TFLAG_LBA48) {
                tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS);

                tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob);
        }
}

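/* Dump the taskfile and HOB registers of @cmd, prefixed by @s (DEBUG only). */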
void ide_tf_dump(const char *s, struct ide_cmd *cmd)
{
#ifdef DEBUG
        printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
               s, cmd->tf.feature, cmd->tf.nsect,
               cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah,
               cmd->tf.device, cmd->tf.command);
        printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n",
               s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah);
#endif
}

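/*
 * Read the 512-byte IDENTIFY DEVICE data (or IDENTIFY PACKET DEVICE for
 * non-disk devices) into @buf using a PIO taskfile command.
 */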
int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.nsect = 0x01;
        if (drive->media == ide_disk)
                cmd.tf.command = ATA_CMD_ID_ATA;
        else
                cmd.tf.command = ATA_CMD_ID_ATAPI;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
        cmd.protocol = ATA_PROT_PIO;

        return ide_raw_taskfile(drive, &cmd, buf, 1);
}

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
static ide_startstop_t task_pio_intr(ide_drive_t *);

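/*
 * Issue @orig_cmd to the drive: copy it into hwif->cmd, load the taskfile
 * registers (unless this is a DMA->PIO fallback retry) and start the PIO,
 * no-data or DMA protocol with the matching interrupt handler installed.
 */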
ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &hwif->cmd;
        struct ide_taskfile *tf = &cmd->tf;
        ide_handler_t *handler = NULL;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        const struct ide_dma_ops *dma_ops = hwif->dma_ops;

        if (orig_cmd->protocol == ATA_PROT_PIO &&
            (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) &&
            drive->mult_count == 0) {
                pr_err("%s: multimode not set!\n", drive->name);
                return ide_stopped;
        }

        if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
                orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;

        memcpy(cmd, orig_cmd, sizeof(*cmd));

        if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
                ide_tf_dump(drive->name, cmd);
                tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

                if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
                        u8 data[2] = { cmd->tf.data, cmd->hob.data };

                        tp_ops->output_data(drive, cmd, data, 2);
                }

                if (cmd->valid.out.tf & IDE_VALID_DEVICE) {
                        u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ?
                                  0xE0 : 0xEF;

                        if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED))
                                cmd->tf.device &= HIHI;
                        cmd->tf.device |= drive->select;
                }

                tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob);
                tp_ops->tf_load(drive, &cmd->tf,  cmd->valid.out.tf);
        }

        switch (cmd->protocol) {
        case ATA_PROT_PIO:
                if (cmd->tf_flags & IDE_TFLAG_WRITE) {
                        tp_ops->exec_command(hwif, tf->command);
                        ndelay(400);    /* FIXME */
                        return pre_task_out_intr(drive, cmd);
                }
                handler = task_pio_intr;
                /* fall-through */
        case ATA_PROT_NODATA:
                if (handler == NULL)
                        handler = task_no_data_intr;
                ide_execute_command(drive, cmd, handler, WAIT_WORSTCASE);
                return ide_started;
        case ATA_PROT_DMA:
                if (ide_dma_prepare(drive, cmd))
                        return ide_stopped;
                hwif->expiry = dma_ops->dma_timer_expiry;
                ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
                dma_ops->dma_start(drive);
                /* fall-through */
        default:
                return ide_started;
        }
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);

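/*
 * Interrupt handler for taskfile commands without a data phase, with custom
 * completion handling for ATA_CMD_SET_MULTI and ATA_CMD_INIT_DEV_PARAMS.
 */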
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &hwif->cmd;
        struct ide_taskfile *tf = &cmd->tf;
        int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
        int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
        u8 stat;

        local_irq_enable_in_hardirq();

        while (1) {
                stat = hwif->tp_ops->read_status(hwif);
                if ((stat & ATA_BUSY) == 0 || retries-- == 0)
                        break;
                udelay(10);
        }

        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
                if (custom && tf->command == ATA_CMD_SET_MULTI) {
                        drive->mult_req = drive->mult_count = 0;
                        drive->special_flags |= IDE_SFLAG_RECALIBRATE;
                        (void)ide_dump_status(drive, __func__, stat);
                        return ide_stopped;
                } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
                        if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
                                ide_set_handler(drive, &task_no_data_intr,
                                                WAIT_WORSTCASE);
                                return ide_started;
                        }
                }
                return ide_error(drive, "task_no_data_intr", stat);
        }

        if (custom && tf->command == ATA_CMD_SET_MULTI)
                drive->mult_count = drive->mult_req;

        if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
            tf->command == ATA_CMD_CHK_POWER) {
                struct request *rq = hwif->rq;

                if (ata_pm_request(rq))
                        ide_complete_pm_rq(drive, rq);
                else
                        ide_finish_cmd(drive, cmd, stat);
        }

        return ide_stopped;
}

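/* Poll the status register until the drive clears BSY (at most ~10 ms). */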
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        int retries;
        u8 stat;

        /*
         * Last sector was transferred, wait until device is ready.  This can
         * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
         */
        for (retries = 0; retries < 1000; retries++) {
                stat = hwif->tp_ops->read_status(hwif);

                if (stat & ATA_BUSY)
                        udelay(10);
                else
                        break;
        }

        if (stat & ATA_BUSY)
                pr_err("%s: drive still BUSY!\n", drive->name);

        return stat;
}

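/*
 * Transfer @len bytes of PIO data between the drive and the pages of the
 * command's scatterlist, advancing cmd->cursg, cmd->cursg_ofs and cmd->nleft
 * as the data is moved.
 */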
void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
                   unsigned int write, unsigned int len)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
        struct scatterlist *cursg = cmd->cursg;
        unsigned long uninitialized_var(flags);
        struct page *page;
        unsigned int offset;
        u8 *buf;

        if (cursg == NULL)
                cursg = cmd->cursg = sg;

        while (len) {
                unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);

                page = sg_page(cursg);
                offset = cursg->offset + cmd->cursg_ofs;

                /* get the current page and offset */
                page = nth_page(page, (offset >> PAGE_SHIFT));
                offset %= PAGE_SIZE;

                nr_bytes = min_t(unsigned, nr_bytes, (PAGE_SIZE - offset));

                buf = kmap_atomic(page) + offset;

                cmd->nleft -= nr_bytes;
                cmd->cursg_ofs += nr_bytes;

                if (cmd->cursg_ofs == cursg->length) {
                        cursg = cmd->cursg = sg_next(cmd->cursg);
                        cmd->cursg_ofs = 0;
                }

                /* do the actual data transfer */
                if (write)
                        hwif->tp_ops->output_data(drive, cmd, buf, nr_bytes);
                else
                        hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);

                kunmap_atomic(buf);

                len -= nr_bytes;
        }
}
EXPORT_SYMBOL_GPL(ide_pio_bytes);

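/*
 * Transfer one PIO data block: a single sector, or up to mult_count sectors
 * for multi-sector commands, temporarily forcing 16-bit I/O when requested.
 */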
static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
                              unsigned int write)
{
        unsigned int nr_bytes;

        u8 saved_io_32bit = drive->io_32bit;

        if (cmd->tf_flags & IDE_TFLAG_FS)
                scsi_req(cmd->rq)->result = 0;

        if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
                drive->io_32bit = 0;

        touch_softlockup_watchdog();

        if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
                nr_bytes = min_t(unsigned, cmd->nleft, drive->mult_count << 9);
        else
                nr_bytes = SECTOR_SIZE;

        ide_pio_bytes(drive, cmd, write, nr_bytes);

        drive->io_32bit = saved_io_32bit;
}

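/*
 * On error, partially complete a filesystem request so that only the data
 * known to have been transferred before the failure is accounted for.
 */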
static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
{
        if (cmd->tf_flags & IDE_TFLAG_FS) {
                int nr_bytes = cmd->nbytes - cmd->nleft;

                if (cmd->protocol == ATA_PROT_PIO &&
                    ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) {
                        if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
                                nr_bytes -= drive->mult_count << 9;
                        else
                                nr_bytes -= SECTOR_SIZE;
                }

                if (nr_bytes > 0)
                        ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
        }
}

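/*
 * Complete a taskfile command: store the error register in the request,
 * update the transfer rate and drive ID when IDE_TFLAG_SET_XFER was set and
 * the command succeeded, then finish the block layer request.
 */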
void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
{
        struct request *rq = drive->hwif->rq;
        u8 err = ide_read_error(drive), nsect = cmd->tf.nsect;
        u8 set_xfer = !!(cmd->tf_flags & IDE_TFLAG_SET_XFER);

        ide_complete_cmd(drive, cmd, stat, err);
        scsi_req(rq)->result = err;

        if (err == 0 && set_xfer) {
                ide_set_xfer_rate(drive, nsect);
                ide_driveid_update(drive);
        }

        ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
}

/*
 * Handler for command with PIO data phase.
 */
static ide_startstop_t task_pio_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &drive->hwif->cmd;
        u8 stat = hwif->tp_ops->read_status(hwif);
        u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);

        if (write == 0) {
                /* Error? */
                if (stat & ATA_ERR)
                        goto out_err;

                /* Didn't want any data? Odd. */
                if ((stat & ATA_DRQ) == 0) {
                        /* Command all done? */
                        if (OK_STAT(stat, ATA_DRDY, ATA_BUSY))
                                goto out_end;

                        /* Assume it was a spurious irq */
                        goto out_wait;
                }
        } else {
                if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
                        goto out_err;

                /* Deal with unexpected ATA data phase. */
                if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
                        goto out_err;
        }

        if (write && cmd->nleft == 0)
                goto out_end;

        /* Still data left to transfer. */
        ide_pio_datablock(drive, cmd, write);

        /* Are we done? Check status and finish transfer. */
        if (write == 0 && cmd->nleft == 0) {
                stat = wait_drive_not_busy(drive);
                if (!OK_STAT(stat, 0, BAD_STAT))
                        goto out_err;

                goto out_end;
        }
out_wait:
        /* Still data left to transfer. */
        ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
        return ide_started;
out_end:
        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                ide_finish_cmd(drive, cmd, stat);
        else
                ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9);
        return ide_stopped;
out_err:
        ide_error_cmd(drive, cmd);
        return ide_error(drive, __func__, stat);
}

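/*
 * Begin the data-out phase of a PIO write: wait for the drive to assert DRQ,
 * install task_pio_intr() and send the first data block.
 */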
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
                                         struct ide_cmd *cmd)
{
        ide_startstop_t startstop;

        if (ide_wait_stat(&startstop, drive, ATA_DRQ,
                          drive->bad_wstat, WAIT_DRQ)) {
                pr_err("%s: no DRQ after issuing %sWRITE%s\n", drive->name,
                        (cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "",
                        (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
                return startstop;
        }

        if (!force_irqthreads && (drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
                local_irq_disable();

        ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);

        ide_pio_datablock(drive, cmd, 1);

        return ide_started;
}

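/*
 * Execute @cmd synchronously through the block layer, mapping @buf
 * (@nsect whole sectors) into the request when data is transferred.
 */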
int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
                     u16 nsect)
{
        struct request *rq;
        int error;

        rq = blk_get_request(drive->queue,
                (cmd->tf_flags & IDE_TFLAG_WRITE) ?
                        REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
        ide_req(rq)->type = ATA_PRIV_TASKFILE;

        /*
         * (ks) Currently we transfer only whole sectors.  This is sufficient
         * for now, but it would be great to find a way to transfer any size,
         * in order to support special commands like READ LONG.
         */
        if (nsect) {
                error = blk_rq_map_kern(drive->queue, rq, buf,
                                        nsect * SECTOR_SIZE, GFP_NOIO);
                if (error)
                        goto put_req;
        }

        rq->special = cmd;
        cmd->rq = rq;

        blk_execute_rq(drive->queue, NULL, rq, 0);
        error = scsi_req(rq)->result ? -EIO : 0;
put_req:
        blk_put_request(rq);
        return error;
}
EXPORT_SYMBOL(ide_raw_taskfile);

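/* Synchronously execute a taskfile command that has no data phase. */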
int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
{
        cmd->protocol = ATA_PROT_NODATA;

        return ide_raw_taskfile(drive, cmd, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);

#ifdef CONFIG_IDE_TASK_IOCTL
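/*
 * HDIO_DRIVE_TASKFILE ioctl handler: copy the ide_task_request_t and its
 * output/input buffers from user space, translate it into a struct ide_cmd,
 * execute it and copy the results back.
 */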
int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
{
        ide_task_request_t      *req_task;
        struct ide_cmd          cmd;
        u8 *outbuf              = NULL;
        u8 *inbuf               = NULL;
        u8 *data_buf            = NULL;
        int err                 = 0;
        int tasksize            = sizeof(struct ide_task_request_s);
        unsigned int taskin     = 0;
        unsigned int taskout    = 0;
        u16 nsect               = 0;
        char __user *buf = (char __user *)arg;

        req_task = memdup_user(buf, tasksize);
        if (IS_ERR(req_task))
                return PTR_ERR(req_task);

        taskout = req_task->out_size;
        taskin  = req_task->in_size;

        if (taskin > 65536 || taskout > 65536) {
                err = -EINVAL;
                goto abort;
        }

        if (taskout) {
                int outtotal = tasksize;
                outbuf = kzalloc(taskout, GFP_KERNEL);
                if (outbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(outbuf, buf + outtotal, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        if (taskin) {
                int intotal = tasksize + taskout;
                inbuf = kzalloc(taskin, GFP_KERNEL);
                if (inbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(inbuf, buf + intotal, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        memset(&cmd, 0, sizeof(cmd));

        memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(&cmd.tf,  req_task->io_ports,  HDIO_DRIVE_TASK_HDR_SIZE);

        cmd.valid.out.tf = IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_DEVICE | IDE_VALID_IN_TF;
        cmd.tf_flags = IDE_TFLAG_IO_16BIT;

        if (drive->dev_flags & IDE_DFLAG_LBA48) {
                cmd.tf_flags |= IDE_TFLAG_LBA48;
                cmd.valid.in.hob = IDE_VALID_IN_HOB;
        }

        if (req_task->out_flags.all) {
                cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;

                if (req_task->out_flags.b.data)
                        cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;

                if (req_task->out_flags.b.nsector_hob)
                        cmd.valid.out.hob |= IDE_VALID_NSECT;
                if (req_task->out_flags.b.sector_hob)
                        cmd.valid.out.hob |= IDE_VALID_LBAL;
                if (req_task->out_flags.b.lcyl_hob)
                        cmd.valid.out.hob |= IDE_VALID_LBAM;
                if (req_task->out_flags.b.hcyl_hob)
                        cmd.valid.out.hob |= IDE_VALID_LBAH;

                if (req_task->out_flags.b.error_feature)
                        cmd.valid.out.tf  |= IDE_VALID_FEATURE;
                if (req_task->out_flags.b.nsector)
                        cmd.valid.out.tf  |= IDE_VALID_NSECT;
                if (req_task->out_flags.b.sector)
                        cmd.valid.out.tf  |= IDE_VALID_LBAL;
                if (req_task->out_flags.b.lcyl)
                        cmd.valid.out.tf  |= IDE_VALID_LBAM;
                if (req_task->out_flags.b.hcyl)
                        cmd.valid.out.tf  |= IDE_VALID_LBAH;
        } else {
                cmd.valid.out.tf |= IDE_VALID_OUT_TF;
                if (cmd.tf_flags & IDE_TFLAG_LBA48)
                        cmd.valid.out.hob |= IDE_VALID_OUT_HOB;
        }

        if (req_task->in_flags.b.data)
                cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;

        if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) {
                /* fixup data phase if needed */
                if (req_task->data_phase == TASKFILE_IN_DMAQ ||
                    req_task->data_phase == TASKFILE_IN_DMA)
                        cmd.tf_flags |= IDE_TFLAG_WRITE;
        }

        cmd.protocol = ATA_PROT_DMA;

        switch (req_task->data_phase) {
        case TASKFILE_MULTI_OUT:
                if (!drive->mult_count) {
                        /* (hs): give up if multcount is not set */
                        pr_err("%s: %s Multimode Write multcount is not set\n",
                                drive->name, __func__);
                        err = -EPERM;
                        goto abort;
                }
                cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
                /* fall through */
        case TASKFILE_OUT:
                cmd.protocol = ATA_PROT_PIO;
                /* fall through */
        case TASKFILE_OUT_DMAQ:
        case TASKFILE_OUT_DMA:
                cmd.tf_flags |= IDE_TFLAG_WRITE;
                nsect = taskout / SECTOR_SIZE;
                data_buf = outbuf;
                break;
        case TASKFILE_MULTI_IN:
                if (!drive->mult_count) {
                        /* (hs): give up if multcount is not set */
                        pr_err("%s: %s Multimode Read multcount is not set\n",
                                drive->name, __func__);
                        err = -EPERM;
                        goto abort;
                }
                cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
                /* fall through */
        case TASKFILE_IN:
                cmd.protocol = ATA_PROT_PIO;
                /* fall through */
        case TASKFILE_IN_DMAQ:
        case TASKFILE_IN_DMA:
                nsect = taskin / SECTOR_SIZE;
                data_buf = inbuf;
                break;
        case TASKFILE_NO_DATA:
                cmd.protocol = ATA_PROT_NODATA;
                break;
        default:
                err = -EFAULT;
                goto abort;
        }

        if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
                nsect = 0;
        else if (!nsect) {
                nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect;

                if (!nsect) {
                        pr_err("%s: in/out command without data\n",
                                        drive->name);
                        err = -EFAULT;
                        goto abort;
                }
        }

        err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);

        memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(req_task->io_ports,  &cmd.tf,  HDIO_DRIVE_TASK_HDR_SIZE);

        if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
            req_task->in_flags.all == 0) {
                req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
                if (drive->dev_flags & IDE_DFLAG_LBA48)
                        req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
        }

        if (copy_to_user(buf, req_task, tasksize)) {
                err = -EFAULT;
                goto abort;
        }
        if (taskout) {
                int outtotal = tasksize;
                if (copy_to_user(buf + outtotal, outbuf, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
        if (taskin) {
                int intotal = tasksize + taskout;
                if (copy_to_user(buf + intotal, inbuf, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
abort:
        kfree(req_task);
        kfree(outbuf);
        kfree(inbuf);

        return err;
}
#endif