/*
 *	IDE I/O functions
 *
 *	Basic PIO and command management functionality.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/io.h>
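/**
 *	ide_end_rq	-	complete (part of) the current request
 *	@drive: drive the request belongs to
 *	@rq: request being completed
 *	@error: block layer status for the completed bytes
 *	@nr_bytes: number of bytes to complete
 *
 *	Returns 0 when the request has been fully completed and handed back
 *	to the block layer, 1 when there is still work left on it.
 */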
int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
	       unsigned int nr_bytes)
{
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_update_request(rq, error, nr_bytes)) {
		if (rq == drive->sense_rq) {
			drive->sense_rq = NULL;
			drive->sense_rq_active = false;
		}

		__blk_mq_end_request(rq, error);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ide_end_rq);
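/**
 *	ide_complete_cmd	-	record the result of a taskfile command
 *	@drive: drive the command was issued to
 *	@cmd: command that has just completed
 *	@stat: status register value
 *	@err: error register value
 *
 *	Saves the completion status and the read-back taskfile into @cmd and,
 *	for commands attached to a taskfile request, frees a dynamically
 *	allocated command or copies the result back to the original one.
 */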
void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;
	u8 tf_cmd = tf->command;

	tf->error = err;
	tf->status = stat;

	if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
		u8 data[2];

		tp_ops->input_data(drive, cmd, data, 2);

		cmd->tf.data = data[0];
		cmd->hob.data = data[1];
	}

	ide_tf_readback(drive, cmd);

	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, cmd);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
	}

	if (rq && ata_taskfile_request(rq)) {
		struct ide_cmd *orig_cmd = ide_req(rq)->special;

		if (cmd->tf_flags & IDE_TFLAG_DYN)
			kfree(orig_cmd);
		else if (cmd != orig_cmd)
			memcpy(orig_cmd, cmd, sizeof(*cmd));
	}
}
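/**
 *	ide_complete_rq		-	end the request currently owned by the port
 *	@drive: drive the request belongs to
 *	@error: block layer status to complete the request with
 *	@nr_bytes: number of bytes to complete
 *
 *	Wrapper around ide_end_rq() that clears hwif->rq once the request
 *	has been fully completed.
 */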
int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int rc;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = blk_rq_sectors(rq) << 9;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		hwif->rq = NULL;

	return rc;
}
EXPORT_SYMBOL(ide_complete_rq);
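/**
 *	ide_kill_rq	-	terminate a request with an I/O error
 *	@drive: drive the request belongs to
 *	@rq: request to kill
 *
 *	Sets a suitable result for floppy/tape driver requests and for
 *	passthrough requests, then completes the whole request as failed.
 */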
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy || media == ide_tape) && drv_req) {
		scsi_req(rq)->result = 0;
	} else {
		if (media == ide_tape)
			scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
		else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
	}

	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
}
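/*
 * Helpers that fill in the taskfiles for the special per-drive setup
 * commands issued by do_special() below.
 */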
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->lbal = drive->sect;
	tf->lbam = drive->cyl;
	tf->lbah = drive->cyl >> 8;
	tf->device = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

/**
 *	do_special	-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 *	ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 */
static ide_startstop_t do_special(ide_drive_t *drive)
{
	struct ide_cmd cmd;

#ifdef DEBUG
	printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
		drive->special_flags);
#endif
	if (drive->media != ide_disk) {
		drive->special_flags = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = ATA_PROT_NODATA;

	if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
		drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
		drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
		drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else
		BUG();

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &cmd);

	return ide_started;
}
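/**
 *	ide_map_sg	-	map a request into the port scatter/gather table
 *	@drive: drive the request belongs to
 *	@cmd: command carrying the request
 *
 *	Maps the request's bio segments into hwif->sg_table and, when the
 *	queue has a DMA pad mask, extends the last segment so the transfer
 *	length is padded accordingly.
 */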
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
	struct request *rq = cmd->rq;

	cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
	if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
		last_sg->length +=
			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
}
EXPORT_SYMBOL_GPL(ide_map_sg);
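/* Initialize the byte counters and scatter/gather cursor for a new command. */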
void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
{
	cmd->nbytes = cmd->nleft = nr_bytes;
	cmd->cursg_ofs = 0;
	cmd->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 *	execute_drive_cmd	-	issue a drive specific task or command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation.
 */
static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
					 struct request *rq)
{
	struct ide_cmd *cmd = ide_req(rq)->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
			ide_map_sg(drive, cmd);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	scsi_req(rq)->result = 0;
	ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));

	return ide_stopped;
}
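/*
 * Handle the driverless special requests generated by the IDE core itself:
 * head (un)parking, device setting execution and drive reset.
 */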
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = scsi_req(rq)->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		BUG();
	}
}

/**
 *	start_request	-	start of I/O and command issuing for IDE
 *	@drive: drive the request is for
 *	@rq: request to start
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests.
 */
static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->rq_flags |= RQF_FAILED;
		goto kill_rq;
	}

	if (drive->prep_rq && !drive->prep_rq(drive, rq))
		return ide_stopped;

	if (ata_pm_request(rq))
		ide_check_pm_state(drive, rq);

	drive->hwif->tp_ops->dev_select(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}

	if (drive->special_flags == 0) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (ata_taskfile_request(rq))
			return execute_drive_cmd(drive, rq);
		else if (ata_pm_request(rq)) {
			struct ide_pm_state *pm = ide_req(rq)->special;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && ata_misc_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, blk_rq_pos(rq));
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the port by sleeping for timeout jiffies.
 */
void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);
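/*
 * Simple busy-flag based locking for a single port, plus an optional
 * host-wide lock used when the ports of a host must be serialized.
 */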
static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}
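/*
 * Put a request back on the queue (or just re-run the hardware queue when
 * no request is given) and kick it again after a short delay.
 */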
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;

	/* Use 3ms as that was the old plug delay */
	if (rq) {
		blk_mq_requeue_request(rq, false);
		blk_mq_delay_kick_requeue_list(q, 3);
	} else
		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}
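/*
 * Issue a request to a drive: take the host and port locks, select the
 * device and hand the request to start_request().  If the port or host is
 * busy, or the device is sleeping or blocked, the request is requeued
 * (either on the drive's internal list or via the block layer) instead.
 */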
blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
			  bool local_requeue)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	ide_startstop_t startstop;

	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
		rq->rq_flags |= RQF_DONTPREP;
		ide_req(rq)->special = NULL;
	}

	/* HLD do_request() callback might sleep, make sure it's okay */
	might_sleep();

	if (ide_lock_host(host, hwif))
		return BLK_STS_DEV_RESOURCE;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;

		WARN_ON_ONCE(hwif->rq);
repeat:
		prev_port = hwif->host->cur_port;
		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			ide_drive_t *cur_dev =
				prev_port ? prev_port->cur_dev : NULL;

			/*
			 * set nIEN for previous port, drives in the
			 * quirk list don't like intr masking.
			 */
			if (cur_dev &&
			    (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
				prev_port->tp_ops->write_devctl(prev_port,
								ATA_NIEN |
								ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed.  Requests forced to
		 * the head of the queue with RQF_PREEMPT are still allowed
		 * through.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    ata_pm_request(rq) == 0 &&
		    (rq->rq_flags & RQF_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		scsi_req(rq)->resid_len = blk_rq_bytes(rq);
		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped) {
			rq = hwif->rq;
			hwif->rq = NULL;
			if (rq)
				goto repeat;
			ide_unlock_port(hwif);
			goto out;
		}
	} else {
plug_device:
		if (local_requeue)
			list_add(&rq->queuelist, &drive->rq_list);
		spin_unlock_irq(&hwif->lock);
		ide_unlock_host(host);
		if (!local_requeue)
			ide_requeue_and_plug(drive, rq);
		return BLK_STS_OK;
	}

out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	return BLK_STS_OK;
}

/*
 * Issue a new request to a device.
 */
blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	ide_drive_t *drive = hctx->queue->queuedata;
	ide_hwif_t *hwif = drive->hwif;

	spin_lock_irq(&hwif->lock);
	if (drive->sense_rq_active) {
		spin_unlock_irq(&hwif->lock);
		return BLK_STS_DEV_RESOURCE;
	}
	spin_unlock_irq(&hwif->lock);

	blk_mq_start_request(bd->rq);
	return ide_issue_rq(drive, bd->rq, false);
}
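/*
 * Check whether the currently selected device could be the source of the
 * pending interrupt: for DMA we ask the DMA engine, otherwise we look at
 * the (alternate) status register and treat a non-busy drive as ready.
 */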
static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}

/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@t: timer that expired
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status.
 */
void ide_timer_expiry(struct timer_list *t)
{
	ide_hwif_t *hwif = from_timer(hwif, t, timer);
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	int wait = -1;
	int plug_device = 0;
	struct request *uninitialized_var(rq_in_flight);

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred (got the interrupt just
		 * as the timer expired), or we were "sleeping" to give other
		 * devices a chance.  Either way, we don't really want to
		 * complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) {
				/* continue */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		hwif->expiry = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);

		disable_irq(hwif->irq);

		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->port_ops && hwif->port_ops->clear_irq)
				hwif->port_ops->clear_irq(drive);

			printk(KERN_WARNING "%s: lost interrupt\n",
				drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}

		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped && hwif->polling == 0) {
			rq_in_flight = hwif->rq;
			hwif->rq = NULL;
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}
}
/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwif: port being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt if the
 *	status bits are in a sane state.
 */
static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}

/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do be aware it is subtle in
 *	places.
 *
 *	hwif is the interface in the group currently performing
 *	a command. hwif->cur_dev is the drive and hwif->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process.
 */
irqreturn_t ide_intr(int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;
	struct request *uninitialized_var(rq_in_flight);

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->port_ops && hwif->port_ops->test_irq &&
	    hwif->port_ops->test_irq(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->expiry = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped && hwif->polling == 0) {
		BUG_ON(hwif->handler);
		rq_in_flight = hwif->rq;
		hwif->rq = NULL;
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);
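/*
 * Pad a PIO transfer: write zero bytes to (or read and discard bytes from)
 * the data port, four bytes at a time, until len bytes have been consumed.
 */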
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
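/*
 * Insert a request on the drive's internal list ahead of new block layer
 * requests; setting sense_rq_active makes ide_queue_rq() back off until the
 * queued request has been dispatched by the rq_work handler.
 */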
void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
	drive->sense_rq_active = true;
	list_add_tail(&rq->queuelist, &drive->rq_list);
	kblockd_schedule_work(&drive->rq_work);
}
EXPORT_SYMBOL_GPL(ide_insert_request_head);