1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/kernel.h>
31#include <linux/timer.h>
32#include <linux/mm.h>
33#include <linux/interrupt.h>
34#include <linux/major.h>
35#include <linux/errno.h>
36#include <linux/genhd.h>
37#include <linux/blkpg.h>
38#include <linux/slab.h>
39#include <linux/init.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <linux/ide.h>
43#include <linux/completion.h>
44#include <linux/reboot.h>
45#include <linux/cdrom.h>
46#include <linux/seq_file.h>
47#include <linux/device.h>
48#include <linux/kmod.h>
49#include <linux/scatterlist.h>
50#include <linux/bitops.h>
51
52#include <asm/byteorder.h>
53#include <asm/irq.h>
54#include <asm/uaccess.h>
55#include <asm/io.h>
56
/*
 * __ide_end_request - complete (part of) the current request
 * @drive:    drive owning the request
 * @rq:       the request being completed
 * @uptodate: completion status (failure when end_io_error() is true)
 * @nr_bytes: number of bytes to complete
 * @dequeue:  nonzero to dequeue @rq and clear the hwgroup's current rq
 *
 * Returns 1 while the request still has uncompleted chunks, 0 once it
 * has been fully ended.
 * NOTE(review): callers appear to hold ide_lock -- verify at call sites.
 */
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
		 int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;

	/*
	 * a failed "noretry" (failfast) request is completed in full
	 * right away rather than chunk by chunk
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_bytes = rq->hard_nr_sectors << 9;

	/* record failure of non-fs requests in rq->errors (once) */
	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * DMA was turned off after a timeout; re-enable it now unless we
	 * have already retried in PIO more than 3 times
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
		/* no chunks left: finish the request off */
		add_disk_randomness(rq->rq_disk);
		if (dequeue) {
			if (!list_empty(&rq->queuelist))
				blkdev_dequeue_request(rq);
			HWGROUP(drive)->rq = NULL;
		}
		end_that_request_last(rq, uptodate);
		ret = 0;
	}

	return ret;
}
94
95
96
97
98
99
100
101
102
103
104
105
106int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
107{
108 unsigned int nr_bytes = nr_sectors << 9;
109 struct request *rq;
110 unsigned long flags;
111 int ret = 1;
112
113
114
115
116
117 spin_lock_irqsave(&ide_lock, flags);
118 rq = HWGROUP(drive)->rq;
119
120 if (!nr_bytes) {
121 if (blk_pc_request(rq))
122 nr_bytes = rq->data_len;
123 else
124 nr_bytes = rq->hard_cur_sectors << 9;
125 }
126
127 ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
128
129 spin_unlock_irqrestore(&ide_lock, flags);
130 return ret;
131}
132EXPORT_SYMBOL(ide_end_request);
133
134
135
136
137
138
139
/*
 * Power-management step values walked by ide_start_power_step() /
 * ide_complete_power_step().  Suspend steps count up from
 * ide_pm_state_start_suspend, resume steps from
 * ide_pm_state_start_resume.
 */
enum {
	ide_pm_flush_cache = ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_restore_pio = ide_pm_state_start_resume,
	idedisk_pm_idle,
	ide_pm_restore_dma,
};
148
/*
 * ide_complete_power_step - advance the PM state machine one step
 * @drive: drive being suspended/resumed
 * @rq:    the PM request (state in rq->data)
 * @stat:  status of the just-finished step (unused here)
 * @error: error of the just-finished step (unused here)
 *
 * Sets pm->pm_step to the step that follows the one that just
 * completed.  Only disk devices walk intermediate steps; other media
 * leave pm_step untouched.
 */
static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* suspend step 1 (flush cache) done */
		/* a freeze skips the standby step */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* suspend step 2 (standby) done */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_restore_pio:	/* resume step 1 (restore pio) done */
		pm->pm_step = idedisk_pm_idle;
		break;
	case idedisk_pm_idle:		/* resume step 2 (idle) done */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}
174
/*
 * ide_start_power_step - issue the command for the current PM step
 * @drive: drive being suspended/resumed
 * @rq:    the PM request (state in rq->data, taskfile in rq->special)
 *
 * Builds and submits the ATA command for pm->pm_step.  Steps that need
 * no command on this device either advance the state machine and
 * return ide_stopped (so the caller re-enters with the next step) or
 * fall through to mark the whole sequence completed.
 */
static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* write cache disabled or FLUSH CACHE unsupported: skip */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
		else
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_standby:	/* suspend step 2 (standby) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_restore_pio:	/* resume step 1 (restore pio) */
		ide_set_max_pio(drive);
		/* non-disk media skip the idle step entirely */
		if (drive->media != ide_disk)
			pm->pm_step = ide_pm_restore_dma;
		else
			ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;

	case idedisk_pm_idle:		/* resume step 2 (idle) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case ide_pm_restore_dma:	/* resume step 3 (restore DMA) */
		/*
		 * No host support for DMA at all: nothing to restore.
		 * Otherwise cycle DMA off quietly, then re-tune it.
		 */
		if (drive->hwif->ide_dma_on == NULL)
			break;
		drive->hwif->dma_off_quietly(drive);
		/* NOTE(review): ->using_dma is not consulted here -- confirm
		 * whether DMA should be re-enabled unconditionally */
		ide_set_dma(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;
}
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
257 int uptodate, int nr_sectors)
258{
259 unsigned long flags;
260 int ret;
261
262 spin_lock_irqsave(&ide_lock, flags);
263 BUG_ON(!blk_rq_started(rq));
264 ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
265 spin_unlock_irqrestore(&ide_lock, flags);
266
267 return ret;
268}
269EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
270
271
272
273
274
275
276
277
278
279
/*
 * ide_complete_pm_request - end the current power-management request
 * @drive: target drive
 * @rq:    the PM request
 *
 * On suspend the queue is stopped; on resume the drive is unblocked
 * and the queue restarted.  The request is then dequeued and completed
 * successfully, all under ide_lock.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
		blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, 1);
	spin_unlock_irqrestore(&ide_lock, flags);
}
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
/*
 * ide_end_drive_cmd - end the drive's current command request
 * @drive: drive whose command completed
 * @stat:  taskfile status byte
 * @err:   taskfile error byte
 *
 * Copies completion status (and, for taskfile requests, the full
 * register file) back into the request's result buffer, then dequeues
 * and completes it.  PM requests instead advance the power-management
 * state machine and return without touching the queue here.
 */
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			/* result layout: status, error, sector count */
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			/* clear bit 0x80 in device control so the reads below
			   return the low-order register bytes */
			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
			args[2] = hwif->INB(IDE_NSECTOR_REG);
			args[3] = hwif->INB(IDE_SECTOR_REG);
			args[4] = hwif->INB(IDE_LCYL_REG);
			args[5] = hwif->INB(IDE_HCYL_REG);
			args[6] = hwif->INB(IDE_SELECT_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = (ide_task_t *) rq->special;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			if (args->tf_in_flags.b.data) {
				/* split the 16-bit data port into tf/hob bytes */
				u16 data = hwif->INW(IDE_DATA_REG);
				args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF;
				args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF;
			}
			args->tfRegister[IDE_ERROR_OFFSET] = err;
			/* low-order register bytes first (0x80 cleared) */
			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
			args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
			args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
			args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
			args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG);
			args->tfRegister[IDE_STATUS_OFFSET] = stat;

			/* extended addressing: re-read with bit 0x80 set to
			   capture the high-order (HOB) register bytes */
			if (drive->addressing == 1) {
				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
				args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG);
				args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
				args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
				args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
				args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
			}
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
			drive->name, rq->pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);
402
403
404
405
406
407
408
409
410
411
412
413static void try_to_flush_leftover_data (ide_drive_t *drive)
414{
415 int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
416
417 if (drive->media != ide_disk)
418 return;
419 while (i > 0) {
420 u32 buffer[16];
421 u32 wcount = (i > 16) ? 16 : i;
422
423 i -= wcount;
424 HWIF(drive)->ata_input_data(drive, buffer, wcount);
425 }
426}
427
428static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
429{
430 if (rq->rq_disk) {
431 ide_driver_t *drv;
432
433 drv = *(ide_driver_t **)rq->rq_disk->private_data;
434 drv->end_request(drive, 0, 0);
435 } else
436 ide_end_request(drive, 0, 0);
437}
438
/*
 * ide_ata_error - handle an error reported by an ATA (disk) device
 * @drive: the drive
 * @rq:    failed request
 * @stat:  status register contents
 * @err:   error register contents
 *
 * Classifies the failure (reset needed, CRC retry, unrecoverable
 * media error, recalibrate) and either kills the request, resets the
 * bus, or bumps rq->errors so repeated failures escalate.
 */
static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* status is unreliable while BUSY (or on write fault) */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		if (err == ABRT_ERR) {
			/* ignore an abort of WIN_SPECIFY on LBA drives --
			   NOTE(review): presumably some drives reject it */
			if (drive->select.b.lba &&
			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* interface CRC error -- count it and just retry */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* bad block / uncorrectable: retries won't help */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* schedule a recalibrate to help it find track 0 */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0)
		try_to_flush_leftover_data(drive);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	/* drive still busy or wanting data: force a reset */
	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}
489
490static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
491{
492 ide_hwif_t *hwif = drive->hwif;
493
494 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
495
496 rq->errors |= ERROR_RESET;
497 } else {
498
499 }
500
501 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
502
503 hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);
504
505 if (rq->errors >= ERROR_MAX) {
506 ide_kill_rq(drive, rq);
507 } else {
508 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
509 ++rq->errors;
510 return ide_do_reset(drive);
511 }
512 ++rq->errors;
513 }
514
515 return ide_stopped;
516}
517
518ide_startstop_t
519__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
520{
521 if (drive->media == ide_disk)
522 return ide_ata_error(drive, rq, stat, err);
523 return ide_atapi_error(drive, rq, stat, err);
524}
525
526EXPORT_SYMBOL_GPL(__ide_error);
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
542{
543 struct request *rq;
544 u8 err;
545
546 err = ide_dump_status(drive, msg, stat);
547
548 if ((rq = HWGROUP(drive)->rq) == NULL)
549 return ide_stopped;
550
551
552 if (!blk_fs_request(rq)) {
553 rq->errors = 1;
554 ide_end_drive_cmd(drive, stat, err);
555 return ide_stopped;
556 }
557
558 if (rq->rq_disk) {
559 ide_driver_t *drv;
560
561 drv = *(ide_driver_t **)rq->rq_disk->private_data;
562 return drv->error(drive, rq, stat, err);
563 } else
564 return __ide_error(drive, rq, stat, err);
565}
566
567EXPORT_SYMBOL_GPL(ide_error);
568
569ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
570{
571 if (drive->media != ide_disk)
572 rq->errors |= ERROR_RESET;
573
574 ide_kill_rq(drive, rq);
575
576 return ide_stopped;
577}
578
579EXPORT_SYMBOL_GPL(__ide_abort);
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
596{
597 struct request *rq;
598
599 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
600 return ide_stopped;
601
602
603 if (!blk_fs_request(rq)) {
604 rq->errors = 1;
605 ide_end_drive_cmd(drive, BUSY_STAT, 0);
606 return ide_stopped;
607 }
608
609 if (rq->rq_disk) {
610 ide_driver_t *drv;
611
612 drv = *(ide_driver_t **)rq->rq_disk->private_data;
613 return drv->abort(drive, rq);
614 } else
615 return __ide_abort(drive, rq);
616}
617
618
619
620
621
622
623
624
625
626
627
628
/*
 * ide_cmd - issue a simple drive command
 * @drive:   target drive
 * @cmd:     command opcode
 * @nsect:   value for the sector-count register
 * @handler: interrupt handler armed for the completion IRQ
 *
 * Writes device control (when the port exists), unmasks the drive,
 * loads the sector-count register and issues @cmd; the register-write
 * order here matters for the hardware.
 */
static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
		ide_handler_t *handler)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);	/* clear nIEN */
	SELECT_MASK(drive,0);
	hwif->OUTB(nsect,IDE_NSECTOR_REG);
	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
}
639
640
641
642
643
644
645
646
647
648
649
/*
 * drive_cmd_intr - completion interrupt for simple drive commands
 *
 * For REQ_TYPE_ATA_CMD requests that carry data (args[3] sectors),
 * pulls the sector data in forced 16-bit PIO into args[4..], then
 * polls briefly (up to 10 x 100us) for BUSY to clear.  Finally checks
 * status and either raises an error or completes the request.
 */
static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	ide_hwif_t *hwif = HWIF(drive);
	u8 *args = (u8 *) rq->buffer;
	u8 stat = hwif->INB(IDE_STATUS_REG);
	int retries = 10;

	local_irq_enable_in_hardirq();
	if (rq->cmd_type == REQ_TYPE_ATA_CMD &&
	    (stat & DRQ_STAT) && args && args[3]) {
		/* force 16-bit transfers for the data phase */
		u8 io_32bit = drive->io_32bit;
		drive->io_32bit = 0;
		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
		drive->io_32bit = io_32bit;
		/* give the drive up to ~1ms to drop BUSY */
		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
			udelay(100);
	}

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "drive_cmd", stat);
		/* ide_error() ends the request via ide_end_drive_cmd() */

	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}
675
676static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
677{
678 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
679 task->tfRegister[IDE_SECTOR_OFFSET] = drive->sect;
680 task->tfRegister[IDE_LCYL_OFFSET] = drive->cyl;
681 task->tfRegister[IDE_HCYL_OFFSET] = drive->cyl>>8;
682 task->tfRegister[IDE_SELECT_OFFSET] = ((drive->head-1)|drive->select.all)&0xBF;
683 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;
684
685 task->handler = &set_geometry_intr;
686}
687
688static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
689{
690 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
691 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;
692
693 task->handler = &recal_intr;
694}
695
696static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
697{
698 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
699 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;
700
701 task->handler = &set_multmode_intr;
702}
703
/*
 * ide_disk_special - run queued "special" setup commands for a disk
 *
 * Consumes one pending flag from drive->special (geometry, then
 * recalibrate, then set-multmode -- in that priority order), builds
 * the matching taskfile and issues it.  Unknown flags are reported
 * and cleared.  Returns ide_started once a command is in flight.
 */
static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.command_type = IDE_DRIVE_TASK_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_init_specify_cmd(drive, &args);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_init_restore_cmd(drive, &args);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		/* clamp to what the drive's identify data allows */
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_init_setmult_cmd(drive, &args);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	do_rw_taskfile(drive, &args);

	return ide_started;
}
734
735
736
737
738static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
739{
740 switch (req_pio) {
741 case 202:
742 case 201:
743 case 200:
744 case 102:
745 case 101:
746 case 100:
747 return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
748 case 9:
749 case 8:
750 return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
751 case 7:
752 case 6:
753 return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
754 default:
755 return 0;
756 }
757}
758
759
760
761
762
763
764
765
766
767
/*
 * do_special - issue pending "special" drive setup commands
 *
 * Handles drive->special flags.  PIO retuning (set_tune) is performed
 * inline here -- including the legacy "abused" tuning values decoded
 * by set_pio_mode_abuse() -- while the remaining disk setup commands
 * are delegated to ide_disk_special().
 */
static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		ide_hwif_t *hwif = drive->hwif;
		u8 req_pio = drive->tune_req;

		s->b.set_tune = 0;

		if (set_pio_mode_abuse(drive->hwif, req_pio)) {
			/* legacy host-specific tuning value in disguise */
			if (hwif->set_pio_mode == NULL)
				return ide_stopped;

			/* 8/9 map to prefetch on/off (IDE_HFLAG_ABUSE_PREFETCH);
			   serialize those against IRQ context with ide_lock */
			if (req_pio == 8 || req_pio == 9) {
				unsigned long flags;

				spin_lock_irqsave(&ide_lock, flags);
				hwif->set_pio_mode(drive, req_pio);
				spin_unlock_irqrestore(&ide_lock, flags);
			} else
				hwif->set_pio_mode(drive, req_pio);
		} else {
			int keep_dma = drive->using_dma;

			ide_set_pio(drive, req_pio);

			/* some hosts drop DMA when PIO is reprogrammed;
			   restore it if it was on before */
			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
				if (keep_dma)
					hwif->ide_dma_on(drive);
			}
		}

		return ide_stopped;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		/* non-disk media: just clear any leftover flags */
		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}
818
819void ide_map_sg(ide_drive_t *drive, struct request *rq)
820{
821 ide_hwif_t *hwif = drive->hwif;
822 struct scatterlist *sg = hwif->sg_table;
823
824 if (hwif->sg_mapped)
825 return;
826
827 if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
828 hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
829 } else {
830 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
831 hwif->sg_nents = 1;
832 }
833}
834
835EXPORT_SYMBOL_GPL(ide_map_sg);
836
837void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
838{
839 ide_hwif_t *hwif = drive->hwif;
840
841 hwif->nsect = hwif->nleft = rq->nr_sectors;
842 hwif->cursg_ofs = 0;
843 hwif->cursg = NULL;
844}
845
846EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
847
848
849
850
851
852
853
854
855
856
857
858
859
/*
 * execute_drive_cmd - execute a special drive command
 *
 * Dispatches REQ_TYPE_ATA_TASKFILE / REQ_TYPE_ATA_TASK /
 * REQ_TYPE_ATA_CMD requests to the hardware.  A request with a NULL
 * payload is completed immediately with the current status/error
 * registers -- that is a valid way of waiting for all prior requests
 * to be flushed from the queue.
 */
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = rq->special;

		if (!args)
			goto done;

		hwif->data_phase = args->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			/* data phases need an SG table set up */
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
			/* fall through */
		default:
			break;
		}

		if (args->tf_out_flags.all != 0)
			return flagged_taskfile(drive, args);
		return do_rw_taskfile(drive, args);
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = rq->buffer;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_TASK_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("fr=0x%02x ", args[1]);
		printk("ns=0x%02x ", args[2]);
		printk("sc=0x%02x ", args[3]);
		printk("lcyl=0x%02x ", args[4]);
		printk("hcyl=0x%02x ", args[5]);
		printk("sel=0x%02x\n", args[6]);
#endif
		/* load the taskfile registers from the 7-byte arg buffer */
		hwif->OUTB(args[1], IDE_FEATURE_REG);
		hwif->OUTB(args[3], IDE_SECTOR_REG);
		hwif->OUTB(args[4], IDE_LCYL_REG);
		hwif->OUTB(args[5], IDE_HCYL_REG);
		hwif->OUTB((args[6] & 0xEF)|drive->select.all, IDE_SELECT_REG);
		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
		return ide_started;
	} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = rq->buffer;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("sc=0x%02x ", args[1]);
		printk("fr=0x%02x ", args[2]);
		printk("xx=0x%02x\n", args[3]);
#endif
		if (args[0] == WIN_SMART) {
			/* SMART commands need the 0x4f/0xc2 key in LCYL/HCYL */
			hwif->OUTB(0x4f, IDE_LCYL_REG);
			hwif->OUTB(0xc2, IDE_HCYL_REG);
			hwif->OUTB(args[2],IDE_FEATURE_REG);
			hwif->OUTB(args[1],IDE_SECTOR_REG);
			ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
			return ide_started;
		}
		hwif->OUTB(args[2],IDE_FEATURE_REG);
		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
		return ide_started;
	}

done:
	/*
	 * NULL payload (or unrecognized cmd_type): complete immediately
	 * with the live status/error registers
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive,
			hwif->INB(IDE_STATUS_REG),
			hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}
946
/*
 * ide_check_pm_state - pre-processing for a power-management request
 *
 * At suspend start: block the drive so no further ordinary requests
 * are serviced.  At resume start: select the drive and wait for BSY
 * to clear (first on the bus, then on the drive) before the resume
 * steps are issued.
 */
static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * Wait (with a long timeout) for BSY to go away first --
		 * a drive on this hwif may just be coming out of reset.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		if (IDE_CONTROL_REG)
			HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
		/* now wait for the selected drive itself */
		rc = ide_wait_not_busy(HWIF(drive), 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}
980
981
982
983
984
985
986
987
988
989
990
991
/*
 * start_request - start servicing a request on a drive
 *
 * Selects the drive, waits for it to become ready, then dispatches:
 * pending special setup commands, drive commands, PM steps, or the
 * owning driver's do_request method for ordinary fs requests.
 * Returns the state start_request left the hardware in.
 */
static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t	startstop;
	sector_t block;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* drives past their failure limit get every request killed */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		goto kill_rq;
	}

	block    = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy)) {
		block += drive->sect0;	/* apply sector offset remap */
	}

	/* NOTE(review): looks like legacy disk-manager (EZ-Drive style)
	   remapping of block 0 to block 1 -- confirm */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/* transfer speed unknown (0xff): reprogram the desired one
		   before issuing anything else */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, rq->pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	/* special flags pending: service those first */
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
1073{
1074 if (timeout > WAIT_WORSTCASE)
1075 timeout = WAIT_WORSTCASE;
1076 drive->sleep = timeout + jiffies;
1077 drive->sleeping = 1;
1078}
1079
1080EXPORT_SYMBOL(ide_stall_queue);
1081
1082#define WAKEUP(drive) ((drive)->service_start + 2 * (drive)->service_time)
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
/*
 * choose_drive - select the next drive in the hwgroup to service
 *
 * Picks the drive (non-sleeping, or past its sleep deadline) with a
 * non-empty, unplugged queue whose wakeup deadline (WAKEUP()) is
 * earliest.  A "nice1" drive that has used real service time may be
 * briefly stalled so other drives can run first.  Returns NULL when
 * no drive is ready.
 */
static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * Queue is flushing (barrier sequence in progress): service this
	 * drive immediately without letting anything preempt it.
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * remove any plug so the request below is visible; a replug
		 * race here only means extra requests get handled this cycle
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			/* prefer: any over none, earlier sleep deadline,
			   then earlier WAKEUP() deadline */
			if (!best
			 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best))))
			{
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * best has slack before its deadline: stall it if some
			 * other drive could realistically use that window
			 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
/*
 * ide_do_request - issue pending requests for a hwgroup
 * @hwgroup:    the hwgroup to service
 * @masked_irq: an IRQ the caller has already masked (IDE_NO_IRQ: none)
 *
 * Main request-issue loop.  Entered with ide_lock held and interrupts
 * disabled; the lock is dropped around start_request() after masking
 * the interface IRQ so the completion handler cannot race with
 * command setup.  hwgroup->busy serializes re-entry.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t *drive;
	ide_hwif_t *hwif;
	struct request *rq;
	ide_startstop_t startstop;
	int loops = 0;

	/* NOTE(review): platform bus lock hook (released below when the
	   hwgroup goes idle) -- confirm which arches implement it */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must hold ide_lock with interrupts off */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			/* nothing runnable: find the earliest sleep deadline */
			int sleeping = 0;
			unsigned long sleep = 0;
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Arm the hwgroup timer to wake us at the
				 * earliest drive deadline (at least
				 * WAIT_MIN_SLEEP from now).
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so ide_timer_expiry knows this is a snooze,
				 * not a lost interrupt */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* hwgroup->busy intentionally stays 1 while
				 * sleeping; the timer clears it */
			} else {
				/* fully idle: drop platform lock, go quiescent */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq &&
		    hwif != hwgroup->hwif &&
		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
			/* switching interfaces on a shared IRQ: mask the
			 * previous hwif's interrupt line */
			SELECT_INTERRUPT(drive);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			/* choose_drive() only returns unplugged queues */
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * queue is known non-empty, but prep_rq_fn may still have
		 * killed the request in the meantime
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * While the drive is blocked (power managed) only PM
		 * requests and head-of-queue REQ_PREEMPT requests may be
		 * serviced; try up to 4 other drives before giving up.
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* no pending ATA command at this point, so it is
			 * safe to clear busy */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Mask this interface's IRQ while the request is being set
		 * up, so a stray completion interrupt cannot race us; the
		 * caller may already have masked one IRQ for us.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
			/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}
1321
1322
1323
1324
1325void do_ide_request(struct request_queue *q)
1326{
1327 ide_drive_t *drive = q->queuedata;
1328
1329 ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
1330}
1331
1332
1333
1334
1335
1336
/*
 * ide_dma_timeout_retry - recovery path for a DMA timeout
 * @drive: the drive that timed out
 * @error: <0 means run the full error path (end DMA + ide_error())
 *
 * Stops the DMA engine, switches the drive into DMA_PIO_RETRY state
 * (DMA is re-enabled later by __ide_end_request() after a successful
 * PIO retry) and rewinds the current request to the start of its
 * current bio so it will be reissued in PIO.
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end the current dma transaction
	 */
	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
						hwif->INB(IDE_STATUS_REG));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_timeout(drive);
	}

	/*
	 * disable dma for now, remembering it was a timeout so the next
	 * chunk goes out in PIO and DMA can be re-enabled afterwards
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	hwif->dma_off_quietly(drive);

	/*
	 * detach the request from the hwgroup and sanitize it for reissue
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	/* rewind to the start of the current bio */
	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
/*
 * ide_timer_expiry - hwgroup timer function
 * @data: the hwgroup (cast from unsigned long)
 *
 * Fires when an expected IDE interrupt never arrived, or when a
 * sleeping hwgroup's snooze ends.  A registered expiry callback may
 * extend the wait; otherwise the pending handler or error recovery is
 * run with the interface IRQ disabled (simulating interrupt context),
 * after which request processing is restarted.
 */
void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t *handler;
	ide_expiry_t *expiry;
	unsigned long flags;
	unsigned long wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout (the interrupt arrived just as
		 * the timer expired -- req_gen moved on), or we were only
		 * "sleeping"; nothing to complain about either way.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* expiry callback may ask us to keep waiting */
				if ((wait = expiry(drive)) > 0) {
					/* re-arm the timer and bail */
					hwgroup->timer.expires = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * Simulate a real interrupt when invoking handler():
			 * mask the interface IRQ globally, drop ide_lock.
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
#if DISABLE_IRQ_NOSYNC
			disable_irq_nosync(hwif->irq);
#else
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
#endif
			/* local CPU only, as if we were in the IRQ handler */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				/* the drive finished but we never saw the IRQ */
				if (drive->waiting_for_dma)
					hwgroup->hwif->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	/* resume normal request processing */
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
1526{
1527 u8 stat;
1528 ide_hwif_t *hwif = hwgroup->hwif;
1529
1530
1531
1532
1533 do {
1534 if (hwif->irq == irq) {
1535 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
1536 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
1537
1538 static unsigned long last_msgtime, count;
1539 ++count;
1540 if (time_after(jiffies, last_msgtime + HZ)) {
1541 last_msgtime = jiffies;
1542 printk(KERN_ERR "%s%s: unexpected interrupt, "
1543 "status=0x%02x, count=%ld\n",
1544 hwif->name,
1545 (hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
1546 }
1547 }
1548 }
1549 } while ((hwif = hwif->next) != hwgroup->hwif);
1550}
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq:    interrupt number
 *	@dev_id: the hwgroup registered with request_irq()
 *
 *	Entry point for all interrupts on interfaces in this hwgroup.
 *	Acknowledges the interrupt, validates that we are actually expecting
 *	one (a handler is pending and the selected drive is ready), then
 *	clears the pending handler/timer and dispatches to it with ide_lock
 *	dropped.  Returns IRQ_HANDLED when the interrupt was consumed,
 *	IRQ_NONE when it was not ours (so shared-IRQ peers get a chance).
 */
irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		/* chipset says this interrupt is not for us */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * No pending command expected an interrupt (or we are
		 * polling and will read status ourselves).  This can be a
		 * shared-IRQ neighbour, a leftover from a previous command,
		 * or genuinely spurious hardware noise.
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->pci_dev && !hwif->pci_dev->vendor)
#endif
		{
			/*
			 * Probably not a PCI device (or vendor id unreadable):
			 * investigate and complain via unexpected_intr().
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * On a PCI device, just read status to clear the
			 * interrupt condition quietly.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * Handler pending but no drive selected -- should not
		 * happen; claim the interrupt so it doesn't scream, but
		 * there is nothing to dispatch to.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * The drive is still busy; this interrupt was likely for a
		 * shared-IRQ device.  Return IRQ_NONE so other handlers on
		 * this line get a shot.  The still-armed timer will recover
		 * us if our own interrupt really was lost.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		/* paranoia: a pending handler implies busy */
		hwgroup->busy = 1;
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	/* claim the pending handler and disarm the timeout timer */
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	/* drop the lock (but keep IRQs off) while running the handler */
	spin_unlock(&ide_lock);

	/*
	 * Some controllers need their bmdma interrupt status cleared even
	 * for PIO commands; skip it for DMA, where ide_dma_end() needs the
	 * status intact for error checking.
	 */
	if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
		hwif->ide_dma_clear_irq(drive);

	if (drive->unmask)
		/* drive allows other IRQs while it is being serviced */
		local_irq_enable_in_hardirq();
	/* service this interrupt; may install a handler for the next one */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);
	/*
	 * handler() may have set things up for another interrupt, but it
	 * cannot fire before we return: it is the same IRQ as the one
	 * currently being serviced and stays masked until we exit.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* expected on ide_stopped */
			hwgroup->busy = 0;
			/* command finished: start the next queued request */
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710void ide_init_drive_cmd (struct request *rq)
1711{
1712 memset(rq, 0, sizeof(*rq));
1713 rq->cmd_type = REQ_TYPE_ATA_CMD;
1714 rq->ref_count = 1;
1715}
1716
1717EXPORT_SYMBOL(ide_init_drive_cmd);
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
/**
 *	ide_do_drive_cmd	-	queue a special command request
 *	@drive:  drive the request is for
 *	@rq:     request to issue
 *	@action: queueing policy (ide_wait / ide_head_wait wait for
 *	         completion; ide_preempt / ide_head_wait insert at the
 *	         queue head; others append at the tail)
 *
 *	Inserts @rq into @drive's queue per @action and kicks request
 *	processing.  For the waiting actions, blocks until the request
 *	completes and returns 0 on success or -EIO if rq->errors was set;
 *	for non-waiting actions, returns 0 immediately.
 */
int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION_ONSTACK(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;

	/*
	 * Hold an extra reference on the request so it stays valid for
	 * inspecting rq->errors after completion; released below via
	 * blk_put_request().
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->end_io_data = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		/* forget the current request so ours runs next */
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		/* jump the queue, flagged so it may preempt the group */
		where = ELEVATOR_INSERT_FRONT;
		rq->cmd_flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		/* completion is signalled by blk_end_sync_rq() */
		wait_for_completion(&wait);
		if (rq->errors)
			err = -EIO;

		/* drop the extra reference taken above */
		blk_put_request(rq);
	}

	return err;
}
1786
1787EXPORT_SYMBOL(ide_do_drive_cmd);
1788