/*
 *  libata-scsi.c - helper library for ATA
 *
 *  SCSI <-> ATA translation layer: translates SCSI commands received
 *  from the SCSI midlayer into ATA/ATAPI taskfiles and simulates the
 *  SCSI commands that ATA devices cannot service directly.
 */
36#include <linux/kernel.h>
37#include <linux/blkdev.h>
38#include <linux/spinlock.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
45#include <scsi/scsi_transport.h>
46#include <linux/libata.h>
47#include <linux/hdreg.h>
48#include <linux/uaccess.h>
49#include <linux/suspend.h>
50
51#include "libata.h"
52
53#define SECTOR_SIZE 512
54#define ATA_SCSI_RBUF_SIZE 4096
55
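/*
 * Response data for simulated SCSI commands is built in this single
 * shared buffer, serialized by ata_scsi_rbuf_lock (see
 * ata_scsi_rbuf_get() and ata_scsi_rbuf_put() below).
 */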
56static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
57static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
58
59typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
60
61static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
62 const struct scsi_device *scsidev);
63static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
64 const struct scsi_device *scsidev);
65static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
66 unsigned int id, unsigned int lun);
67
68
69#define RW_RECOVERY_MPAGE 0x1
70#define RW_RECOVERY_MPAGE_LEN 12
71#define CACHE_MPAGE 0x8
72#define CACHE_MPAGE_LEN 20
73#define CONTROL_MPAGE 0xa
74#define CONTROL_MPAGE_LEN 12
75#define ALL_MPAGES 0x3f
76#define ALL_SUB_MPAGES 0xff
77
78
79static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
80 RW_RECOVERY_MPAGE,
81 RW_RECOVERY_MPAGE_LEN - 2,
82 (1 << 7),
83 0,
84 0, 0, 0, 0,
85 0,
86 0, 0, 0
87};
88
89static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
90 CACHE_MPAGE,
91 CACHE_MPAGE_LEN - 2,
92 0,
93 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 0,
95 0, 0, 0, 0, 0, 0, 0
96};
97
98static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
99 CONTROL_MPAGE,
100 CONTROL_MPAGE_LEN - 2,
101 2,
102 0,
103 0, 0, 0, 0, 0xff, 0xff,
104 0, 30
105};
106
107
108
109
110
111static struct scsi_transport_template ata_scsi_transport_template = {
112 .eh_strategy_handler = ata_scsi_error,
113 .eh_timed_out = ata_scsi_timed_out,
114 .user_scan = ata_scsi_user_scan,
115};
116
117
118static const struct {
119 enum link_pm value;
120 const char *name;
121} link_pm_policy[] = {
122 { NOT_AVAILABLE, "max_performance" },
123 { MIN_POWER, "min_power" },
124 { MAX_PERFORMANCE, "max_performance" },
125 { MEDIUM_POWER, "medium_power" },
126};
127
128static const char *ata_scsi_lpm_get(enum link_pm policy)
129{
130 int i;
131
132 for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
133 if (link_pm_policy[i].value == policy)
134 return link_pm_policy[i].name;
135
136 return NULL;
137}
138
139static ssize_t ata_scsi_lpm_put(struct device *dev,
140 struct device_attribute *attr,
141 const char *buf, size_t count)
142{
143 struct Scsi_Host *shost = class_to_shost(dev);
144 struct ata_port *ap = ata_shost_to_port(shost);
145 enum link_pm policy = 0;
146 int i;
147
148
149
150
151
152
153
154
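	/*
	 * Entry 0 (NOT_AVAILABLE) only exists so ata_scsi_lpm_get() can
	 * name the default state; it is not a policy the user may set,
	 * so start matching at entry 1.
	 */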
155 for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
156 const int len = strlen(link_pm_policy[i].name);
157 if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
158 buf[len] == '\n') {
159 policy = link_pm_policy[i].value;
160 break;
161 }
162 }
163 if (!policy)
164 return -EINVAL;
165
166 ata_lpm_schedule(ap, policy);
167 return count;
168}
169
170static ssize_t
171ata_scsi_lpm_show(struct device *dev, struct device_attribute *attr, char *buf)
172{
173 struct Scsi_Host *shost = class_to_shost(dev);
174 struct ata_port *ap = ata_shost_to_port(shost);
175 const char *policy =
176 ata_scsi_lpm_get(ap->pm_policy);
177
178 if (!policy)
179 return -EINVAL;
180
181 return snprintf(buf, 23, "%s\n", policy);
182}
183DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
184 ata_scsi_lpm_show, ata_scsi_lpm_put);
185EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
186
187static ssize_t ata_scsi_park_show(struct device *device,
188 struct device_attribute *attr, char *buf)
189{
190 struct scsi_device *sdev = to_scsi_device(device);
191 struct ata_port *ap;
192 struct ata_link *link;
193 struct ata_device *dev;
194 unsigned long flags, now;
195 unsigned int uninitialized_var(msecs);
196 int rc = 0;
197
198 ap = ata_shost_to_port(sdev->host);
199
200 spin_lock_irqsave(ap->lock, flags);
201 dev = ata_scsi_find_dev(ap, sdev);
202 if (!dev) {
203 rc = -ENODEV;
204 goto unlock;
205 }
206 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
207 rc = -EOPNOTSUPP;
208 goto unlock;
209 }
210
211 link = dev->link;
212 now = jiffies;
213 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
214 link->eh_context.unloaded_mask & (1 << dev->devno) &&
215 time_after(dev->unpark_deadline, now))
216 msecs = jiffies_to_msecs(dev->unpark_deadline - now);
217 else
218 msecs = 0;
219
220unlock:
	spin_unlock_irqrestore(ap->lock, flags);
222
223 return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
224}
225
226static ssize_t ata_scsi_park_store(struct device *device,
227 struct device_attribute *attr,
228 const char *buf, size_t len)
229{
230 struct scsi_device *sdev = to_scsi_device(device);
231 struct ata_port *ap;
232 struct ata_device *dev;
233 long int input;
234 unsigned long flags;
235 int rc;
236
237 rc = strict_strtol(buf, 10, &input);
238 if (rc || input < -2)
239 return -EINVAL;
240 if (input > ATA_TMOUT_MAX_PARK) {
241 rc = -EOVERFLOW;
242 input = ATA_TMOUT_MAX_PARK;
243 }
244
245 ap = ata_shost_to_port(sdev->host);
246
247 spin_lock_irqsave(ap->lock, flags);
248 dev = ata_scsi_find_dev(ap, sdev);
249 if (unlikely(!dev)) {
250 rc = -ENODEV;
251 goto unlock;
252 }
253 if (dev->class != ATA_DEV_ATA) {
254 rc = -EOPNOTSUPP;
255 goto unlock;
256 }
257
258 if (input >= 0) {
259 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
260 rc = -EOPNOTSUPP;
261 goto unlock;
262 }
263
264 dev->unpark_deadline = ata_deadline(jiffies, input);
265 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
266 ata_port_schedule_eh(ap);
267 complete(&ap->park_req_pending);
268 } else {
269 switch (input) {
270 case -1:
271 dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
272 break;
273 case -2:
274 dev->flags |= ATA_DFLAG_NO_UNLOAD;
275 break;
276 }
277 }
278unlock:
279 spin_unlock_irqrestore(ap->lock, flags);
280
281 return rc ? rc : len;
282}
283DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
284 ata_scsi_park_show, ata_scsi_park_store);
285EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
286
287static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
288{
289 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
290
291 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
292}
293
294static ssize_t
295ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
296 const char *buf, size_t count)
297{
298 struct Scsi_Host *shost = class_to_shost(dev);
299 struct ata_port *ap = ata_shost_to_port(shost);
300 if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
301 return ap->ops->em_store(ap, buf, count);
302 return -EINVAL;
303}
304
305static ssize_t
306ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
307 char *buf)
308{
309 struct Scsi_Host *shost = class_to_shost(dev);
310 struct ata_port *ap = ata_shost_to_port(shost);
311
312 if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
313 return ap->ops->em_show(ap, buf);
314 return -EINVAL;
315}
316DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
317 ata_scsi_em_message_show, ata_scsi_em_message_store);
318EXPORT_SYMBOL_GPL(dev_attr_em_message);
319
320static ssize_t
321ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
322 char *buf)
323{
324 struct Scsi_Host *shost = class_to_shost(dev);
325 struct ata_port *ap = ata_shost_to_port(shost);
326
327 return snprintf(buf, 23, "%d\n", ap->em_message_type);
328}
329DEVICE_ATTR(em_message_type, S_IRUGO,
330 ata_scsi_em_message_type_show, NULL);
331EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
332
333static ssize_t
334ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
335 char *buf)
336{
337 struct scsi_device *sdev = to_scsi_device(dev);
338 struct ata_port *ap = ata_shost_to_port(sdev->host);
339 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
340
341 if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
342 return ap->ops->sw_activity_show(atadev, buf);
343 return -EINVAL;
344}
345
346static ssize_t
347ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
348 const char *buf, size_t count)
349{
350 struct scsi_device *sdev = to_scsi_device(dev);
351 struct ata_port *ap = ata_shost_to_port(sdev->host);
352 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
353 enum sw_activity val;
354 int rc;
355
356 if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
357 val = simple_strtoul(buf, NULL, 0);
358 switch (val) {
359 case OFF: case BLINK_ON: case BLINK_OFF:
360 rc = ap->ops->sw_activity_store(atadev, val);
361 if (!rc)
362 return count;
363 else
364 return rc;
365 }
366 }
367 return -EINVAL;
368}
369DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
370 ata_scsi_activity_store);
371EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
372
373struct device_attribute *ata_common_sdev_attrs[] = {
374 &dev_attr_unload_heads,
375 NULL
376};
377EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
378
379static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
380 void (*done)(struct scsi_cmnd *))
381{
382 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
383
384 done(cmd);
385}
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
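/**
 *	ata_std_bios_param - generic bios head/sector/cylinder calculator
 *	@sdev: SCSI device for which geometry is to be determined
 *	@bdev: block device associated with @sdev
 *	@capacity: capacity of the SCSI device
 *	@geom: location to which geometry will be output
 *
 *	Generic bios head/sector/cylinder calculator used by sd.
 *	Reports the 255 heads / 63 sectors-per-track mapping that most
 *	BIOSes expect.
 *
 *	RETURNS:
 *	Zero.
 */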
405int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
406 sector_t capacity, int geom[])
407{
408 geom[0] = 255;
409 geom[1] = 63;
410 sector_div(capacity, 255*63);
411 geom[2] = capacity;
412
413 return 0;
414}
415
416
417
418
419
420
421
422
423
424
425
426
427
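/**
 *	ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
 *	@ap: target port
 *	@sdev: SCSI device to get identify data for
 *	@arg: User buffer area for identify data
 *
 *	Copies the device's IDENTIFY data to user space, with the
 *	product, firmware revision and serial number fields converted
 *	to their printable (byte-swapped) string form.
 *
 *	RETURNS:
 *	Zero on success, negative errno on error.
 */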
428static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
429 void __user *arg)
430{
431 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
432 u16 __user *dst = arg;
433 char buf[40];
434
435 if (!dev)
436 return -ENOMSG;
437
438 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
439 return -EFAULT;
440
441 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
442 if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
443 return -EFAULT;
444
445 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
446 if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
447 return -EFAULT;
448
449 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
450 if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
451 return -EFAULT;
452
453 return 0;
454}
455
456
457
458
459
460
461
462
463
464
465
466
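/**
 *	ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
 *	@scsidev: Device to which we are issuing command
 *	@arg: User provided data for issuing command
 *
 *	Builds an ATA_16 pass-through CDB from the HDIO_DRIVE_CMD
 *	argument block (args[0] is the ATA command, args[3] the number
 *	of 512-byte sectors of response data) and issues it via
 *	scsi_execute().  If the device returns descriptor sense with an
 *	ATA status return descriptor, the completion registers are
 *	copied back to user space.
 *
 *	RETURNS:
 *	Zero on success, negative errno on error.
 */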
467int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
468{
469 int rc = 0;
470 u8 scsi_cmd[MAX_COMMAND_SIZE];
471 u8 args[4], *argbuf = NULL, *sensebuf = NULL;
472 int argsize = 0;
473 enum dma_data_direction data_dir;
474 int cmd_result;
475
476 if (arg == NULL)
477 return -EINVAL;
478
479 if (copy_from_user(args, arg, sizeof(args)))
480 return -EFAULT;
481
482 sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
483 if (!sensebuf)
484 return -ENOMEM;
485
486 memset(scsi_cmd, 0, sizeof(scsi_cmd));
487
488 if (args[3]) {
489 argsize = SECTOR_SIZE * args[3];
490 argbuf = kmalloc(argsize, GFP_KERNEL);
491 if (argbuf == NULL) {
492 rc = -ENOMEM;
493 goto error;
494 }
495
496 scsi_cmd[1] = (4 << 1);
497 scsi_cmd[2] = 0x0e;
498
499 data_dir = DMA_FROM_DEVICE;
500 } else {
501 scsi_cmd[1] = (3 << 1);
502 scsi_cmd[2] = 0x20;
503 data_dir = DMA_NONE;
504 }
505
506 scsi_cmd[0] = ATA_16;
507
508 scsi_cmd[4] = args[2];
509 if (args[0] == ATA_CMD_SMART) {
510 scsi_cmd[6] = args[3];
511 scsi_cmd[8] = args[1];
512 scsi_cmd[10] = 0x4f;
513 scsi_cmd[12] = 0xc2;
514 } else {
515 scsi_cmd[6] = args[1];
516 }
517 scsi_cmd[14] = args[0];
518
519
520
521 cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
522 sensebuf, (10*HZ), 5, 0, NULL);
523
524 if (driver_byte(cmd_result) == DRIVER_SENSE) {
525 u8 *desc = sensebuf + 8;
526 cmd_result &= ~(0xFF<<24);
527
528
529
530 if (cmd_result & SAM_STAT_CHECK_CONDITION) {
531 struct scsi_sense_hdr sshdr;
532 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
533 &sshdr);
534 if (sshdr.sense_key == 0 &&
535 sshdr.asc == 0 && sshdr.ascq == 0)
536 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
537 }
538
539
540 if (sensebuf[0] == 0x72 &&
541 desc[0] == 0x09) {
542 args[0] = desc[13];
543 args[1] = desc[3];
544 args[2] = desc[5];
545 if (copy_to_user(arg, args, sizeof(args)))
546 rc = -EFAULT;
547 }
548 }
549
550
551 if (cmd_result) {
552 rc = -EIO;
553 goto error;
554 }
555
556 if ((argbuf)
557 && copy_to_user(arg + sizeof(args), argbuf, argsize))
558 rc = -EFAULT;
559error:
560 kfree(sensebuf);
561 kfree(argbuf);
562 return rc;
563}
564
565
566
567
568
569
570
571
572
573
574
575
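/**
 *	ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
 *	@scsidev: Device to which we are issuing command
 *	@arg: User provided data for issuing command
 *
 *	Like ata_cmd_ioctl(), but for the seven-register
 *	HDIO_DRIVE_TASK interface; the issued command never transfers
 *	data.
 *
 *	RETURNS:
 *	Zero on success, negative errno on error.
 */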
576int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
577{
578 int rc = 0;
579 u8 scsi_cmd[MAX_COMMAND_SIZE];
580 u8 args[7], *sensebuf = NULL;
581 int cmd_result;
582
583 if (arg == NULL)
584 return -EINVAL;
585
586 if (copy_from_user(args, arg, sizeof(args)))
587 return -EFAULT;
588
589 sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
590 if (!sensebuf)
591 return -ENOMEM;
592
593 memset(scsi_cmd, 0, sizeof(scsi_cmd));
594 scsi_cmd[0] = ATA_16;
595 scsi_cmd[1] = (3 << 1);
596 scsi_cmd[2] = 0x20;
597 scsi_cmd[4] = args[1];
598 scsi_cmd[6] = args[2];
599 scsi_cmd[8] = args[3];
600 scsi_cmd[10] = args[4];
601 scsi_cmd[12] = args[5];
602 scsi_cmd[13] = args[6] & 0x4f;
603 scsi_cmd[14] = args[0];
604
605
606
607 cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
608 sensebuf, (10*HZ), 5, 0, NULL);
609
610 if (driver_byte(cmd_result) == DRIVER_SENSE) {
611 u8 *desc = sensebuf + 8;
612 cmd_result &= ~(0xFF<<24);
613
614
615
616 if (cmd_result & SAM_STAT_CHECK_CONDITION) {
617 struct scsi_sense_hdr sshdr;
618 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
619 &sshdr);
620 if (sshdr.sense_key == 0 &&
621 sshdr.asc == 0 && sshdr.ascq == 0)
622 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
623 }
624
625
626 if (sensebuf[0] == 0x72 &&
627 desc[0] == 0x09) {
628 args[0] = desc[13];
629 args[1] = desc[3];
630 args[2] = desc[5];
631 args[3] = desc[7];
632 args[4] = desc[9];
633 args[5] = desc[11];
634 args[6] = desc[12];
635 if (copy_to_user(arg, args, sizeof(args)))
636 rc = -EFAULT;
637 }
638 }
639
640 if (cmd_result) {
641 rc = -EIO;
642 goto error;
643 }
644
645 error:
646 kfree(sensebuf);
647 return rc;
648}
649
650static int ata_ioc32(struct ata_port *ap)
651{
652 if (ap->flags & ATA_FLAG_PIO_DMA)
653 return 1;
654 if (ap->pflags & ATA_PFLAG_PIO32)
655 return 1;
656 return 0;
657}
658
659int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
660 int cmd, void __user *arg)
661{
662 int val = -EINVAL, rc = -EINVAL;
663 unsigned long flags;
664
665 switch (cmd) {
666 case ATA_IOC_GET_IO32:
667 spin_lock_irqsave(ap->lock, flags);
668 val = ata_ioc32(ap);
669 spin_unlock_irqrestore(ap->lock, flags);
670 if (copy_to_user(arg, &val, 1))
671 return -EFAULT;
672 return 0;
673
674 case ATA_IOC_SET_IO32:
675 val = (unsigned long) arg;
676 rc = 0;
677 spin_lock_irqsave(ap->lock, flags);
678 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
679 if (val)
680 ap->pflags |= ATA_PFLAG_PIO32;
681 else
682 ap->pflags &= ~ATA_PFLAG_PIO32;
683 } else {
684 if (val != ata_ioc32(ap))
685 rc = -EINVAL;
686 }
687 spin_unlock_irqrestore(ap->lock, flags);
688 return rc;
689
690 case HDIO_GET_IDENTITY:
691 return ata_get_identity(ap, scsidev, arg);
692
693 case HDIO_DRIVE_CMD:
694 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
695 return -EACCES;
696 return ata_cmd_ioctl(scsidev, arg);
697
698 case HDIO_DRIVE_TASK:
699 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
700 return -EACCES;
701 return ata_task_ioctl(scsidev, arg);
702
703 default:
704 rc = -ENOTTY;
705 break;
706 }
707
708 return rc;
709}
710EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
711
712int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
713{
714 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
715 scsidev, cmd, arg);
716}
717EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
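/**
 *	ata_scsi_qc_new - acquire a new ata_queued_cmd reference
 *	@dev: ATA device to which the new command is attached
 *	@cmd: SCSI command that originated this ATA command
 *	@done: SCSI command completion function
 *
 *	Obtain a reference to an unused ata_queued_cmd structure and
 *	initialize it with the SCSI command, completion callback and
 *	scatter/gather information.  If no command slot is available,
 *	@cmd is completed immediately with QUEUE_FULL status.
 *
 *	RETURNS:
 *	Command allocated, or %NULL if none available.
 */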
739static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
740 struct scsi_cmnd *cmd,
741 void (*done)(struct scsi_cmnd *))
742{
743 struct ata_queued_cmd *qc;
744
745 qc = ata_qc_new_init(dev);
746 if (qc) {
747 qc->scsicmd = cmd;
748 qc->scsidone = done;
749
750 qc->sg = scsi_sglist(cmd);
751 qc->n_elem = scsi_sg_count(cmd);
752 } else {
753 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
754 done(cmd);
755 }
756
757 return qc;
758}
759
760static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
761{
762 struct scsi_cmnd *scmd = qc->scsicmd;
763
764 qc->extrabytes = scmd->request->extra_len;
765 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
766}
767
768
769
770
771
772
773
774
775
776
777
778
779
780static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
781{
782 u8 stat = tf->command, err = tf->feature;
783
784 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
785 if (stat & ATA_BUSY) {
786 printk("Busy }\n");
787 } else {
788 if (stat & 0x40) printk("DriveReady ");
789 if (stat & 0x20) printk("DeviceFault ");
790 if (stat & 0x10) printk("SeekComplete ");
791 if (stat & 0x08) printk("DataRequest ");
792 if (stat & 0x04) printk("CorrectedError ");
793 if (stat & 0x02) printk("Index ");
794 if (stat & 0x01) printk("Error ");
795 printk("}\n");
796
797 if (err) {
798 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
799 if (err & 0x04) printk("DriveStatusError ");
800 if (err & 0x80) {
801 if (err & 0x04) printk("BadCRC ");
802 else printk("Sector ");
803 }
804 if (err & 0x40) printk("UncorrectableError ");
805 if (err & 0x10) printk("SectorIdNotFound ");
806 if (err & 0x02) printk("TrackZeroNotFound ");
807 if (err & 0x01) printk("AddrMarkNotFound ");
808 printk("}\n");
809 }
810 }
811}
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
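/**
 *	ata_to_sense_error - convert ATA error to SCSI error
 *	@id: ATA device number
 *	@drv_stat: value of drive status register
 *	@drv_err: value of drive error register
 *	@sk: the sense key we'll fill out
 *	@asc: the additional sense code we'll fill out
 *	@ascq: the additional sense code qualifier we'll fill out
 *	@verbose: be verbose
 *
 *	Converts an ATA error into a SCSI error.  An error-register
 *	table entry matches when all of its bits are set in @drv_err;
 *	a status-register entry matches when any of its bits are set
 *	in @drv_stat.  The first match wins; if nothing matches, fall
 *	back to ABORTED COMMAND.
 */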
830static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
831 u8 *asc, u8 *ascq, int verbose)
832{
833 int i;
834
835
836 static const unsigned char sense_table[][4] = {
837
838 {0xd1, ABORTED_COMMAND, 0x00, 0x00},
839
840 {0xd0, ABORTED_COMMAND, 0x00, 0x00},
841
842 {0x61, HARDWARE_ERROR, 0x00, 0x00},
843
844 {0x84, ABORTED_COMMAND, 0x47, 0x00},
845
846 {0x37, NOT_READY, 0x04, 0x00},
847
848 {0x09, NOT_READY, 0x04, 0x00},
849
850 {0x01, MEDIUM_ERROR, 0x13, 0x00},
851
852 {0x02, HARDWARE_ERROR, 0x00, 0x00},
853
854 {0x04, ABORTED_COMMAND, 0x00, 0x00},
855
856 {0x08, NOT_READY, 0x04, 0x00},
857
858 {0x10, ABORTED_COMMAND, 0x14, 0x00},
859
860 {0x08, NOT_READY, 0x04, 0x00},
861
862 {0x40, MEDIUM_ERROR, 0x11, 0x04},
863
864 {0x80, MEDIUM_ERROR, 0x11, 0x04},
865 {0xFF, 0xFF, 0xFF, 0xFF},
866 };
867 static const unsigned char stat_table[][4] = {
868
869 {0x80, ABORTED_COMMAND, 0x47, 0x00},
870 {0x20, HARDWARE_ERROR, 0x00, 0x00},
871 {0x08, ABORTED_COMMAND, 0x47, 0x00},
872 {0x04, RECOVERED_ERROR, 0x11, 0x00},
873 {0xFF, 0xFF, 0xFF, 0xFF},
874 };
875
876
877
878
879 if (drv_stat & ATA_BUSY) {
880 drv_err = 0;
881 }
882
883 if (drv_err) {
884
885 for (i = 0; sense_table[i][0] != 0xFF; i++) {
886
887 if ((sense_table[i][0] & drv_err) ==
888 sense_table[i][0]) {
889 *sk = sense_table[i][1];
890 *asc = sense_table[i][2];
891 *ascq = sense_table[i][3];
892 goto translate_done;
893 }
894 }
895
896 if (verbose)
897 printk(KERN_WARNING "ata%u: no sense translation for "
898 "error 0x%02x\n", id, drv_err);
899 }
900
901
902 for (i = 0; stat_table[i][0] != 0xFF; i++) {
903 if (stat_table[i][0] & drv_stat) {
904 *sk = stat_table[i][1];
905 *asc = stat_table[i][2];
906 *ascq = stat_table[i][3];
907 goto translate_done;
908 }
909 }
910
911 if (verbose)
912 printk(KERN_WARNING "ata%u: no sense translation for "
913 "status: 0x%02x\n", id, drv_stat);
914
915
916
917 *sk = ABORTED_COMMAND;
918 *asc = 0x00;
919 *ascq = 0x00;
920
921 translate_done:
922 if (verbose)
923 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
924 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
925 id, drv_stat, drv_err, *sk, *asc, *ascq);
926 return;
927}
928
929
930
931
932
933
934
935
936
937
938
939
940
941
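/**
 *	ata_gen_passthru_sense - Generate check condition sense block
 *	@qc: Command that completed
 *
 *	Generates descriptor-format sense data (response code 0x72)
 *	containing an ATA Status Return descriptor (code 0x09) that
 *	carries the result taskfile registers, as required for the
 *	ATA PASS-THROUGH commands.
 *
 *	LOCKING:
 *	None.
 */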
942static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
943{
944 struct scsi_cmnd *cmd = qc->scsicmd;
945 struct ata_taskfile *tf = &qc->result_tf;
946 unsigned char *sb = cmd->sense_buffer;
947 unsigned char *desc = sb + 8;
948 int verbose = qc->ap->ops->error_handler == NULL;
949
950 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
951
952 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
953
954
955
956
957
958 if (qc->err_mask ||
959 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
960 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
961 &sb[1], &sb[2], &sb[3], verbose);
962 sb[1] &= 0x0f;
963 }
964
965
966
967
968 sb[0] = 0x72;
969
970 desc[0] = 0x09;
971
972
973 sb[7] = 14;
974 desc[1] = 12;
975
976
977
978
979 desc[2] = 0x00;
980 desc[3] = tf->feature;
981 desc[5] = tf->nsect;
982 desc[7] = tf->lbal;
983 desc[9] = tf->lbam;
984 desc[11] = tf->lbah;
985 desc[12] = tf->device;
986 desc[13] = tf->command;
987
988
989
990
991
992 if (tf->flags & ATA_TFLAG_LBA48) {
993 desc[2] |= 0x01;
994 desc[4] = tf->hob_nsect;
995 desc[6] = tf->hob_lbal;
996 desc[8] = tf->hob_lbam;
997 desc[10] = tf->hob_lbah;
998 }
999}
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
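/**
 *	ata_gen_ata_sense - generate sense data for a failed ATA command
 *	@qc: Command that we are erroring out
 *
 *	Generates descriptor-format sense data for @qc and reports the
 *	failed LBA (which may be a 48-bit address) in an Information
 *	sense-data descriptor.
 *
 *	LOCKING:
 *	None.
 */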
1011static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
1012{
1013 struct ata_device *dev = qc->dev;
1014 struct scsi_cmnd *cmd = qc->scsicmd;
1015 struct ata_taskfile *tf = &qc->result_tf;
1016 unsigned char *sb = cmd->sense_buffer;
1017 unsigned char *desc = sb + 8;
1018 int verbose = qc->ap->ops->error_handler == NULL;
1019 u64 block;
1020
1021 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
1022
1023 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1024
1025
1026 sb[0] = 0x72;
1027
1028
1029
1030
1031 if (qc->err_mask ||
1032 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
1033 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
1034 &sb[1], &sb[2], &sb[3], verbose);
1035 sb[1] &= 0x0f;
1036 }
1037
1038 block = ata_tf_read_block(&qc->result_tf, dev);
1039
1040
1041 sb[7] = 12;
1042 desc[0] = 0x00;
1043 desc[1] = 10;
1044
1045 desc[2] |= 0x80;
1046 desc[6] = block >> 40;
1047 desc[7] = block >> 32;
1048 desc[8] = block >> 24;
1049 desc[9] = block >> 16;
1050 desc[10] = block >> 8;
1051 desc[11] = block;
1052}
1053
1054static void ata_scsi_sdev_config(struct scsi_device *sdev)
1055{
1056 sdev->use_10_for_rw = 1;
1057 sdev->use_10_for_ms = 1;
1058
1059
1060
1061
1062
1063
1064 sdev->max_device_blocked = 1;
1065}
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082static int atapi_drain_needed(struct request *rq)
1083{
1084 if (likely(!blk_pc_request(rq)))
1085 return 0;
1086
1087 if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
1088 return 0;
1089
1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
1091}
1092
1093static int ata_scsi_dev_config(struct scsi_device *sdev,
1094 struct ata_device *dev)
1095{
1096 if (!ata_id_has_unload(dev->id))
1097 dev->flags |= ATA_DFLAG_NO_UNLOAD;
1098
1099
1100 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
1101
1102 if (dev->class == ATA_DEV_ATAPI) {
1103 struct request_queue *q = sdev->request_queue;
1104 void *buf;
1105
1106
1107 blk_queue_update_dma_alignment(sdev->request_queue,
1108 ATA_DMA_PAD_SZ - 1);
1109 blk_queue_update_dma_pad(sdev->request_queue,
1110 ATA_DMA_PAD_SZ - 1);
1111
1112
1113 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
1114 if (!buf) {
1115 ata_dev_printk(dev, KERN_ERR,
1116 "drain buffer allocation failed\n");
1117 return -ENOMEM;
1118 }
1119
1120 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
1121 } else {
1122
1123 blk_queue_update_dma_alignment(sdev->request_queue,
1124 ATA_SECT_SIZE - 1);
1125 sdev->manage_start_stop = 1;
1126 }
1127
1128 if (dev->flags & ATA_DFLAG_AN)
1129 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
1130
1131 if (dev->flags & ATA_DFLAG_NCQ) {
1132 int depth;
1133
1134 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
1135 depth = min(ATA_MAX_QUEUE - 1, depth);
1136 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
1137 }
1138
1139 return 0;
1140}
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154int ata_scsi_slave_config(struct scsi_device *sdev)
1155{
1156 struct ata_port *ap = ata_shost_to_port(sdev->host);
1157 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
1158 int rc = 0;
1159
1160 ata_scsi_sdev_config(sdev);
1161
1162 if (dev)
1163 rc = ata_scsi_dev_config(sdev, dev);
1164
1165 return rc;
1166}
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182void ata_scsi_slave_destroy(struct scsi_device *sdev)
1183{
1184 struct ata_port *ap = ata_shost_to_port(sdev->host);
1185 struct request_queue *q = sdev->request_queue;
1186 unsigned long flags;
1187 struct ata_device *dev;
1188
1189 if (!ap->ops->error_handler)
1190 return;
1191
1192 spin_lock_irqsave(ap->lock, flags);
1193 dev = __ata_scsi_find_dev(ap, sdev);
1194 if (dev && dev->sdev) {
1195
1196 dev->sdev = NULL;
1197 dev->flags |= ATA_DFLAG_DETACH;
1198 ata_port_schedule_eh(ap);
1199 }
1200 spin_unlock_irqrestore(ap->lock, flags);
1201
1202 kfree(q->dma_drain_buffer);
1203 q->dma_drain_buffer = NULL;
1204 q->dma_drain_size = 0;
1205}
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
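/**
 *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
 *	@sdev: SCSI device to configure queue depth for
 *	@queue_depth: new queue depth
 *
 *	libata's standard ->change_queue_depth callback, invoked when
 *	the user sets the queue depth via sysfs.  A depth of one turns
 *	NCQ off for the device; otherwise the requested depth is
 *	clamped to what the host and the device support.
 *
 *	RETURNS:
 *	Newly configured queue depth, or a negative errno if nothing
 *	changed.
 */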
1222int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
1223{
1224 struct ata_port *ap = ata_shost_to_port(sdev->host);
1225 struct ata_device *dev;
1226 unsigned long flags;
1227
1228 if (queue_depth < 1 || queue_depth == sdev->queue_depth)
1229 return sdev->queue_depth;
1230
1231 dev = ata_scsi_find_dev(ap, sdev);
1232 if (!dev || !ata_dev_enabled(dev))
1233 return sdev->queue_depth;
1234
1235
1236 spin_lock_irqsave(ap->lock, flags);
1237 dev->flags &= ~ATA_DFLAG_NCQ_OFF;
1238 if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
1239 dev->flags |= ATA_DFLAG_NCQ_OFF;
1240 queue_depth = 1;
1241 }
1242 spin_unlock_irqrestore(ap->lock, flags);
1243
1244
1245 queue_depth = min(queue_depth, sdev->host->can_queue);
1246 queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
1247 queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);
1248
1249 if (sdev->queue_depth == queue_depth)
1250 return -EINVAL;
1251
1252 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
1253 return queue_depth;
1254}
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
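/**
 *	ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 *	@qc: Storage for translated ATA taskfile
 *
 *	Translates START STOP UNIT into STANDBY IMMEDIATE (stop) or
 *	READ VERIFY (start/spin-up).  Spin-down is skipped during
 *	power-off or hibernation when the port flags request it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */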
1271static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
1272{
1273 struct scsi_cmnd *scmd = qc->scsicmd;
1274 struct ata_taskfile *tf = &qc->tf;
1275 const u8 *cdb = scmd->cmnd;
1276
1277 if (scmd->cmd_len < 5)
1278 goto invalid_fld;
1279
1280 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1281 tf->protocol = ATA_PROT_NODATA;
1282 if (cdb[1] & 0x1) {
		;	/* ignore the IMMED bit */
1284 }
1285 if (cdb[4] & 0x2)
1286 goto invalid_fld;
1287 if (((cdb[4] >> 4) & 0xf) != 0)
1288 goto invalid_fld;
1289
1290 if (cdb[4] & 0x1) {
1291 tf->nsect = 1;
1292
1293 if (qc->dev->flags & ATA_DFLAG_LBA) {
1294 tf->flags |= ATA_TFLAG_LBA;
1295
1296 tf->lbah = 0x0;
1297 tf->lbam = 0x0;
1298 tf->lbal = 0x0;
1299 tf->device |= ATA_LBA;
1300 } else {
1301
1302 tf->lbal = 0x1;
1303 tf->lbam = 0x0;
1304 tf->lbah = 0x0;
1305 }
1306
1307 tf->command = ATA_CMD_VERIFY;
1308 } else {
1309
1310
1311
1312 if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
1313 system_state == SYSTEM_POWER_OFF)
1314 goto skip;
1315
1316 if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
1317 system_entering_hibernation())
1318 goto skip;
1319
1320
1321 tf->command = ATA_CMD_STANDBYNOW1;
1322 }
1323
1324
1325
1326
1327
1328
1329
1330
1331 return 0;
1332
1333 invalid_fld:
1334 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
1335
1336 return 1;
1337 skip:
1338 scmd->result = SAM_STAT_GOOD;
1339 return 1;
1340}
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
1357{
1358 struct ata_taskfile *tf = &qc->tf;
1359
1360 tf->flags |= ATA_TFLAG_DEVICE;
1361 tf->protocol = ATA_PROT_NODATA;
1362
1363 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
1364 tf->command = ATA_CMD_FLUSH_EXT;
1365 else
1366 tf->command = ATA_CMD_FLUSH;
1367
1368
1369 qc->flags |= ATA_QCFLAG_IO;
1370
1371 return 0;
1372}
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1385{
1386 u64 lba = 0;
1387 u32 len;
1388
1389 VPRINTK("six-byte command\n");
1390
1391 lba |= ((u64)(cdb[1] & 0x1f)) << 16;
1392 lba |= ((u64)cdb[2]) << 8;
1393 lba |= ((u64)cdb[3]);
1394
1395 len = cdb[4];
1396
1397 *plba = lba;
1398 *plen = len;
1399}
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1412{
1413 u64 lba = 0;
1414 u32 len = 0;
1415
1416 VPRINTK("ten-byte command\n");
1417
1418 lba |= ((u64)cdb[2]) << 24;
1419 lba |= ((u64)cdb[3]) << 16;
1420 lba |= ((u64)cdb[4]) << 8;
1421 lba |= ((u64)cdb[5]);
1422
1423 len |= ((u32)cdb[7]) << 8;
1424 len |= ((u32)cdb[8]);
1425
1426 *plba = lba;
1427 *plen = len;
1428}
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1441{
1442 u64 lba = 0;
1443 u32 len = 0;
1444
1445 VPRINTK("sixteen-byte command\n");
1446
1447 lba |= ((u64)cdb[2]) << 56;
1448 lba |= ((u64)cdb[3]) << 48;
1449 lba |= ((u64)cdb[4]) << 40;
1450 lba |= ((u64)cdb[5]) << 32;
1451 lba |= ((u64)cdb[6]) << 24;
1452 lba |= ((u64)cdb[7]) << 16;
1453 lba |= ((u64)cdb[8]) << 8;
1454 lba |= ((u64)cdb[9]);
1455
1456 len |= ((u32)cdb[10]) << 24;
1457 len |= ((u32)cdb[11]) << 16;
1458 len |= ((u32)cdb[12]) << 8;
1459 len |= ((u32)cdb[13]);
1460
1461 *plba = lba;
1462 *plen = len;
1463}
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
1478{
1479 struct scsi_cmnd *scmd = qc->scsicmd;
1480 struct ata_taskfile *tf = &qc->tf;
1481 struct ata_device *dev = qc->dev;
1482 u64 dev_sectors = qc->dev->n_sectors;
1483 const u8 *cdb = scmd->cmnd;
1484 u64 block;
1485 u32 n_block;
1486
1487 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1488 tf->protocol = ATA_PROT_NODATA;
1489
1490 if (cdb[0] == VERIFY) {
1491 if (scmd->cmd_len < 10)
1492 goto invalid_fld;
1493 scsi_10_lba_len(cdb, &block, &n_block);
1494 } else if (cdb[0] == VERIFY_16) {
1495 if (scmd->cmd_len < 16)
1496 goto invalid_fld;
1497 scsi_16_lba_len(cdb, &block, &n_block);
1498 } else
1499 goto invalid_fld;
1500
1501 if (!n_block)
1502 goto nothing_to_do;
1503 if (block >= dev_sectors)
1504 goto out_of_range;
1505 if ((block + n_block) > dev_sectors)
1506 goto out_of_range;
1507
1508 if (dev->flags & ATA_DFLAG_LBA) {
1509 tf->flags |= ATA_TFLAG_LBA;
1510
1511 if (lba_28_ok(block, n_block)) {
1512
1513 tf->command = ATA_CMD_VERIFY;
1514 tf->device |= (block >> 24) & 0xf;
1515 } else if (lba_48_ok(block, n_block)) {
1516 if (!(dev->flags & ATA_DFLAG_LBA48))
1517 goto out_of_range;
1518
1519
1520 tf->flags |= ATA_TFLAG_LBA48;
1521 tf->command = ATA_CMD_VERIFY_EXT;
1522
1523 tf->hob_nsect = (n_block >> 8) & 0xff;
1524
1525 tf->hob_lbah = (block >> 40) & 0xff;
1526 tf->hob_lbam = (block >> 32) & 0xff;
1527 tf->hob_lbal = (block >> 24) & 0xff;
1528 } else
1529
1530 goto out_of_range;
1531
1532 tf->nsect = n_block & 0xff;
1533
1534 tf->lbah = (block >> 16) & 0xff;
1535 tf->lbam = (block >> 8) & 0xff;
1536 tf->lbal = block & 0xff;
1537
1538 tf->device |= ATA_LBA;
1539 } else {
1540
1541 u32 sect, head, cyl, track;
1542
1543 if (!lba_28_ok(block, n_block))
1544 goto out_of_range;
1545
1546
1547 track = (u32)block / dev->sectors;
1548 cyl = track / dev->heads;
1549 head = track % dev->heads;
1550 sect = (u32)block % dev->sectors + 1;
1551
1552 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1553 (u32)block, track, cyl, head, sect);
1554
1555
1556
1557
1558
1559 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1560 goto out_of_range;
1561
1562 tf->command = ATA_CMD_VERIFY;
1563 tf->nsect = n_block & 0xff;
1564 tf->lbal = sect;
1565 tf->lbam = cyl;
1566 tf->lbah = cyl >> 8;
1567 tf->device |= head;
1568 }
1569
1570 return 0;
1571
1572invalid_fld:
1573 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
1574
1575 return 1;
1576
1577out_of_range:
1578 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
1579
1580 return 1;
1581
1582nothing_to_do:
1583 scmd->result = SAM_STAT_GOOD;
1584 return 1;
1585}
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
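/**
 *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 *	@qc: Storage for translated ATA taskfile
 *
 *	Converts any of six SCSI read/write commands (READ/WRITE 6, 10
 *	and 16 byte) into an ATA READ/WRITE taskfile, honouring the
 *	FUA bit where present.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */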
1605static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
1606{
1607 struct scsi_cmnd *scmd = qc->scsicmd;
1608 const u8 *cdb = scmd->cmnd;
1609 unsigned int tf_flags = 0;
1610 u64 block;
1611 u32 n_block;
1612 int rc;
1613
1614 if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
1615 tf_flags |= ATA_TFLAG_WRITE;
1616
1617
1618 switch (cdb[0]) {
1619 case READ_10:
1620 case WRITE_10:
1621 if (unlikely(scmd->cmd_len < 10))
1622 goto invalid_fld;
1623 scsi_10_lba_len(cdb, &block, &n_block);
1624 if (unlikely(cdb[1] & (1 << 3)))
1625 tf_flags |= ATA_TFLAG_FUA;
1626 break;
1627 case READ_6:
1628 case WRITE_6:
1629 if (unlikely(scmd->cmd_len < 6))
1630 goto invalid_fld;
1631 scsi_6_lba_len(cdb, &block, &n_block);
1632
1633
1634
1635
1636 if (!n_block)
1637 n_block = 256;
1638 break;
1639 case READ_16:
1640 case WRITE_16:
1641 if (unlikely(scmd->cmd_len < 16))
1642 goto invalid_fld;
1643 scsi_16_lba_len(cdb, &block, &n_block);
1644 if (unlikely(cdb[1] & (1 << 3)))
1645 tf_flags |= ATA_TFLAG_FUA;
1646 break;
1647 default:
1648 DPRINTK("no-byte command\n");
1649 goto invalid_fld;
1650 }
1651
1652
1653 if (!n_block)
1654
1655
1656
1657
1658
1659
1660
1661 goto nothing_to_do;
1662
1663 qc->flags |= ATA_QCFLAG_IO;
1664 qc->nbytes = n_block * ATA_SECT_SIZE;
1665
1666 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
1667 qc->tag);
1668 if (likely(rc == 0))
1669 return 0;
1670
1671 if (rc == -ERANGE)
1672 goto out_of_range;
1673
1674invalid_fld:
1675 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
1676
1677 return 1;
1678
1679out_of_range:
1680 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
1681
1682 return 1;
1683
1684nothing_to_do:
1685 scmd->result = SAM_STAT_GOOD;
1686 return 1;
1687}
1688
1689static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1690{
1691 struct ata_port *ap = qc->ap;
1692 struct scsi_cmnd *cmd = qc->scsicmd;
1693 u8 *cdb = cmd->cmnd;
1694 int need_sense = (qc->err_mask != 0);
1695
1696
1697
1698
1699
1700
1701
1702
1703 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1704 ((cdb[2] & 0x20) || need_sense)) {
1705 ata_gen_passthru_sense(qc);
1706 } else {
1707 if (!need_sense) {
1708 cmd->result = SAM_STAT_GOOD;
1709 } else {
1710
1711
1712
1713
1714
1715
1716 ata_gen_ata_sense(qc);
1717 }
1718 }
1719
1720 if (need_sense && !ap->ops->error_handler)
1721 ata_dump_status(ap->print_id, &qc->result_tf);
1722
1723 qc->scsidone(cmd);
1724
1725 ata_qc_free(qc);
1726}
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
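/**
 *	ata_scsi_translate - Translate then issue SCSI command to ATA device
 *	@dev: ATA device to which the command is addressed
 *	@cmd: SCSI command to execute
 *	@done: SCSI command completion function
 *	@xlat_func: Actor which translates @cmd to an ATA taskfile
 *
 *	Sets up an ata_queued_cmd for @cmd, calls @xlat_func to build
 *	the taskfile and issues the result, honouring ->qc_defer() if
 *	the command must wait.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	0 on success, SCSI_MLQUEUE_DEVICE_BUSY or SCSI_MLQUEUE_HOST_BUSY
 *	if the command needs to be deferred.
 */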
1755static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1756 void (*done)(struct scsi_cmnd *),
1757 ata_xlat_func_t xlat_func)
1758{
1759 struct ata_port *ap = dev->link->ap;
1760 struct ata_queued_cmd *qc;
1761 int rc;
1762
1763 VPRINTK("ENTER\n");
1764
1765 qc = ata_scsi_qc_new(dev, cmd, done);
1766 if (!qc)
1767 goto err_mem;
1768
1769
1770 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1771 cmd->sc_data_direction == DMA_TO_DEVICE) {
1772 if (unlikely(scsi_bufflen(cmd) < 1)) {
1773 ata_dev_printk(dev, KERN_WARNING,
1774 "WARNING: zero len r/w req\n");
1775 goto err_did;
1776 }
1777
1778 ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
1779
1780 qc->dma_dir = cmd->sc_data_direction;
1781 }
1782
1783 qc->complete_fn = ata_scsi_qc_complete;
1784
1785 if (xlat_func(qc))
1786 goto early_finish;
1787
1788 if (ap->ops->qc_defer) {
1789 if ((rc = ap->ops->qc_defer(qc)))
1790 goto defer;
1791 }
1792
1793
1794 ata_qc_issue(qc);
1795
1796 VPRINTK("EXIT\n");
1797 return 0;
1798
1799early_finish:
1800 ata_qc_free(qc);
1801 qc->scsidone(cmd);
1802 DPRINTK("EXIT - early finish (good or error)\n");
1803 return 0;
1804
1805err_did:
1806 ata_qc_free(qc);
1807 cmd->result = (DID_ERROR << 16);
1808 qc->scsidone(cmd);
1809err_mem:
1810 DPRINTK("EXIT - internal\n");
1811 return 0;
1812
1813defer:
1814 ata_qc_free(qc);
1815 DPRINTK("EXIT - defer\n");
1816 if (rc == ATA_DEFER_LINK)
1817 return SCSI_MLQUEUE_DEVICE_BUSY;
1818 else
1819 return SCSI_MLQUEUE_HOST_BUSY;
1820}
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
1837 unsigned long *flags)
1838{
1839 spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);
1840
1841 memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
1842 if (copy_in)
1843 sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
1844 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
1845 return ata_scsi_rbuf;
1846}
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
1861 unsigned long *flags)
1862{
1863 if (copy_out)
1864 sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
1865 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
1866 spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
1867}
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
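/**
 *	ata_scsi_rbuf_fill - wrapper for SCSI command simulators
 *	@args: device IDENTIFY data / SCSI command of interest
 *	@actor: Callback hook for desired SCSI command simulator
 *
 *	Takes care of the hard work of simulating a SCSI command:
 *	grabs the (zeroed) shared response buffer, calls @actor to fill
 *	it, copies the result back to the SCSI data buffer if @actor
 *	succeeded, and completes the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(ata_scsi_rbuf_lock)
 */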
1884static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1885 unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
1886{
1887 u8 *rbuf;
1888 unsigned int rc;
1889 struct scsi_cmnd *cmd = args->cmd;
1890 unsigned long flags;
1891
1892 rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
1893 rc = actor(args, rbuf);
1894 ata_scsi_rbuf_put(cmd, rc == 0, &flags);
1895
1896 if (rc == 0)
1897 cmd->result = SAM_STAT_GOOD;
1898 args->done(cmd);
1899}
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1913{
1914 const u8 versions[] = {
1915 0x60,
1916
1917 0x03,
1918 0x20,
1919
1920 0x02,
1921 0x60
1922 };
1923 u8 hdr[] = {
1924 TYPE_DISK,
1925 0,
1926 0x5,
1927 2,
1928 95 - 4
1929 };
1930
1931 VPRINTK("ENTER\n");
1932
1933
1934 if (ata_id_removeable(args->id))
1935 hdr[1] |= (1 << 7);
1936
1937 memcpy(rbuf, hdr, sizeof(hdr));
	memcpy(&rbuf[8], "ATA     ", 8);
1939 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
1940 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
1941
1942 if (rbuf[32] == 0 || rbuf[32] == ' ')
1943 memcpy(&rbuf[32], "n/a ", 4);
1944
1945 memcpy(rbuf + 59, versions, sizeof(versions));
1946
1947 return 0;
1948}
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
1961{
1962 const u8 pages[] = {
1963 0x00,
1964 0x80,
1965 0x83,
1966 0x89,
1967 0xb1,
1968 };
1969
1970 rbuf[3] = sizeof(pages);
1971 memcpy(rbuf + 4, pages, sizeof(pages));
1972 return 0;
1973}
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
1986{
1987 const u8 hdr[] = {
1988 0,
1989 0x80,
1990 0,
1991 ATA_ID_SERNO_LEN,
1992 };
1993
1994 memcpy(rbuf, hdr, sizeof(hdr));
1995 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1996 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1997 return 0;
1998}
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
2014{
2015 const int sat_model_serial_desc_len = 68;
2016 int num;
2017
2018 rbuf[1] = 0x83;
2019 num = 4;
2020
2021
2022 rbuf[num + 0] = 2;
2023 rbuf[num + 3] = ATA_ID_SERNO_LEN;
2024 num += 4;
2025 ata_id_string(args->id, (unsigned char *) rbuf + num,
2026 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
2027 num += ATA_ID_SERNO_LEN;
2028
2029
2030
2031 rbuf[num + 0] = 2;
2032 rbuf[num + 1] = 1;
2033 rbuf[num + 3] = sat_model_serial_desc_len;
2034 num += 4;
	memcpy(rbuf + num, "ATA     ", 8);
2036 num += 8;
2037 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
2038 ATA_ID_PROD_LEN);
2039 num += ATA_ID_PROD_LEN;
2040 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
2041 ATA_ID_SERNO_LEN);
2042 num += ATA_ID_SERNO_LEN;
2043
2044 rbuf[3] = num - 4;
2045 return 0;
2046}
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
2059{
2060 struct ata_taskfile tf;
2061
2062 memset(&tf, 0, sizeof(tf));
2063
2064 rbuf[1] = 0x89;
2065 rbuf[2] = (0x238 >> 8);
2066 rbuf[3] = (0x238 & 0xff);
2067
	memcpy(&rbuf[8], "linux   ", 8);
	memcpy(&rbuf[16], "libata          ", 16);
2070 memcpy(&rbuf[32], DRV_VERSION, 4);
2071 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
2072
2073
2074
2075 tf.command = ATA_DRDY;
2076 tf.lbal = 0x1;
2077 tf.nsect = 0x1;
2078
2079 ata_tf_to_fis(&tf, 0, 1, &rbuf[36]);
2080 rbuf[36] = 0x34;
2081
2082 rbuf[56] = ATA_CMD_ID_ATA;
2083
2084 memcpy(&rbuf[60], &args->id[0], 512);
2085 return 0;
2086}
2087
2088static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
2089{
2090 int form_factor = ata_id_form_factor(args->id);
2091 int media_rotation_rate = ata_id_rotation_rate(args->id);
2092
2093 rbuf[1] = 0xb1;
2094 rbuf[3] = 0x3c;
2095 rbuf[4] = media_rotation_rate >> 8;
2096 rbuf[5] = media_rotation_rate;
2097 rbuf[7] = form_factor;
2098
2099 return 0;
2100}
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
2114{
2115 VPRINTK("ENTER\n");
2116 return 0;
2117}
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131static unsigned int ata_msense_caching(u16 *id, u8 *buf)
2132{
2133 memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
2134 if (ata_id_wcache_enabled(id))
2135 buf[2] |= (1 << 2);
2136 if (!ata_id_rahead_enabled(id))
2137 buf[12] |= (1 << 5);
2138 return sizeof(def_cache_mpage);
2139}
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150static unsigned int ata_msense_ctl_mode(u8 *buf)
2151{
2152 memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
2153 return sizeof(def_control_mpage);
2154}
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165static unsigned int ata_msense_rw_recovery(u8 *buf)
2166{
2167 memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
2168 return sizeof(def_rw_recovery_mpage);
2169}
2170
2171
2172
2173
2174
2175static int ata_dev_supports_fua(u16 *id)
2176{
2177 unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
2178
2179 if (!libata_fua)
2180 return 0;
2181 if (!ata_id_has_fua(id))
2182 return 0;
2183
2184 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
2185 ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
2186
2187 if (strcmp(model, "Maxtor"))
2188 return 1;
2189 if (strcmp(fw, "BANC1G10"))
2190 return 1;
2191
2192 return 0;
2193}
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
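/**
 *	ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
 *	@args: device IDENTIFY data / SCSI command of interest
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent
 *
 *	Simulates MODE SENSE for the R/W error recovery, cache and
 *	control mode pages.  Only current values are returned; requests
 *	for changeable, default or saved values are rejected.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */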
2207static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
2208{
2209 struct ata_device *dev = args->dev;
2210 u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
2211 const u8 sat_blk_desc[] = {
2212 0, 0, 0, 0,
2213 0,
2214 0, 0x2, 0x0
2215 };
2216 u8 pg, spg;
2217 unsigned int ebd, page_control, six_byte;
2218 u8 dpofua;
2219
2220 VPRINTK("ENTER\n");
2221
2222 six_byte = (scsicmd[0] == MODE_SENSE);
2223 ebd = !(scsicmd[1] & 0x8);
2224
2225
2226
2227
2228 page_control = scsicmd[2] >> 6;
2229 switch (page_control) {
2230 case 0:
2231 break;
2232 case 3:
2233 goto saving_not_supp;
2234 case 1:
2235 case 2:
2236 default:
2237 goto invalid_fld;
2238 }
2239
2240 if (six_byte)
2241 p += 4 + (ebd ? 8 : 0);
2242 else
2243 p += 8 + (ebd ? 8 : 0);
2244
2245 pg = scsicmd[2] & 0x3f;
2246 spg = scsicmd[3];
2247
2248
2249
2250
2251 if (spg && (spg != ALL_SUB_MPAGES))
2252 goto invalid_fld;
2253
2254 switch(pg) {
2255 case RW_RECOVERY_MPAGE:
2256 p += ata_msense_rw_recovery(p);
2257 break;
2258
2259 case CACHE_MPAGE:
2260 p += ata_msense_caching(args->id, p);
2261 break;
2262
2263 case CONTROL_MPAGE:
2264 p += ata_msense_ctl_mode(p);
2265 break;
2266
2267 case ALL_MPAGES:
2268 p += ata_msense_rw_recovery(p);
2269 p += ata_msense_caching(args->id, p);
2270 p += ata_msense_ctl_mode(p);
2271 break;
2272
2273 default:
2274 goto invalid_fld;
2275 }
2276
2277 dpofua = 0;
2278 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
2279 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
2280 dpofua = 1 << 4;
2281
2282 if (six_byte) {
2283 rbuf[0] = p - rbuf - 1;
2284 rbuf[2] |= dpofua;
2285 if (ebd) {
2286 rbuf[3] = sizeof(sat_blk_desc);
2287 memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
2288 }
2289 } else {
2290 unsigned int output_len = p - rbuf - 2;
2291
2292 rbuf[0] = output_len >> 8;
2293 rbuf[1] = output_len;
2294 rbuf[3] |= dpofua;
2295 if (ebd) {
2296 rbuf[7] = sizeof(sat_blk_desc);
2297 memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
2298 }
2299 }
2300 return 0;
2301
2302invalid_fld:
2303 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
2304
2305 return 1;
2306
2307saving_not_supp:
2308 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
2309
2310 return 1;
2311}
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
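/**
 *	ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
 *	@args: device IDENTIFY data / SCSI command of interest
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent
 *
 *	Simulates READ CAPACITY commands.  For READ CAPACITY (16) the
 *	logical/physical sector ratio and lowest aligned LBA derived
 *	from IDENTIFY words 106 and 209 are reported as well.
 *
 *	LOCKING:
 *	None.
 */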
2323static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2324{
2325 struct ata_device *dev = args->dev;
2326 u64 last_lba = dev->n_sectors - 1;
2327 u8 log_per_phys = 0;
2328 u16 lowest_aligned = 0;
2329 u16 word_106 = dev->id[106];
2330 u16 word_209 = dev->id[209];
2331
2332 if ((word_106 & 0xc000) == 0x4000) {
2333
2334 if (word_106 & (1 << 13))
2335 log_per_phys = word_106 & 0xf;
2336 if ((word_209 & 0xc000) == 0x4000) {
2337 u16 first = dev->id[209] & 0x3fff;
2338 if (first > 0)
2339 lowest_aligned = (1 << log_per_phys) - first;
2340 }
2341 }
2342
2343 VPRINTK("ENTER\n");
2344
2345 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2346 if (last_lba >= 0xffffffffULL)
2347 last_lba = 0xffffffff;
2348
2349
2350 rbuf[0] = last_lba >> (8 * 3);
2351 rbuf[1] = last_lba >> (8 * 2);
2352 rbuf[2] = last_lba >> (8 * 1);
2353 rbuf[3] = last_lba;
2354
2355
2356 rbuf[6] = ATA_SECT_SIZE >> 8;
2357 rbuf[7] = ATA_SECT_SIZE & 0xff;
2358 } else {
2359
2360 rbuf[0] = last_lba >> (8 * 7);
2361 rbuf[1] = last_lba >> (8 * 6);
2362 rbuf[2] = last_lba >> (8 * 5);
2363 rbuf[3] = last_lba >> (8 * 4);
2364 rbuf[4] = last_lba >> (8 * 3);
2365 rbuf[5] = last_lba >> (8 * 2);
2366 rbuf[6] = last_lba >> (8 * 1);
2367 rbuf[7] = last_lba;
2368
2369
2370 rbuf[10] = ATA_SECT_SIZE >> 8;
2371 rbuf[11] = ATA_SECT_SIZE & 0xff;
2372
2373 rbuf[12] = 0;
2374 rbuf[13] = log_per_phys;
2375 rbuf[14] = (lowest_aligned >> 8) & 0x3f;
2376 rbuf[15] = lowest_aligned;
2377 }
2378
2379 return 0;
2380}
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
2393{
2394 VPRINTK("ENTER\n");
2395 rbuf[3] = 8;
2396
2397 return 0;
2398}
2399
2400static void atapi_sense_complete(struct ata_queued_cmd *qc)
2401{
2402 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2403
2404
2405
2406
2407
2408 ata_gen_passthru_sense(qc);
2409 }
2410
2411 qc->scsidone(qc->scsicmd);
2412 ata_qc_free(qc);
2413}
2414
2415
2416static inline int ata_pio_use_silly(struct ata_port *ap)
2417{
2418 return (ap->flags & ATA_FLAG_PIO_DMA);
2419}
2420
2421static void atapi_request_sense(struct ata_queued_cmd *qc)
2422{
2423 struct ata_port *ap = qc->ap;
2424 struct scsi_cmnd *cmd = qc->scsicmd;
2425
2426 DPRINTK("ATAPI request sense\n");
2427
2428
2429 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2430
2431#ifdef CONFIG_ATA_SFF
2432 if (ap->ops->sff_tf_read)
2433 ap->ops->sff_tf_read(ap, &qc->tf);
2434#endif
2435
2436
2437 cmd->sense_buffer[0] = 0x70;
2438 cmd->sense_buffer[2] = qc->tf.feature >> 4;
2439
2440 ata_qc_reinit(qc);
2441
2442
2443 sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
2444 ata_sg_init(qc, &qc->sgent, 1);
2445 qc->dma_dir = DMA_FROM_DEVICE;
2446
2447 memset(&qc->cdb, 0, qc->dev->cdb_len);
2448 qc->cdb[0] = REQUEST_SENSE;
2449 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2450
2451 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2452 qc->tf.command = ATA_CMD_PACKET;
2453
2454 if (ata_pio_use_silly(ap)) {
2455 qc->tf.protocol = ATAPI_PROT_DMA;
2456 qc->tf.feature |= ATAPI_PKT_DMA;
2457 } else {
2458 qc->tf.protocol = ATAPI_PROT_PIO;
2459 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
2460 qc->tf.lbah = 0;
2461 }
2462 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2463
2464 qc->complete_fn = atapi_sense_complete;
2465
2466 ata_qc_issue(qc);
2467
2468 DPRINTK("EXIT\n");
2469}
2470
2471static void atapi_qc_complete(struct ata_queued_cmd *qc)
2472{
2473 struct scsi_cmnd *cmd = qc->scsicmd;
2474 unsigned int err_mask = qc->err_mask;
2475
2476 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2477
2478
2479 if (unlikely(qc->ap->ops->error_handler &&
2480 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2481
2482 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2483
2484
2485
2486
2487
2488 ata_gen_passthru_sense(qc);
2489 }
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
2502 qc->dev->sdev->locked = 0;
2503
2504 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2505 qc->scsidone(cmd);
2506 ata_qc_free(qc);
2507 return;
2508 }
2509
2510
2511 if (unlikely(err_mask & AC_ERR_DEV)) {
2512 cmd->result = SAM_STAT_CHECK_CONDITION;
2513 atapi_request_sense(qc);
2514 return;
2515 } else if (unlikely(err_mask)) {
2516
2517
2518
2519
2520
2521 ata_gen_passthru_sense(qc);
2522 } else {
2523 u8 *scsicmd = cmd->cmnd;
2524
2525 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
2526 unsigned long flags;
2527 u8 *buf;
2528
2529 buf = ata_scsi_rbuf_get(cmd, true, &flags);
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539 if (buf[2] == 0) {
2540 buf[2] = 0x5;
2541 buf[3] = 0x32;
2542 }
2543
2544 ata_scsi_rbuf_put(cmd, true, &flags);
2545 }
2546
2547 cmd->result = SAM_STAT_GOOD;
2548 }
2549
2550 qc->scsidone(cmd);
2551 ata_qc_free(qc);
2552}
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2564{
2565 struct scsi_cmnd *scmd = qc->scsicmd;
2566 struct ata_device *dev = qc->dev;
2567 int nodata = (scmd->sc_data_direction == DMA_NONE);
2568 int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
2569 unsigned int nbytes;
2570
2571 memset(qc->cdb, 0, dev->cdb_len);
2572 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
2573
2574 qc->complete_fn = atapi_qc_complete;
2575
2576 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2577 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
2578 qc->tf.flags |= ATA_TFLAG_WRITE;
2579 DPRINTK("direction: write\n");
2580 }
2581
2582 qc->tf.command = ATA_CMD_PACKET;
2583 ata_qc_set_pc_nbytes(qc);
2584
2585
2586 if (!nodata && !using_pio && atapi_check_dma(qc))
2587 using_pio = 1;
2588
2589
2590
2591
2592
2593
2594 nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620 if (nbytes & 0x1)
2621 nbytes++;
2622
2623 qc->tf.lbam = (nbytes & 0xFF);
2624 qc->tf.lbah = (nbytes >> 8);
2625
2626 if (nodata)
2627 qc->tf.protocol = ATAPI_PROT_NODATA;
2628 else if (using_pio)
2629 qc->tf.protocol = ATAPI_PROT_PIO;
2630 else {
2631
2632 qc->tf.protocol = ATAPI_PROT_DMA;
2633 qc->tf.feature |= ATAPI_PKT_DMA;
2634
2635 if ((dev->flags & ATA_DFLAG_DMADIR) &&
2636 (scmd->sc_data_direction != DMA_TO_DEVICE))
2637
2638 qc->tf.feature |= ATAPI_DMADIR;
2639 }
2640
2641
2642
2643
2644 return 0;
2645}
2646
2647static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
2648{
2649 if (!sata_pmp_attached(ap)) {
2650 if (likely(devno < ata_link_max_devices(&ap->link)))
2651 return &ap->link.device[devno];
2652 } else {
2653 if (likely(devno < ap->nr_pmp_links))
2654 return &ap->pmp_link[devno].device[0];
2655 }
2656
2657 return NULL;
2658}
2659
2660static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
2661 const struct scsi_device *scsidev)
2662{
2663 int devno;
2664
2665
2666 if (!sata_pmp_attached(ap)) {
2667 if (unlikely(scsidev->channel || scsidev->lun))
2668 return NULL;
2669 devno = scsidev->id;
2670 } else {
2671 if (unlikely(scsidev->id || scsidev->lun))
2672 return NULL;
2673 devno = scsidev->channel;
2674 }
2675
2676 return ata_find_dev(ap, devno);
2677}
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695static struct ata_device *
2696ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2697{
2698 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2699
2700 if (unlikely(!dev || !ata_dev_enabled(dev)))
2701 return NULL;
2702
2703 return dev;
2704}
2705
2706
2707
2708
2709
2710
2711
2712
2713static u8
2714ata_scsi_map_proto(u8 byte1)
2715{
2716 switch((byte1 & 0x1e) >> 1) {
2717 case 3:
2718 return ATA_PROT_NODATA;
2719
2720 case 6:
2721 case 10:
2722 case 11:
2723 return ATA_PROT_DMA;
2724
2725 case 4:
2726 case 5:
2727 return ATA_PROT_PIO;
2728
2729 case 0:
2730 case 1:
2731 case 8:
2732 case 9:
2733 case 7:
2734 case 12:
2735 case 15:
2736 default:
2737 break;
2738 }
2739
2740 return ATA_PROT_UNKNOWN;
2741}
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
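/**
 *	ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
 *	@qc: command structure to be initialized
 *
 *	Handles both the 12- and 16-byte ATA PASS-THROUGH CDBs: copies
 *	the register fields into the taskfile, enforces the correct
 *	device (master/slave) bit and rejects commands that are unsafe
 *	to pass through (SET FEATURES - XFER MODE, TPM commands unless
 *	explicitly allowed, DMA protocol without a configured DMA mode).
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */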
2752static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2753{
2754 struct ata_taskfile *tf = &(qc->tf);
2755 struct scsi_cmnd *scmd = qc->scsicmd;
2756 struct ata_device *dev = qc->dev;
2757 const u8 *cdb = scmd->cmnd;
2758
2759 if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
2760 goto invalid_fld;
2761
2762
2763
2764
2765
2766 if (cdb[0] == ATA_16) {
2767
2768
2769
2770
2771
2772 if (cdb[1] & 0x01) {
2773 tf->hob_feature = cdb[3];
2774 tf->hob_nsect = cdb[5];
2775 tf->hob_lbal = cdb[7];
2776 tf->hob_lbam = cdb[9];
2777 tf->hob_lbah = cdb[11];
2778 tf->flags |= ATA_TFLAG_LBA48;
2779 } else
2780 tf->flags &= ~ATA_TFLAG_LBA48;
2781
2782
2783
2784
2785 tf->feature = cdb[4];
2786 tf->nsect = cdb[6];
2787 tf->lbal = cdb[8];
2788 tf->lbam = cdb[10];
2789 tf->lbah = cdb[12];
2790 tf->device = cdb[13];
2791 tf->command = cdb[14];
2792 } else {
2793
2794
2795
2796 tf->flags &= ~ATA_TFLAG_LBA48;
2797
2798 tf->feature = cdb[3];
2799 tf->nsect = cdb[4];
2800 tf->lbal = cdb[5];
2801 tf->lbam = cdb[6];
2802 tf->lbah = cdb[7];
2803 tf->device = cdb[8];
2804 tf->command = cdb[9];
2805 }
2806
2807
2808 tf->device = dev->devno ?
2809 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
2810
2811
2812 qc->sect_size = ATA_SECT_SIZE;
2813 switch (tf->command) {
2814 case ATA_CMD_READ_LONG:
2815 case ATA_CMD_READ_LONG_ONCE:
2816 case ATA_CMD_WRITE_LONG:
2817 case ATA_CMD_WRITE_LONG_ONCE:
2818 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
2819 goto invalid_fld;
2820 qc->sect_size = scsi_bufflen(scmd);
2821 }
2822
2823
2824
2825
2826
2827
2828 tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2829 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2830 tf->flags |= ATA_TFLAG_WRITE;
2831
2832 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
2833
2834
2835
2836
2837
2838
2839
2840 ata_qc_set_pc_nbytes(qc);
2841
2842
2843 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2844 goto invalid_fld;
2845
2846
2847 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
2848 goto invalid_fld;
2849
2850 if (is_multi_taskfile(tf)) {
2851 unsigned int multi_count = 1 << (cdb[1] >> 5);
2852
2853
2854
2855
2856 if (multi_count != dev->multi_count)
2857 ata_dev_printk(dev, KERN_WARNING,
2858 "invalid multi_count %u ignored\n",
2859 multi_count);
2860 }
2861
2862
2863
2864
2865
2866
2867
2868
2869 if (tf->command == ATA_CMD_SET_FEATURES &&
2870 tf->feature == SETFEATURES_XFER)
2871 goto invalid_fld;
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
2889 goto invalid_fld;
2890
2891 return 0;
2892
2893 invalid_fld:
2894 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
2895
2896 return 1;
2897}

/**
 *	ata_get_xlat_func - check if SCSI to ATA translation is possible
 *	@dev: ATA device
 *	@cmd: SCSI command opcode to consider
 *
 *	Look up the SCSI command given, and determine whether the
 *	SCSI command is to be translated or simulated.
 *
 *	RETURNS:
 *	Pointer to translation function if possible, %NULL if not.
 */
static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
	switch (cmd) {
	case READ_6:
	case READ_10:
	case READ_16:

	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		return ata_scsi_rw_xlat;

	case SYNCHRONIZE_CACHE:
		if (ata_try_flush_cache(dev))
			return ata_scsi_flush_xlat;
		break;

	case VERIFY:
	case VERIFY_16:
		return ata_scsi_verify_xlat;

	case ATA_12:
	case ATA_16:
		return ata_scsi_pass_thru;

	case START_STOP:
		return ata_scsi_start_stop_xlat;
	}

	return NULL;
}

/**
 *	ata_scsi_dump_cdb - dump SCSI command contents to dmesg
 *	@ap: ATA port to which the command was being sent
 *	@cmd: SCSI command to dump
 *
 *	Prints the contents of a SCSI command via printk().
 */
static inline void ata_scsi_dump_cdb(struct ata_port *ap,
				     struct scsi_cmnd *cmd)
{
#ifdef ATA_DEBUG
	struct scsi_device *scsidev = cmd->device;
	u8 *scsicmd = cmd->cmnd;

	DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		ap->print_id,
		scsidev->channel, scsidev->id, scsidev->lun,
		scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
		scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
		scsicmd[8]);
#endif
}

static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
				      void (*done)(struct scsi_cmnd *),
				      struct ata_device *dev)
{
	u8 scsi_op = scmd->cmnd[0];
	ata_xlat_func_t xlat_func;
	int rc = 0;

	if (dev->class == ATA_DEV_ATA) {
		if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
			goto bad_cdb_len;

		xlat_func = ata_get_xlat_func(dev, scsi_op);
	} else {
		if (unlikely(!scmd->cmd_len))
			goto bad_cdb_len;

		xlat_func = NULL;
		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
			/* relay SCSI command to ATAPI device */
			int len = COMMAND_SIZE(scsi_op);
			if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
				goto bad_cdb_len;

			xlat_func = atapi_xlat;
		} else {
			/* ATA_16 passthru, treat as an ATA command */
			if (unlikely(scmd->cmd_len > 16))
				goto bad_cdb_len;

			xlat_func = ata_get_xlat_func(dev, scsi_op);
		}
	}

	if (xlat_func)
		rc = ata_scsi_translate(dev, scmd, done, xlat_func);
	else
		ata_scsi_simulate(dev, scmd, done);

	return rc;

 bad_cdb_len:
	DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
		scmd->cmd_len, scsi_op, dev->cdb_len);
	scmd->result = DID_ERROR << 16;
	done(scmd);
	return 0;
}

/**
 *	ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
 *	@cmd: SCSI command to be sent
 *	@done: Completion function, called when command is complete
 *
 *	In some cases, this function translates SCSI commands into
 *	ATA taskfiles, and queues the taskfiles to be sent to
 *	hardware.  In other cases, this function simulates a
 *	SCSI device by evaluating and responding to certain
 *	SCSI commands.  This creates the overall effect of
 *	ATA and ATAPI devices appearing as SCSI devices.
 *
 *	LOCKING:
 *	Releases scsi-layer-held lock, and obtains host lock.
 *
 *	RETURNS:
 *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
 *	0 otherwise.
 */
int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct ata_port *ap;
	struct ata_device *dev;
	struct scsi_device *scsidev = cmd->device;
	struct Scsi_Host *shost = scsidev->host;
	int rc = 0;

	ap = ata_shost_to_port(shost);

	spin_unlock(shost->host_lock);
	spin_lock(ap->lock);

	ata_scsi_dump_cdb(ap, cmd);

	dev = ata_scsi_find_dev(ap, scsidev);
	if (likely(dev))
		rc = __ata_scsi_queuecmd(cmd, done, dev);
	else {
		cmd->result = (DID_BAD_TARGET << 16);
		done(cmd);
	}

	spin_unlock(ap->lock);
	spin_lock(shost->host_lock);
	return rc;
}

/**
 *	ata_scsi_simulate - simulate SCSI command on ATA device
 *	@dev: the target device
 *	@cmd: SCSI command being sent to device
 *	@done: SCSI command completion function
 *
 *	Interprets and directly executes a select list of SCSI commands
 *	that can be handled internally.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
		       void (*done)(struct scsi_cmnd *))
{
	struct ata_scsi_args args;
	const u8 *scsicmd = cmd->cmnd;
	u8 tmp8;

	args.dev = dev;
	args.id = dev->id;
	args.cmd = cmd;
	args.done = done;

	switch(scsicmd[0]) {

	case FORMAT_UNIT:
		ata_scsi_invalid_field(cmd, done);
		break;

	case INQUIRY:
		if (scsicmd[1] & 2)		/* is CmdDt set?  */
			ata_scsi_invalid_field(cmd, done);
		else if ((scsicmd[1] & 1) == 0)	/* is EVPD clear? */
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
		else switch (scsicmd[2]) {
		case 0x00:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
			break;
		case 0x80:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
			break;
		case 0x83:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
			break;
		case 0x89:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
			break;
		case 0xb1:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
			break;
		default:
			ata_scsi_invalid_field(cmd, done);
			break;
		}
		break;

	case MODE_SENSE:
	case MODE_SENSE_10:
		ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
		break;

	case MODE_SELECT:
	case MODE_SELECT_10:
		ata_scsi_invalid_field(cmd, done);
		break;

	case READ_CAPACITY:
		ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		break;

	case SERVICE_ACTION_IN:
		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		else
			ata_scsi_invalid_field(cmd, done);
		break;

	case REPORT_LUNS:
		ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
		break;

	case REQUEST_SENSE:
		ata_scsi_set_sense(cmd, 0, 0, 0);
		cmd->result = (DRIVER_SENSE << 24);
		done(cmd);
		break;

	/* if we reach this, then writeback caching is disabled,
	 * turning this into a no-op.
	 */
	case SYNCHRONIZE_CACHE:
		/* fall through */

	/* no-op's, complete with success */
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
	case TEST_UNIT_READY:
		ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
		break;

	case SEND_DIAGNOSTIC:
		tmp8 = scsicmd[1] & ~(1 << 3);
		if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
		else
			ata_scsi_invalid_field(cmd, done);
		break;

	/* all other commands */
	default:
		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
		/* "Invalid command operation code" */
		done(cmd);
		break;
	}
}

int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct Scsi_Host *shost;

		rc = -ENOMEM;
		shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
		if (!shost)
			goto err_alloc;

		*(struct ata_port **)&shost->hostdata[0] = ap;
		ap->scsi_host = shost;

		shost->transportt = &ata_scsi_transport_template;
		shost->unique_id = ap->print_id;
		shost->max_id = 16;
		shost->max_lun = 1;
		shost->max_channel = 1;
		shost->max_cmd_len = 16;

		/* Schedule policy is determined by ->qc_defer()
		 * callback and it needs to see every deferred qc.
		 * Set host_blocked to 1 to prevent the SCSI midlayer
		 * from automatically deferring requests.
		 */
		shost->max_host_blocked = 1;

		rc = scsi_add_host(ap->scsi_host, ap->host->dev);
		if (rc)
			goto err_add;
	}

	return 0;

 err_add:
	scsi_host_put(host->ports[i]->scsi_host);
 err_alloc:
	while (--i >= 0) {
		struct Scsi_Host *shost = host->ports[i]->scsi_host;

		scsi_remove_host(shost);
		scsi_host_put(shost);
	}
	return rc;
}

void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
	int tries = 5;
	struct ata_device *last_failed_dev = NULL;
	struct ata_link *link;
	struct ata_device *dev;

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

 repeat:
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			struct scsi_device *sdev;
			int channel = 0, id = 0;

			if (dev->sdev)
				continue;

			if (ata_is_host_link(link))
				id = dev->devno;
			else
				channel = link->pmp;

			sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
						 NULL);
			if (!IS_ERR(sdev)) {
				dev->sdev = sdev;
				scsi_device_put(sdev);
			}
		}
	}

	/* If we scanned while EH was in progress or an allocation
	 * failure occurred, the scan would have failed silently.
	 * Check whether all devices are attached.
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!dev->sdev)
				goto exit_loop;
		}
	}
 exit_loop:
	if (!link)
		return;

	/* we're missing some SCSI devices */
	if (sync) {
		/* If the caller requested a synchronous scan and we've
		 * made any progress, sleep briefly and repeat.
		 */
		if (dev != last_failed_dev) {
			msleep(100);
			last_failed_dev = dev;
			goto repeat;
		}

		/* We might be failing to detect the boot device, give
		 * it a few more chances.
		 */
		if (--tries) {
			msleep(100);
			goto repeat;
		}

		ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
				"failed without making any progress,\n"
				" switching to async\n");
	}

	queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
			   round_jiffies_relative(HZ));
}

/**
 *	ata_scsi_offline_dev - offline attached SCSI device
 *	@dev: ATA device to offline attached SCSI device for
 *
 *	Takes the SCSI device attached to @dev offline.  Called with
 *	the host lock held, which protects dev->sdev against clearing.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if attached SCSI device exists, 0 otherwise.
 */
int ata_scsi_offline_dev(struct ata_device *dev)
{
	if (dev->sdev) {
		scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
		return 1;
	}
	return 0;
}

/**
 *	ata_scsi_remove_dev - remove attached SCSI device
 *	@dev: ATA device to remove attached SCSI device for
 *
 *	Removes the SCSI device attached to @dev, if any.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_scsi_remove_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct scsi_device *sdev;
	unsigned long flags;

	/* We need to hold scan_mutex so that SCSI device state
	 * doesn't change underneath us and scsi_device_get() below
	 * always succeeds.
	 */
	mutex_lock(&ap->scsi_host->scan_mutex);
	spin_lock_irqsave(ap->lock, flags);

	/* clearing dev->sdev is protected by host lock */
	sdev = dev->sdev;
	dev->sdev = NULL;

	if (sdev) {
		/* If a user-initiated unplug races with us, sdev can
		 * go away underneath us after the host lock and
		 * scan_mutex are released.  Hold onto it.
		 */
		if (scsi_device_get(sdev) == 0) {
			/* Ensure the attached sdev is offline on return
			 * from ata_scsi_offline_dev() regardless of
			 * whether it wins or loses the race against
			 * detach.
			 */
			scsi_device_set_state(sdev, SDEV_OFFLINE);
		} else {
			WARN_ON(1);
			sdev = NULL;
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);
	mutex_unlock(&ap->scsi_host->scan_mutex);

	if (sdev) {
		ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
			       dev_name(&sdev->sdev_gendev));

		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
}

static void ata_scsi_handle_link_detach(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;

	ata_for_each_dev(dev, link, ALL) {
		unsigned long flags;

		if (!(dev->flags & ATA_DFLAG_DETACHED))
			continue;

		spin_lock_irqsave(ap->lock, flags);
		dev->flags &= ~ATA_DFLAG_DETACHED;
		spin_unlock_irqrestore(ap->lock, flags);

		ata_scsi_remove_dev(dev);
	}
}

/**
 *	ata_scsi_media_change_notify - send media change event
 *	@dev: Pointer to the disk device with the media change event
 *
 *	Tell the SCSI layer that a media change event has occurred
 *	on the device, if a SCSI device is attached.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_scsi_media_change_notify(struct ata_device *dev)
{
	if (dev->sdev)
		sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
				     GFP_ATOMIC);
}

/**
 *	ata_scsi_hotplug - SCSI part of hotplug
 *	@work: Pointer to ATA port to perform SCSI hotplug on
 *
 *	Perform the SCSI part of hotplug: remove SCSI devices for
 *	detached ATA devices and scan for newly attached ones.  It
 *	is executed from a separate workqueue after EH completes.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_scsi_hotplug(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, hotplug_task.work);
	int i;

	if (ap->pflags & ATA_PFLAG_UNLOADING) {
		DPRINTK("ENTER/EXIT - unloading\n");
		return;
	}

	DPRINTK("ENTER\n");

	/* Unplug detached devices.  We cannot use the link iterator
	 * here because PMP links have to be scanned even if the PMP
	 * is currently not attached.  Iterate manually.
	 */
	ata_scsi_handle_link_detach(&ap->link);
	if (ap->pmp_link)
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_scsi_handle_link_detach(&ap->pmp_link[i]);

	/* scan for new ones */
	ata_scsi_scan_host(ap, 0);

	DPRINTK("EXIT\n");
}

/**
 *	ata_scsi_user_scan - indication for user-initiated bus scan
 *	@shost: SCSI host to scan
 *	@channel: Channel to scan
 *	@id: ID to scan
 *	@lun: LUN to scan
 *
 *	This function is called when the user explicitly requests a
 *	bus scan.  Set the probe pending flag and invoke EH.
 *
 *	LOCKING:
 *	SCSI layer (we don't care)
 *
 *	RETURNS:
 *	Zero on success, -EINVAL or -EOPNOTSUPP on failure.
 */
static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, unsigned int lun)
{
	struct ata_port *ap = ata_shost_to_port(shost);
	unsigned long flags;
	int devno, rc = 0;

	if (!ap->ops->error_handler)
		return -EOPNOTSUPP;

	if (lun != SCAN_WILD_CARD && lun)
		return -EINVAL;

	if (!sata_pmp_attached(ap)) {
		if (channel != SCAN_WILD_CARD && channel)
			return -EINVAL;
		devno = id;
	} else {
		if (id != SCAN_WILD_CARD && id)
			return -EINVAL;
		devno = channel;
	}

	spin_lock_irqsave(ap->lock, flags);

	if (devno == SCAN_WILD_CARD) {
		struct ata_link *link;

		ata_for_each_link(link, ap, EDGE) {
			struct ata_eh_info *ehi = &link->eh_info;
			ehi->probe_mask |= ATA_ALL_DEVICES;
			ehi->action |= ATA_EH_RESET;
		}
	} else {
		struct ata_device *dev = ata_find_dev(ap, devno);

		if (dev) {
			struct ata_eh_info *ehi = &dev->link->eh_info;
			ehi->probe_mask |= 1 << dev->devno;
			ehi->action |= ATA_EH_RESET;
		} else
			rc = -EINVAL;
	}

	if (rc == 0) {
		ata_port_schedule_eh(ap);
		spin_unlock_irqrestore(ap->lock, flags);
		ata_port_wait_eh(ap);
	} else
		spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
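
/*
 * For illustration: the SCSI midlayer invokes ->user_scan() when user space
 * writes to the host's sysfs "scan" attribute, e.g. (assuming host number 0)
 *
 *	echo "- - -" > /sys/class/scsi_host/host0/scan
 *
 * which arrives here with SCAN_WILD_CARD for channel, id and lun and thus
 * schedules a probing reset of every link on the port.
 */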

/**
 *	ata_scsi_dev_rescan - initiate scsi_rescan_device()
 *	@work: Pointer to ATA port to perform scsi_rescan_device() on
 *
 *	After ATA pass-thru (SAT) commands are executed successfully,
 *	libata needs to propagate any resulting changes to the SCSI
 *	layer by rescanning the attached SCSI devices.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_scsi_dev_rescan(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, scsi_rescan_task);
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			struct scsi_device *sdev = dev->sdev;

			if (!sdev)
				continue;
			if (scsi_device_get(sdev))
				continue;

			spin_unlock_irqrestore(ap->lock, flags);
			scsi_rescan_device(&(sdev->sdev_gendev));
			scsi_device_put(sdev);
			spin_lock_irqsave(ap->lock, flags);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_sas_port_alloc - Allocate port for a SAS attached SATA device
 *	@host: ATA host container for all SAS ports
 *	@port_info: Information from the low-level host driver
 *	@shost: SCSI host that the scsi device is attached to
 *
 *	RETURNS:
 *	ata_port pointer on success / NULL on failure.
 */
struct ata_port *ata_sas_port_alloc(struct ata_host *host,
				    struct ata_port_info *port_info,
				    struct Scsi_Host *shost)
{
	struct ata_port *ap;

	ap = ata_port_alloc(host);
	if (!ap)
		return NULL;

	ap->port_no = 0;
	ap->lock = shost->host_lock;
	ap->pio_mask = port_info->pio_mask;
	ap->mwdma_mask = port_info->mwdma_mask;
	ap->udma_mask = port_info->udma_mask;
	ap->flags |= port_info->flags;
	ap->ops = port_info->port_ops;
	ap->cbl = ATA_CBL_SATA;

	return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);

/**
 *	ata_sas_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  May be used as the port_start() entry in
 *	ata_port_operations; currently a no-op.
 *
 *	RETURNS:
 *	Zero.
 */
int ata_sas_port_start(struct ata_port *ap)
{
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);

/**
 *	ata_sas_port_stop - Undo ata_sas_port_start()
 *	@ap: Port to shut down
 *
 *	May be used as the port_stop() entry in ata_port_operations;
 *	currently a no-op.
 */
void ata_sas_port_stop(struct ata_port *ap)
{
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);

/**
 *	ata_sas_port_init - Initialize a SATA device behind a SAS port
 *	@ap: SATA port to initialize
 *
 *	Runs the port's ->port_start() hook, assigns a print ID and
 *	probes the bus.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */
int ata_sas_port_init(struct ata_port *ap)
{
	int rc = ap->ops->port_start(ap);

	if (!rc) {
		ap->print_id = ata_print_id++;
		rc = ata_bus_probe(ap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);

/**
 *	ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
 *	@ap: SATA port to destroy
 *
 *	Invokes the port's ->port_stop() hook, if any, and frees @ap.
 */
void ata_sas_port_destroy(struct ata_port *ap)
{
	if (ap->ops->port_stop)
		ap->ops->port_stop(ap);
	kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);

/**
 *	ata_sas_slave_configure - Default slave_config routine for libata devices
 *	@sdev: SCSI device to configure
 *	@ap: ATA port to which the SCSI device is attached
 *
 *	RETURNS:
 *	Zero.
 */
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
	ata_scsi_sdev_config(sdev);
	ata_scsi_dev_config(sdev, ap->link.device);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);

/**
 *	ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
 *	@cmd: SCSI command to be sent
 *	@done: Completion function, called when the command is complete
 *	@ap: ATA port to which the command is being sent
 *
 *	RETURNS:
 *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
 *	0 otherwise.
 */
int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
		     struct ata_port *ap)
{
	int rc = 0;

	ata_scsi_dump_cdb(ap, cmd);

	if (likely(ata_dev_enabled(ap->link.device)))
		rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
	else {
		cmd->result = (DID_BAD_TARGET << 16);
		done(cmd);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
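
/*
 * Sketch (hypothetical, for illustration only) of how a SAS LLD might wire
 * up the ata_sas_*() helpers exported above; names such as my_sas_probe()
 * and my_port_info are made up and not part of libata:
 *
 *	static int my_sas_probe(struct ata_host *host, struct Scsi_Host *shost)
 *	{
 *		struct ata_port *ap;
 *		int rc;
 *
 *		ap = ata_sas_port_alloc(host, &my_port_info, shost);
 *		if (!ap)
 *			return -ENOMEM;
 *
 *		rc = ata_sas_port_init(ap);	// ->port_start() + bus probe
 *		if (rc) {
 *			ata_sas_port_destroy(ap);
 *			return rc;
 *		}
 *		return 0;
 *	}
 *
 * The LLD would then call ata_sas_slave_configure() from its
 * ->slave_configure() hook and ata_sas_queuecmd() from ->queuecommand()
 * for devices reached through such a port.
 */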