1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45#include <linux/module.h>
46#include <linux/kernel.h>
47#include <linux/init.h>
48#include <linux/errno.h>
49#include <linux/blkdev.h>
50#include <linux/sched.h>
51#include <linux/workqueue.h>
52#include <linux/delay.h>
53#include <linux/pci.h>
54#include <linux/interrupt.h>
55#include <linux/aer.h>
56#include <linux/raid_class.h>
57#include <linux/blk-mq-pci.h>
58#include <asm/unaligned.h>
59
60#include "mpt3sas_base.h"
61
62#define RAID_CHANNEL 1
63
64#define PCIE_CHANNEL 2
65
66
/* Forward declarations for routines defined later in this file. */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81
82
/* List of all adapter (ioc) instances managed by this driver. */
LIST_HEAD(mpt3sas_ioc_list);
/* Lock protecting mpt3sas_ioc_list. */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");


/* local parameters */
/*
 * Callback handler indices, one per request type.  -1 means "not yet
 * assigned"; note these are u8, so -1 is actually stored as 0xFF.
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
/* Running counts of enumerated gen2 (mpt2) and gen3 (mpt3) adapters. */
static int mpt2_ids;
static int mpt3_ids;

/* Callback indices for task-management target-reset handling. */
static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;
108
109
/*
 * Debug logging bitmask; propagated to every ioc.  The writable
 * module_param_call hook for this variable is registered next to
 * _scsih_set_debug_level() further down in this file.
 */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");


/* Upper bound for the per-device max_sectors setting. */
static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");


/* [device missing delay, io missing delay]; -1 leaves firmware default. */
static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");


/* scsi-mid layer global parameter is max_report_luns, which is 16895 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

/* Selects which HBA generations this module instance will enumerate. */
static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
	" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
	1 - enumerates only SAS 2.0 generation HBAs\n \
	2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136
137
138
139
140
141
142
143
144static int diag_buffer_enable = -1;
145module_param(diag_buffer_enable, int, 0444);
146MODULE_PARM_DESC(diag_buffer_enable,
147 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148static int disable_discovery = -1;
149module_param(disable_discovery, int, 0444);
150MODULE_PARM_DESC(disable_discovery, " disable discovery ");
151
152
153
154static int prot_mask = -1;
155module_param(prot_mask, int, 0444);
156MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157
158static bool enable_sdev_max_qd;
159module_param(enable_sdev_max_qd, bool, 0444);
160MODULE_PARM_DESC(enable_sdev_max_qd,
161 "Enable sdev max qd as can_queue, def=disabled(0)");
162
163static int multipath_on_hba = -1;
164module_param(multipath_on_hba, int, 0);
165MODULE_PARM_DESC(multipath_on_hba,
166 "Multipath support to add same target device\n\t\t"
167 "as many times as it is visible to HBA from various paths\n\t\t"
168 "(by default:\n\t\t"
169 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
170 "\t SAS 3.5 HBA - This will be enabled)");
171
172static int host_tagset_enable = 1;
173module_param(host_tagset_enable, int, 0444);
174MODULE_PARM_DESC(host_tagset_enable,
175 "Shared host tagset enable/disable Default: enable(1)");
176
177
178static struct raid_template *mpt3sas_raid_template;
179static struct raid_template *mpt2sas_raid_template;
180
181
182
183
184
185
186
187
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

/*
 * Driver-internal event codes, chosen above the firmware MPI event
 * range (presumably dispatched through fw_event_work.event — confirm
 * in the firmware event handler).
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
/**
 * struct fw_event_work - firmware event work item
 * @list: list linkage
 * @work: work object handed to the workqueue
 * @ioc: per adapter object
 * @device_handle: firmware device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: when set, this event is to be skipped by the handler
 * @event: firmware event code (or one of the MPT3SAS_* internal codes)
 * @refcount: reference count; released via fw_event_work_put()
 * @event_data: variable-length event payload (flexible array)
 */
struct fw_event_work {
	struct list_head list;
	struct work_struct work;

	struct MPT3SAS_ADAPTER *ioc;
	u16 device_handle;
	u8 VF_ID;
	u8 VP_ID;
	u8 ignore;
	u16 event;
	struct kref refcount;
	char event_data[] __aligned(4);
};
227
228static void fw_event_work_free(struct kref *r)
229{
230 kfree(container_of(r, struct fw_event_work, refcount));
231}
232
233static void fw_event_work_get(struct fw_event_work *fw_work)
234{
235 kref_get(&fw_work->refcount);
236}
237
238static void fw_event_work_put(struct fw_event_work *fw_work)
239{
240 kref_put(&fw_work->refcount, fw_event_work_free);
241}
242
243static struct fw_event_work *alloc_fw_event_work(int len)
244{
245 struct fw_event_work *fw_event;
246
247 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
248 if (!fw_event)
249 return NULL;
250
251 kref_init(&fw_event->refcount);
252 return fw_event;
253}
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
/**
 * struct _scsi_io_transfer - scsi io transfer descriptor
 * @handle: sas device handle (firmware assigned)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA data direction
 * @data_length: data transfer length
 * @data_dma: DMA address of the data buffer
 * @sense: sense data buffer
 * @lun: logical unit number
 * @cdb_length: cdb length
 * @cdb: cdb contents (up to 32 bytes)
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set when reply fields below were filled in
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transferred
 *
 * NOTE(review): the fields after @valid_reply appear to be reply data,
 * presumably meaningful only when @valid_reply is set — confirm with
 * the routine that issues these requests.
 */
struct _scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	/* the following bytes are for the reply */
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};
302
303
304
305
306
307
308
309
310static int
311_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
312{
313 int ret = param_set_int(val, kp);
314 struct MPT3SAS_ADAPTER *ioc;
315
316 if (ret)
317 return ret;
318
319 pr_info("setting logging_level(0x%08x)\n", logging_level);
320 spin_lock(&gioc_lock);
321 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
322 ioc->logging_level = logging_level;
323 spin_unlock(&gioc_lock);
324 return 0;
325}
326module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
327 &logging_level, 0644);
328
329
330
331
332
333
334
335
336static inline int
337_scsih_srch_boot_sas_address(u64 sas_address,
338 Mpi2BootDeviceSasWwid_t *boot_device)
339{
340 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
341}
342
343
344
345
346
347
348
349
350static inline int
351_scsih_srch_boot_device_name(u64 device_name,
352 Mpi2BootDeviceDeviceName_t *boot_device)
353{
354 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
355}
356
357
358
359
360
361
362
363
364
365static inline int
366_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
367 Mpi2BootDeviceEnclosureSlot_t *boot_device)
368{
369 return (enclosure_logical_id == le64_to_cpu(boot_device->
370 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
371 SlotNumber)) ? 1 : 0;
372}
373
374
375
376
377
378
379
380
381
382
383
384
385struct hba_port *
386mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
387 u8 port_id, u8 bypass_dirty_port_flag)
388{
389 struct hba_port *port, *port_next;
390
391
392
393
394
395
396 if (!ioc->multipath_on_hba)
397 port_id = MULTIPATH_DISABLED_PORT_ID;
398
399 list_for_each_entry_safe(port, port_next,
400 &ioc->port_table_list, list) {
401 if (port->port_id != port_id)
402 continue;
403 if (bypass_dirty_port_flag)
404 return port;
405 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
406 continue;
407 return port;
408 }
409
410
411
412
413
414
415 if (!ioc->multipath_on_hba) {
416 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
417 if (!port)
418 return NULL;
419
420 port->port_id = port_id;
421 ioc_info(ioc,
422 "hba_port entry: %p, port: %d is added to hba_port list\n",
423 port, port->port_id);
424 list_add_tail(&port->list,
425 &ioc->port_table_list);
426 return port;
427 }
428 return NULL;
429}
430
431
432
433
434
435
436
437
438
439struct virtual_phy *
440mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
441 struct hba_port *port, u32 phy)
442{
443 struct virtual_phy *vphy, *vphy_next;
444
445 if (!port->vphys_mask)
446 return NULL;
447
448 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
449 if (vphy->phy_mask & (1 << phy))
450 return vphy;
451 }
452 return NULL;
453}
454
455
456
457
458
459
460
461
462
463
464
465
466static int
467_scsih_is_boot_device(u64 sas_address, u64 device_name,
468 u64 enclosure_logical_id, u16 slot, u8 form,
469 Mpi2BiosPage2BootDevice_t *boot_device)
470{
471 int rc = 0;
472
473 switch (form) {
474 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
475 if (!sas_address)
476 break;
477 rc = _scsih_srch_boot_sas_address(
478 sas_address, &boot_device->SasWwid);
479 break;
480 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
481 if (!enclosure_logical_id)
482 break;
483 rc = _scsih_srch_boot_encl_slot(
484 enclosure_logical_id,
485 slot, &boot_device->EnclosureSlot);
486 break;
487 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
488 if (!device_name)
489 break;
490 rc = _scsih_srch_boot_device_name(
491 device_name, &boot_device->DeviceName);
492 break;
493 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
494 break;
495 }
496
497 return rc;
498}
499
500
501
502
503
504
505
506
507
/**
 * _scsih_get_sas_address - obtain the SAS address for a device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: filled in with the SAS address (zeroed on entry)
 *
 * Reads SAS Device Page 0 for @handle.  A non-SEP device whose handle
 * is no larger than the HBA phy count is treated as a direct-attached
 * HBA phy and reports the HBA's own SAS address.
 *
 * Return: 0 on success; -ENXIO when the config request fails or the
 * page does not exist; -EIO for any other IOC status.
 */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/*
		 * Low handles that are not SEP devices are direct
		 * attached HBA phys: report the HBA's address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		    (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* the page is not present: no such handle */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* any other IOC status is an unexpected error */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}
548
549
550
551
552
553
554
555
556
557
558
559
560
561
/**
 * _scsih_determine_boot_device - record BIOS boot device matches
 * @ioc: per adapter object
 * @device: sas_device, pcie_device, or raid_device object
 * @channel: RAID_CHANNEL, PCIE_CHANNEL, or 0 for sas devices
 *
 * Compares the device's identifiers against the requested, alternate
 * requested, and current boot device entries in bios page 2, and saves
 * the first matching device object (and its channel) in the
 * corresponding ioc->*_boot_device slot.  Each slot is filled at most
 * once.  Only runs while the driver is loading and when a BIOS version
 * is reported in bios page 3.
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no BIOS reported, nothing to match against */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* extract the comparable identifiers for this channel type;
	 * raid and pcie devices only expose a wwid
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
644
645static struct _sas_device *
646__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
647 struct MPT3SAS_TARGET *tgt_priv)
648{
649 struct _sas_device *ret;
650
651 assert_spin_locked(&ioc->sas_device_lock);
652
653 ret = tgt_priv->sas_dev;
654 if (ret)
655 sas_device_get(ret);
656
657 return ret;
658}
659
660static struct _sas_device *
661mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
662 struct MPT3SAS_TARGET *tgt_priv)
663{
664 struct _sas_device *ret;
665 unsigned long flags;
666
667 spin_lock_irqsave(&ioc->sas_device_lock, flags);
668 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
669 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
670
671 return ret;
672}
673
674static struct _pcie_device *
675__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
676 struct MPT3SAS_TARGET *tgt_priv)
677{
678 struct _pcie_device *ret;
679
680 assert_spin_locked(&ioc->pcie_device_lock);
681
682 ret = tgt_priv->pcie_dev;
683 if (ret)
684 pcie_device_get(ret);
685
686 return ret;
687}
688
689
690
691
692
693
694
695
696
697
698
699static struct _pcie_device *
700mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
701 struct MPT3SAS_TARGET *tgt_priv)
702{
703 struct _pcie_device *ret;
704 unsigned long flags;
705
706 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
707 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
708 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
709
710 return ret;
711}
712
713
714
715
716
717
718
719
720
721
722
723
724
725struct _sas_device *
726__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
727 struct sas_rphy *rphy)
728{
729 struct _sas_device *sas_device;
730
731 assert_spin_locked(&ioc->sas_device_lock);
732
733 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
734 if (sas_device->rphy != rphy)
735 continue;
736 sas_device_get(sas_device);
737 return sas_device;
738 }
739
740 sas_device = NULL;
741 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
742 if (sas_device->rphy != rphy)
743 continue;
744 sas_device_get(sas_device);
745 return sas_device;
746 }
747
748 return NULL;
749}
750
751
752
753
754
755
756
757
758
759
760struct _sas_device *
761__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
762 u64 sas_address, struct hba_port *port)
763{
764 struct _sas_device *sas_device;
765
766 if (!port)
767 return NULL;
768
769 assert_spin_locked(&ioc->sas_device_lock);
770
771 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
772 if (sas_device->sas_address != sas_address)
773 continue;
774 if (sas_device->port != port)
775 continue;
776 sas_device_get(sas_device);
777 return sas_device;
778 }
779
780 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
781 if (sas_device->sas_address != sas_address)
782 continue;
783 if (sas_device->port != port)
784 continue;
785 sas_device_get(sas_device);
786 return sas_device;
787 }
788
789 return NULL;
790}
791
792
793
794
795
796
797
798
799
800
801
802struct _sas_device *
803mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
804 u64 sas_address, struct hba_port *port)
805{
806 struct _sas_device *sas_device;
807 unsigned long flags;
808
809 spin_lock_irqsave(&ioc->sas_device_lock, flags);
810 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
811 sas_address, port);
812 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
813
814 return sas_device;
815}
816
817static struct _sas_device *
818__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
819{
820 struct _sas_device *sas_device;
821
822 assert_spin_locked(&ioc->sas_device_lock);
823
824 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
825 if (sas_device->handle == handle)
826 goto found_device;
827
828 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
829 if (sas_device->handle == handle)
830 goto found_device;
831
832 return NULL;
833
834found_device:
835 sas_device_get(sas_device);
836 return sas_device;
837}
838
839
840
841
842
843
844
845
846
847
848struct _sas_device *
849mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
850{
851 struct _sas_device *sas_device;
852 unsigned long flags;
853
854 spin_lock_irqsave(&ioc->sas_device_lock, flags);
855 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
856 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
857
858 return sas_device;
859}
860
861
862
863
864
865
866
867
/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct (may be NULL)
 * @starget: scsi target struct (may be NULL)
 *
 * Logs the enclosure logical id/slot, enclosure level/connector name,
 * and chassis slot (each only when valid).  Output goes through
 * sdev_printk when @sdev is given, otherwise starget_printk when
 * @starget is given, otherwise ioc_info.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
918
919
920
921
922
923
924
925
926
/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If @sas_device is still on a list, remove it and drop the reference
 * the list held.
 */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The list_empty() check guards against a double removal; the
	 * put below releases the reference taken when the device was
	 * added to the list.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
951
952
953
954
955
956
/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* skipped during host reset recovery */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* take it off the list; this put drops the list's reference */
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		/* drop the reference taken by the lookup above */
		sas_device_put(sas_device);
	}
}
978
979
980
981
982
983
984
985
986
987
/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 *					  sas address and port
 * @ioc: per adapter object
 * @sas_address: device sas address
 * @port: hba port entry
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* skipped during host reset recovery */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		/* take it off the list; this put drops the list's reference */
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		/* drop the reference taken by the lookup above */
		sas_device_put(sas_device);
	}
}
1010
1011
1012
1013
1014
1015
1016
1017
1018
/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adds the object to ioc->sas_device_list (taking a list reference)
 * and registers the device with the sas transport layer.
 */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* hidden (raid component) drives are not exposed to the transport */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent, sas_device->port)) {
		/* transport registration failed: undo the list insert */
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * Port added but no scsi target was attached.
		 * NOTE(review): tear-down is deliberately skipped while
		 * the driver is still loading — presumably removal is
		 * unsafe during async scanning; confirm.
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
1062
1063
1064
1065
1066
1067
1068
1069
1070
/**
 * _scsih_sas_device_init_add - insert sas_device to the init list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adds the object to ioc->sas_device_init_list (taking a list
 * reference) and, while still under the lock, checks it against the
 * BIOS boot device entries.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
1091
1092
1093static struct _pcie_device *
1094__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1095{
1096 struct _pcie_device *pcie_device;
1097
1098 assert_spin_locked(&ioc->pcie_device_lock);
1099
1100 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1101 if (pcie_device->wwid == wwid)
1102 goto found_device;
1103
1104 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1105 if (pcie_device->wwid == wwid)
1106 goto found_device;
1107
1108 return NULL;
1109
1110found_device:
1111 pcie_device_get(pcie_device);
1112 return pcie_device;
1113}
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126static struct _pcie_device *
1127mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1128{
1129 struct _pcie_device *pcie_device;
1130 unsigned long flags;
1131
1132 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1133 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1134 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1135
1136 return pcie_device;
1137}
1138
1139
1140static struct _pcie_device *
1141__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1142 int channel)
1143{
1144 struct _pcie_device *pcie_device;
1145
1146 assert_spin_locked(&ioc->pcie_device_lock);
1147
1148 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1149 if (pcie_device->id == id && pcie_device->channel == channel)
1150 goto found_device;
1151
1152 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1153 if (pcie_device->id == id && pcie_device->channel == channel)
1154 goto found_device;
1155
1156 return NULL;
1157
1158found_device:
1159 pcie_device_get(pcie_device);
1160 return pcie_device;
1161}
1162
1163static struct _pcie_device *
1164__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1165{
1166 struct _pcie_device *pcie_device;
1167
1168 assert_spin_locked(&ioc->pcie_device_lock);
1169
1170 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1171 if (pcie_device->handle == handle)
1172 goto found_device;
1173
1174 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1175 if (pcie_device->handle == handle)
1176 goto found_device;
1177
1178 return NULL;
1179
1180found_device:
1181 pcie_device_get(pcie_device);
1182 return pcie_device;
1183}
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197struct _pcie_device *
1198mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1199{
1200 struct _pcie_device *pcie_device;
1201 unsigned long flags;
1202
1203 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1204 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1205 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1206
1207 return pcie_device;
1208}
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219static void
1220_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1221{
1222 struct _pcie_device *pcie_device;
1223 unsigned long flags;
1224 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1225
1226 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1227 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1228 if (pcie_device->shutdown_latency) {
1229 if (shutdown_latency < pcie_device->shutdown_latency)
1230 shutdown_latency =
1231 pcie_device->shutdown_latency;
1232 }
1233 }
1234 ioc->max_shutdown_latency = shutdown_latency;
1235 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1236}
1237
1238
1239
1240
1241
1242
1243
1244
1245
/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 *
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If @pcie_device is still on a list, remove it and drop the list's
 * reference.  When the removed device held the adapter's maximum
 * shutdown latency, the maximum is recomputed afterwards.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* drop the reference the list held */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's latency may have equalled the adapter-wide
	 * maximum; recompute it from the remaining devices.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1288
1289
1290
1291
1292
1293
1294
/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* skipped during host reset recovery */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* this put drops the list's reference */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		/* drop the reference taken by the lookup above */
		pcie_device_put(pcie_device);
	}

	/*
	 * The removed device's latency may have equalled the
	 * adapter-wide maximum; recompute it.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1331
1332
1333
1334
1335
1336
1337
1338
/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * Adds the object to ioc->pcie_device_list (taking a list reference)
 * and exposes it to the scsi midlayer on PCIE_CHANNEL.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* blocked devices are tracked but not exposed to scsi-ml */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* scsi-ml registration failed: undo the list insert */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		/*
		 * NOTE(review): unlike the sas equivalent, no tear-down
		 * happens here when no starget attached — only the
		 * pending-add bit is cleared while loading; confirm
		 * this asymmetry is intentional.
		 */
		if (!ioc->is_driver_loading) {
			/* TODO -- Need to find out whether this condition will
			 * occur or not
			 */
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}
1381
1382
1383
1384
1385
1386
1387
1388
1389
/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 *
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adds the object to ioc->pcie_device_init_list (taking a list
 * reference) and, unless the device is blocked, checks it against the
 * BIOS boot device entries while still under the lock.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430static struct _raid_device *
1431_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1432{
1433 struct _raid_device *raid_device, *r;
1434
1435 r = NULL;
1436 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1437 if (raid_device->id == id && raid_device->channel == channel) {
1438 r = raid_device;
1439 goto out;
1440 }
1441 }
1442
1443 out:
1444 return r;
1445}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456struct _raid_device *
1457mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1458{
1459 struct _raid_device *raid_device, *r;
1460
1461 r = NULL;
1462 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1463 if (raid_device->handle != handle)
1464 continue;
1465 r = raid_device;
1466 goto out;
1467 }
1468
1469 out:
1470 return r;
1471}
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482static struct _raid_device *
1483_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1484{
1485 struct _raid_device *raid_device, *r;
1486
1487 r = NULL;
1488 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1489 if (raid_device->wwid != wwid)
1490 continue;
1491 r = raid_device;
1492 goto out;
1493 }
1494
1495 out:
1496 return r;
1497}
1498
1499
1500
1501
1502
1503
1504
1505
/**
 * _scsih_raid_device_add - add raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 * This is added to the raid_device_list link list.  The list itself is
 * protected by ioc->raid_device_lock.
 */
static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    raid_device->handle, (u64)raid_device->wwid));

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_add_tail(&raid_device->list, &ioc->raid_device_list);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
1521
1522
1523
1524
1525
1526
1527
1528static void
1529_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1530 struct _raid_device *raid_device)
1531{
1532 unsigned long flags;
1533
1534 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1535 list_del(&raid_device->list);
1536 kfree(raid_device);
1537 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1538}
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549struct _sas_node *
1550mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1551{
1552 struct _sas_node *sas_expander, *r;
1553
1554 r = NULL;
1555 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1556 if (sas_expander->handle != handle)
1557 continue;
1558 r = sas_expander;
1559 goto out;
1560 }
1561 out:
1562 return r;
1563}
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574static struct _enclosure_node *
1575mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1576{
1577 struct _enclosure_node *enclosure_dev, *r;
1578
1579 r = NULL;
1580 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1581 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1582 continue;
1583 r = enclosure_dev;
1584 goto out;
1585 }
1586out:
1587 return r;
1588}
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599struct _sas_node *
1600mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1601 u64 sas_address, struct hba_port *port)
1602{
1603 struct _sas_node *sas_expander, *r = NULL;
1604
1605 if (!port)
1606 return r;
1607
1608 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1609 if (sas_expander->sas_address != sas_address)
1610 continue;
1611 if (sas_expander->port != port)
1612 continue;
1613 r = sas_expander;
1614 goto out;
1615 }
1616 out:
1617 return r;
1618}
1619
1620
1621
1622
1623
1624
1625
1626
1627
/**
 * _scsih_expander_node_add - insert expander device to the list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 * Context: This function will acquire ioc->sas_node_lock.
 *
 * Adding new object to the ioc->sas_expander_list.
 */
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}
1638
1639
1640
1641
1642
1643
1644
1645
1646static int
1647_scsih_is_end_device(u32 device_info)
1648{
1649 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1650 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1651 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1652 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1653 return 1;
1654 else
1655 return 0;
1656}
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666static int
1667_scsih_is_nvme_pciescsi_device(u32 device_info)
1668{
1669 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1670 == MPI26_PCIE_DEVINFO_NVME) ||
1671 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1672 == MPI26_PCIE_DEVINFO_SCSI))
1673 return 1;
1674 else
1675 return 0;
1676}
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688static u8
1689_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1690 int channel)
1691{
1692 int smid;
1693 struct scsi_cmnd *scmd;
1694
1695 for (smid = 1;
1696 smid <= ioc->shost->can_queue; smid++) {
1697 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1698 if (!scmd)
1699 continue;
1700 if (scmd->device->id == id &&
1701 scmd->device->channel == channel)
1702 return 1;
1703 }
1704 return 0;
1705}
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718static u8
1719_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1720 unsigned int lun, int channel)
1721{
1722 int smid;
1723 struct scsi_cmnd *scmd;
1724
1725 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1726
1727 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1728 if (!scmd)
1729 continue;
1730 if (scmd->device->id == id &&
1731 scmd->device->channel == channel &&
1732 scmd->device->lun == lun)
1733 return 1;
1734 }
1735 return 0;
1736}
1737
1738
1739
1740
1741
1742
1743
1744
1745
/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index (1-based)
 *
 * Return: the scmd pointer associated with @smid, or NULL if @smid is
 * out of the SCSI IO range, the request frame is free (DevHandle == 0),
 * or the tracker indicates the command is not active.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;	/* block-layer tags are 0-based */

	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If a SCSI IO request is outstanding at the driver level
		 * then the DevHandle field must be non-zero.  If DevHandle
		 * is zero this smid is free at the driver level, so return
		 * NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* Not a live driver-owned command. */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}
1779
1780
1781
1782
1783
1784
1785
1786
/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Clamps @qdepth to the host limit (and to the SATA limit for SATA end
 * devices unless enable_sdev_max_qd is set, or to 1 for untagged
 * devices), applies it, and returns the resulting queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * Limit max device queue for SATA to 32 only if
	 * enable_sdev_max_qd is disabled.
	 */
	if (ioc->enable_sdev_max_qd)
		goto not_sata;

	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}
1839
1840
1841
1842
1843
1844
1845
1846
1847void
1848mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1849{
1850 struct Scsi_Host *shost = sdev->host;
1851 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1852
1853 if (ioc->enable_sdev_max_qd)
1854 qdepth = shost->can_queue;
1855
1856 scsih_change_queue_depth(sdev, qdepth);
1857}
1858
1859
1860
1861
1862
1863
1864
1865
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Allocates the per-target private data and cross-links it with the
 * matching raid/pcie/sas device object found on the channel-specific
 * list.
 *
 * Return: 0 if ok, -ENOMEM on allocation failure.  Any other return is
 * assumed to be an error and the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* NOTE: reference taken here is dropped in
		 * scsih_target_destroy().
		 */
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
			starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
				MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
					MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	/* NOTE: reference taken here is dropped in scsih_target_destroy(). */
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
				MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
				MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}
1951
1952
1953
1954
1955
/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Unlinks the target from its raid/pcie/sas device object and frees the
 * per-target private data allocated in scsih_target_alloc().
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
			sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
		    (pcie_device->id == starget->id) &&
		    (pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Two puts: one for the get just above, one for
			 * the reference taken in scsih_target_alloc().
			 */
			sas_target_priv_data->pcie_dev = NULL;
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Two puts: one for the get just above, one for the
		 * reference taken in scsih_target_alloc().
		 */
		sas_target_priv_data->sas_dev = NULL;
		sas_device_put(sas_device);

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}
2026
2027
2028
2029
2030
2031
2032
2033
/**
 * scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Allocates the per-LUN private data, links it to the target private
 * data, and records the scsi_device/starget on the matching device
 * object.
 *
 * Return: 0 if ok, -ENOMEM on allocation failure.  Any other return is
 * assumed to be an error and the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
				       GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* Hidden raid components should not get an upper-level driver. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}

		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}
2108
2109
2110
2111
2112
/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 *
 * Drops the LUN count on the target private data, clears the starget
 * back-pointer on the device object when this was the last LUN, and
 * frees the per-LUN private data.
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
				sas_target_priv_data);
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc,
				sas_target_priv_data);
		if (sas_device && !sas_target_priv_data->num_luns)
			sas_device->starget = NULL;

		if (sas_device)
			sas_device_put(sas_device);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}
2161
2162
2163
2164
2165
2166
2167
/**
 * _scsih_display_sata_capabilities - sata capabilities
 * @ioc: per adapter object
 * @handle: device handle
 * @sdev: scsi device struct
 *
 * Reads SAS Device Page 0 for @handle and logs the SATA feature flags
 * (atapi, ncq, async notify, smart, fua, sw preserve).
 */
static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
	u16 handle, struct scsi_device *sdev)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u16 flags;
	u32 device_info;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	flags = le16_to_cpu(sas_device_pg0.Flags);
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);

	sdev_printk(KERN_INFO, sdev,
	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
	    "sw_preserve(%s)\n",
	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
	    "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
}
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219static int
2220scsih_is_raid(struct device *dev)
2221{
2222 struct scsi_device *sdev = to_scsi_device(dev);
2223 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2224
2225 if (ioc->is_warpdrive)
2226 return 0;
2227 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2228}
2229
2230static int
2231scsih_is_nvme(struct device *dev)
2232{
2233 struct scsi_device *sdev = to_scsi_device(dev);
2234
2235 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2236}
2237
2238
2239
2240
2241
2242static void
2243scsih_get_resync(struct device *dev)
2244{
2245 struct scsi_device *sdev = to_scsi_device(dev);
2246 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2247 static struct _raid_device *raid_device;
2248 unsigned long flags;
2249 Mpi2RaidVolPage0_t vol_pg0;
2250 Mpi2ConfigReply_t mpi_reply;
2251 u32 volume_status_flags;
2252 u8 percent_complete;
2253 u16 handle;
2254
2255 percent_complete = 0;
2256 handle = 0;
2257 if (ioc->is_warpdrive)
2258 goto out;
2259
2260 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2261 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2262 sdev->channel);
2263 if (raid_device) {
2264 handle = raid_device->handle;
2265 percent_complete = raid_device->percent_complete;
2266 }
2267 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2268
2269 if (!handle)
2270 goto out;
2271
2272 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2273 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2274 sizeof(Mpi2RaidVolPage0_t))) {
2275 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2276 __FILE__, __LINE__, __func__);
2277 percent_complete = 0;
2278 goto out;
2279 }
2280
2281 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2282 if (!(volume_status_flags &
2283 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2284 percent_complete = 0;
2285
2286 out:
2287
2288 switch (ioc->hba_mpi_version_belonged) {
2289 case MPI2_VERSION:
2290 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2291 break;
2292 case MPI25_VERSION:
2293 case MPI26_VERSION:
2294 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2295 break;
2296 }
2297}
2298
2299
2300
2301
2302
2303static void
2304scsih_get_state(struct device *dev)
2305{
2306 struct scsi_device *sdev = to_scsi_device(dev);
2307 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2308 static struct _raid_device *raid_device;
2309 unsigned long flags;
2310 Mpi2RaidVolPage0_t vol_pg0;
2311 Mpi2ConfigReply_t mpi_reply;
2312 u32 volstate;
2313 enum raid_state state = RAID_STATE_UNKNOWN;
2314 u16 handle = 0;
2315
2316 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2317 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2318 sdev->channel);
2319 if (raid_device)
2320 handle = raid_device->handle;
2321 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2322
2323 if (!raid_device)
2324 goto out;
2325
2326 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2327 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2328 sizeof(Mpi2RaidVolPage0_t))) {
2329 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2330 __FILE__, __LINE__, __func__);
2331 goto out;
2332 }
2333
2334 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2335 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2336 state = RAID_STATE_RESYNCING;
2337 goto out;
2338 }
2339
2340 switch (vol_pg0.VolumeState) {
2341 case MPI2_RAID_VOL_STATE_OPTIMAL:
2342 case MPI2_RAID_VOL_STATE_ONLINE:
2343 state = RAID_STATE_ACTIVE;
2344 break;
2345 case MPI2_RAID_VOL_STATE_DEGRADED:
2346 state = RAID_STATE_DEGRADED;
2347 break;
2348 case MPI2_RAID_VOL_STATE_FAILED:
2349 case MPI2_RAID_VOL_STATE_MISSING:
2350 state = RAID_STATE_OFFLINE;
2351 break;
2352 }
2353 out:
2354 switch (ioc->hba_mpi_version_belonged) {
2355 case MPI2_VERSION:
2356 raid_set_state(mpt2sas_raid_template, dev, state);
2357 break;
2358 case MPI25_VERSION:
2359 case MPI26_VERSION:
2360 raid_set_state(mpt3sas_raid_template, dev, state);
2361 break;
2362 }
2363}
2364
2365
2366
2367
2368
2369
2370
2371static void
2372_scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2373 struct scsi_device *sdev, u8 volume_type)
2374{
2375 enum raid_level level = RAID_LEVEL_UNKNOWN;
2376
2377 switch (volume_type) {
2378 case MPI2_RAID_VOL_TYPE_RAID0:
2379 level = RAID_LEVEL_0;
2380 break;
2381 case MPI2_RAID_VOL_TYPE_RAID10:
2382 level = RAID_LEVEL_10;
2383 break;
2384 case MPI2_RAID_VOL_TYPE_RAID1E:
2385 level = RAID_LEVEL_1E;
2386 break;
2387 case MPI2_RAID_VOL_TYPE_RAID1:
2388 level = RAID_LEVEL_1;
2389 break;
2390 }
2391
2392 switch (ioc->hba_mpi_version_belonged) {
2393 case MPI2_VERSION:
2394 raid_set_level(mpt2sas_raid_template,
2395 &sdev->sdev_gendev, level);
2396 break;
2397 case MPI25_VERSION:
2398 case MPI26_VERSION:
2399 raid_set_level(mpt3sas_raid_template,
2400 &sdev->sdev_gendev, level);
2401 break;
2402 }
2403}
2404
2405
2406
2407
2408
2409
2410
2411
2412
/**
 * _scsih_get_volume_capabilities - volume capabilities
 * @ioc: per adapter object
 * @raid_device: the raid_device object
 *
 * Fills in raid_device->num_pds, ->volume_type and ->device_info from
 * the firmware config pages.
 *
 * Return: 0 for success, else 1.
 */
static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	Mpi2RaidVolPage0_t *vol_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 sz;
	u8 num_pds;

	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
	    &num_pds)) || !num_pds) {
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	raid_device->num_pds = num_pds;
	/* Page 0 is variable-sized: header plus one entry per phys disk. */
	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
	    sizeof(Mpi2RaidVol0PhysDisk_t));
	vol_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!vol_pg0) {
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		kfree(vol_pg0);
		return 1;
	}

	raid_device->volume_type = vol_pg0->VolumeType;

	/*
	 * Figure out what the underlying devices are by obtaining the
	 * device_info bits for the first one.  Failures here are
	 * non-fatal: device_info is simply left unset.
	 */
	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    le16_to_cpu(pd_pg0.DevHandle)))) {
			raid_device->device_info =
			    le32_to_cpu(sas_device_pg0.DeviceInfo);
		}
	}

	kfree(vol_pg0);
	return 0;
}
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481static void
2482_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2483{
2484
2485
2486 if (sdev->type != TYPE_TAPE)
2487 return;
2488
2489 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2490 return;
2491
2492 sas_enable_tlr(sdev);
2493 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2494 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2495 return;
2496
2497}
2498
2499
2500
2501
2502
2503
2504
2505
/**
 * scsih_slave_configure - device configure routine.
 * @sdev: scsi device struct
 *
 * Configures queue depth, max sectors and logging for raid volumes,
 * NVMe/PCIe devices and sas/sata end devices.
 *
 * Return: 0 if ok.  Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	int qdepth;
	u8 ssp_target = 0;
	char *ds = "";
	char *r_level = "";
	u16 handle, volume_handle = 0;
	u64 volume_wwid = 0;

	qdepth = 1;
	sas_device_priv_data = sdev->hostdata;
	sas_device_priv_data->configured_lun = 1;
	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	handle = sas_target_priv_data->handle;

	/* raid volume handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (!raid_device) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		/*
		 * WARPDRIVE: Initialize the required data for Direct IO
		 */
		mpt3sas_init_warpdrive_properties(ioc, raid_device);

		/* RAID Queue Depth Support
		 * IS volume = underlying qdepth of drive type, either
		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
		 */
		if (raid_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
			ds = "SSP";
		} else {
			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
			if (raid_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
			else
				ds = "STP";
		}

		switch (raid_device->volume_type) {
		case MPI2_RAID_VOL_TYPE_RAID0:
			r_level = "RAID0";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1E:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			/* OEM-specific flag displays RAID1E with an even
			 * number of drives as RAID10.
			 */
			if (ioc->manu_pg10.OEMIdentifier &&
			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
			    MFG10_GF0_R10_DISPLAY) &&
			    !(raid_device->num_pds % 2))
				r_level = "RAID10";
			else
				r_level = "RAID1E";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1";
			break;
		case MPI2_RAID_VOL_TYPE_RAID10:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID10";
			break;
		case MPI2_RAID_VOL_TYPE_UNKNOWN:
		default:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAIDX";
			break;
		}

		if (!ioc->hide_ir_msg)
			sdev_printk(KERN_INFO, sdev,
			    "%s: handle(0x%04x), wwid(0x%016llx),"
			    " pd_count(%d), type(%s)\n",
			    r_level, raid_device->handle,
			    (unsigned long long)raid_device->wwid,
			    raid_device->num_pds, ds);

		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    MPT3SAS_RAID_MAX_SECTORS);
			sdev_printk(KERN_INFO, sdev,
			    "Set queue's max_sector to: %u\n",
			    MPT3SAS_RAID_MAX_SECTORS);
		}

		mpt3sas_scsih_change_queue_depth(sdev, qdepth);

		/* raid transport support */
		if (!ioc->is_warpdrive)
			_scsih_set_level(ioc, sdev, raid_device->volume_type);
		return 0;
	}

	/* non-raid handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
		if (mpt3sas_config_get_volume_handle(ioc, handle,
		    &volume_handle)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
		    volume_handle, &volume_wwid)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
	}

	/* PCIe handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_device_priv_data->sas_target->sas_address);
		if (!pcie_device) {
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
		ds = "NVMe";
		sdev_printk(KERN_INFO, sdev,
			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			ds, handle, (unsigned long long)pcie_device->wwid,
			pcie_device->port_num);
		if (pcie_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
			ds,
			(unsigned long long)pcie_device->enclosure_logical_id,
			pcie_device->slot);
		if (pcie_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
				"%s: enclosure level(0x%04x),"
				"connector name( %s)\n", ds,
				pcie_device->enclosure_level,
				pcie_device->connector_name);

		if (pcie_device->nvme_mdts)
			blk_queue_max_hw_sectors(sdev->request_queue,
					pcie_device->nvme_mdts/512);

		pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		mpt3sas_scsih_change_queue_depth(sdev, qdepth);

		/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
		 * merged and can eliminate holes created during merging
		 * operation.
		 */
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
				sdev->request_queue);
		blk_queue_virt_boundary(sdev->request_queue,
				ioc->page_size - 1);
		return 0;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	   sas_device_priv_data->sas_target->sas_address,
	   sas_device_priv_data->sas_target->port);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	sas_device->volume_handle = volume_handle;
	sas_device->volume_wwid = volume_wwid;
	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
		ssp_target = 1;
		if (sas_device->device_info &
				MPI2_SAS_DEVICE_INFO_SEP) {
			sdev_printk(KERN_WARNING, sdev,
			"set ignore_delay_remove for handle(0x%04x)\n",
			sas_device_priv_data->sas_target->handle);
			sas_device_priv_data->ignore_delay_remove = 1;
			ds = "SES";
		} else
			ds = "SSP";
	} else {
		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
			ds = "STP";
		else if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			ds = "SATA";
	}

	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
	    ds, handle, (unsigned long long)sas_device->sas_address,
	    sas_device->phy, (unsigned long long)sas_device->device_name);

	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);

	sas_device_put(sas_device);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (!ssp_target)
		_scsih_display_sata_capabilities(ioc, handle, sdev);

	mpt3sas_scsih_change_queue_depth(sdev, qdepth);

	if (ssp_target) {
		sas_read_port_mode_page(sdev);
		_scsih_enable_tlr(ioc, sdev);
	}

	return 0;
}
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764static int
2765scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2766 sector_t capacity, int params[])
2767{
2768 int heads;
2769 int sectors;
2770 sector_t cylinders;
2771 ulong dummy;
2772
2773 heads = 64;
2774 sectors = 32;
2775
2776 dummy = heads * sectors;
2777 cylinders = capacity;
2778 sector_div(cylinders, dummy);
2779
2780
2781
2782
2783
2784 if ((ulong)capacity >= 0x200000) {
2785 heads = 255;
2786 sectors = 63;
2787 dummy = heads * sectors;
2788 cylinders = capacity;
2789 sector_div(cylinders, dummy);
2790 }
2791
2792
2793 params[0] = heads;
2794 params[1] = sectors;
2795 params[2] = cylinders;
2796
2797 return 0;
2798}
2799
2800
2801
2802
2803
2804
2805static void
2806_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2807{
2808 char *desc;
2809
2810 switch (response_code) {
2811 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2812 desc = "task management request completed";
2813 break;
2814 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2815 desc = "invalid frame";
2816 break;
2817 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2818 desc = "task management request not supported";
2819 break;
2820 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2821 desc = "task management request failed";
2822 break;
2823 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2824 desc = "task management request succeeded";
2825 break;
2826 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2827 desc = "invalid lun";
2828 break;
2829 case 0xA:
2830 desc = "overlapped tag attempted";
2831 break;
2832 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2833 desc = "task queued, however not sent to target";
2834 break;
2835 default:
2836 desc = "unknown";
2837 break;
2838 }
2839 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2840}
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
/**
 * _scsih_tm_done - tm completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 * Context: interrupt.
 *
 * Completion callback for task management requests issued through
 * ioc->tm_cb_idx; copies the reply frame into ioc->tm_cmds.reply and
 * wakes up the waiter in mpt3sas_scsih_issue_tm().
 *
 * Return: 1 meaning the message frame should be freed by the caller.
 */
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* ignore stale completions: nothing pending or smid mismatch */
	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->tm_cmds.smid != smid)
		return 1;
	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords, hence the *4 byte count */
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	/* clear PENDING only after the reply is saved, then wake waiter */
	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->tm_cmds.done);
	return 1;
}
2874
2875
2876
2877
2878
2879
2880
2881
/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Mark the target that owns @handle as busy with a task management
 * request and suppress loginfo printing while the TM is outstanding.
 */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	shost_for_each_device(sdev, ioc->shost) {
		/* after a match, keep iterating (no break) so the
		 * shost_for_each_device() helper can release each sdev
		 * reference it takes; 'skip' makes the rest no-ops.
		 */
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}
2902
2903
2904
2905
2906
2907
2908
2909
/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Counterpart of mpt3sas_scsih_set_tm_flag(): clears the tm_busy flag
 * on the target that owns @handle and re-enables loginfo printing.
 */
void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	shost_for_each_device(sdev, ioc->shost) {
		/* continue (not break) after a match so the iterator can
		 * drop its per-sdev reference; 'skip' short-circuits the
		 * remaining iterations.
		 */
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
/**
 * scsih_tm_cmd_map_status - check whether a TM actually aborted the IOs
 * @ioc: per adapter object
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid of the command the TM was issued for
 *
 * For driver-internal commands (scsih/ctl, smid above can_queue) the
 * command's status flags are checked directly; for SML commands the
 * lookup tables are consulted to see whether any IO is still
 * outstanding on the target/lun.
 *
 * Return: SUCCESS if no matching IO remains outstanding, else FAILED.
 */
static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
	uint id, uint lun, u8 type, u16 smid_task)
{
	/* smids 1..can_queue belong to SCSI-ML issued commands */
	if (smid_task <= ioc->shost->can_queue) {
		switch (type) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			/* no IO left on the whole target => TM worked */
			if (!(_scsih_scsi_lookup_find_by_target(ioc,
			    id, channel)))
				return SUCCESS;
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			/* no IO left on this lun => TM worked */
			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
			    lun, channel)))
				return SUCCESS;
			break;
		default:
			return SUCCESS;
		}
	} else if (smid_task == ioc->scsih_cmds.smid) {
		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
			return SUCCESS;
	} else if (smid_task == ioc->ctl_cmds.smid) {
		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
			return SUCCESS;
	}

	return FAILED;
}
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
/**
 * scsih_tm_post_processing - post-processing after a TM completes
 * @ioc: per adapter object
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid of the command the TM was issued for
 *
 * If outstanding IOs still appear present after the TM, poll the
 * reply descriptor queues once (with interrupts masked) in case the
 * completions were posted but not yet serviced, then re-check.
 *
 * Return: SUCCESS if the affected IOs are gone, else FAILED.
 */
static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, uint lun, u8 type, u16 smid_task)
{
	int rc;

	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
	if (rc == SUCCESS)
		return rc;

	ioc_info(ioc,
	    "Poll ReplyDescriptor queues for completion of"
	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
	    smid_task, type, handle);

	/* mask interrupts so the poll does not race the ISR, drain the
	 * reply queues synchronously, then unmask
	 */
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_sync_reply_irqs(ioc, 1);
	mpt3sas_base_unmask_interrupts(ioc);

	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
}
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid of the task the TM targets (0 for target/lun reset)
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user; caller must hold ioc->tm_cmds.mutex.
 *
 * Builds and fires a high-priority SCSI task management request,
 * waits for its completion, and evaluates the outcome per task type.
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* only one TM may be outstanding through tm_cmds at a time */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/* an unhealthy IOC (doorbell in use, fault, or coredump state)
	 * gets a hard reset instead of a TM
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
	    ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
		     handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method rides in MsgFlags only for abort/query task types */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/* note: issue_reset is updated inside this macro/helper;
		 * a timed-out TM escalates to a hard reset
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
			    FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case the completion raced the timeout path */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
		    ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
			     le16_to_cpu(mpi_reply->IOCStatus),
			     le32_to_cpu(mpi_reply->IOCLogInfo),
			     le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/* If the DevHandle in smid_task's request-pool entry no
		 * longer matches @handle, the aborted command has been
		 * returned and its frame reused/cleared, so the abort
		 * worked; if it still matches, the IO is outstanding and
		 * the abort is deemed FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
3188
3189int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3190 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3191 u16 msix_task, u8 timeout, u8 tr_method)
3192{
3193 int ret;
3194
3195 mutex_lock(&ioc->tm_cmds.mutex);
3196 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3197 smid_task, msix_task, timeout, tr_method);
3198 mutex_unlock(&ioc->tm_cmds.mutex);
3199
3200 return ret;
3201}
3202
3203
3204
3205
3206
3207
3208
3209
/**
 * _scsih_tm_display_info - displays info about the device being reset
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by the error handlers before issuing a TM; prints the command
 * and identifying information (handle, wwid/sas_address, enclosure)
 * for the volume, NVMe (PCIe) device, or SAS device being targeted.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive mode hides IR messages; label volumes accordingly */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
		    "%s handle(0x%04x), %s wwid(0x%016llx)\n",
		    device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* lookup/printing done under pcie_device_lock; the ref
		 * from __mpt3sas_get_pdev_from_target is dropped below
		 */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			    pcie_device->handle,
			    (unsigned long long)pcie_device->wwid,
			    pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    pcie_device->enclosure_logical_id,
				    pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
				    "enclosure level(0x%04x), connector name( %s)\n",
				    pcie_device->enclosure_level,
				    pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				    (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
3284
3285
3286
3287
3288
3289
3290
/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * SCSI midlayer error-handler entry point for aborting a single
 * command via a MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK request.
 *
 * Return: SUCCESS if the command was aborted (or already gone),
 * FAILED otherwise.
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scmd->request->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* device is gone: complete the command as unconnected */
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* no tracker entry => the driver no longer owns this command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* task aborts are not supported for volumes or raid components */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	/* presumably a debug aid (fault the fw for analysis) — gated
	 * inside the helper; TODO confirm against mpt3sas_base
	 */
	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* real NVMe devices (no custom TM handling) use the NVMe-specific
	 * abort timeout
	 */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    st->smid, st->msix_io, timeout, 0);

	/* TM claimed success but the command is still tracked => failed */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3358
3359
3360
3361
3362
3363
3364
/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * SCSI midlayer error-handler entry point: issues a logical unit reset
 * (MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) to the device that
 * owns @scmd.
 *
 * Return: SUCCESS or FAILED.
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* device is gone: complete the command as unconnected */
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for raid components the reset is sent to the owning volume */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* real NVMe devices use their own reset timeout and a
	 * protocol-level reset; everything else uses a link reset
	 */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
	    tr_timeout, tr_method);

	/* reset reported success but IOs remain queued => failed */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
3439
3440
3441
3442
3443
3444
3445
/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * SCSI midlayer error-handler entry point: issues a target reset
 * (MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) to the target that owns
 * @scmd.
 *
 * Return: SUCCESS or FAILED.
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* target is gone: complete the command as unconnected */
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for raid components the reset is sent to the owning volume */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* real NVMe devices use their own reset timeout and a
	 * protocol-level reset; everything else uses a link reset
	 */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, 0,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);

	/* reset reported success but the target still has IOs => failed */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3517
3518
3519
3520
3521
3522
3523
3524
3525static int
3526scsih_host_reset(struct scsi_cmnd *scmd)
3527{
3528 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3529 int r, retval;
3530
3531 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3532 scsi_print_command(scmd);
3533
3534 if (ioc->is_driver_loading || ioc->remove_host) {
3535 ioc_info(ioc, "Blocking the host reset\n");
3536 r = FAILED;
3537 goto out;
3538 }
3539
3540 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3541 r = (retval < 0) ? FAILED : SUCCESS;
3542out:
3543 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3544 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3545
3546 return r;
3547}
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function can sleep.
 *
 * Adds @fw_event to ioc->fw_event_list and queues its work item on
 * the firmware event thread. Two references are taken: one owned by
 * the list, one owned by the queued work.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* reference held by the fw_event_list */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* reference held by the queued work item */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
3575
3576
3577
3578
3579
3580
3581
3582
3583
/**
 * _scsih_fw_event_del_from_list - delete fw_event from the list
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function can sleep.
 *
 * If @fw_event is still linked on ioc->fw_event_list, unlink it and
 * drop the reference the list was holding.
 */
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
	*fw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&fw_event->list)) {
		list_del_init(&fw_event->list);
		/* release the list's reference taken in _scsih_fw_event_add */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
3597
3598
3599
3600
3601
3602
3603
3604void
3605mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3606 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3607{
3608 struct fw_event_work *fw_event;
3609 u16 sz;
3610
3611 if (ioc->is_driver_loading)
3612 return;
3613 sz = sizeof(*event_data);
3614 fw_event = alloc_fw_event_work(sz);
3615 if (!fw_event)
3616 return;
3617 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3618 fw_event->ioc = ioc;
3619 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3620 _scsih_fw_event_add(ioc, fw_event);
3621 fw_event_work_put(fw_event);
3622}
3623
3624
3625
3626
3627
3628static void
3629_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3630{
3631 struct fw_event_work *fw_event;
3632
3633 if (ioc->is_driver_loading)
3634 return;
3635 fw_event = alloc_fw_event_work(0);
3636 if (!fw_event)
3637 return;
3638 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3639 fw_event->ioc = ioc;
3640 _scsih_fw_event_add(ioc, fw_event);
3641 fw_event_work_put(fw_event);
3642}
3643
3644
3645
3646
3647
3648void
3649mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3650{
3651 struct fw_event_work *fw_event;
3652
3653 fw_event = alloc_fw_event_work(0);
3654 if (!fw_event)
3655 return;
3656 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3657 fw_event->ioc = ioc;
3658 _scsih_fw_event_add(ioc, fw_event);
3659 fw_event_work_put(fw_event);
3660}
3661
3662static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3663{
3664 unsigned long flags;
3665 struct fw_event_work *fw_event = NULL;
3666
3667 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3668 if (!list_empty(&ioc->fw_event_list)) {
3669 fw_event = list_first_entry(&ioc->fw_event_list,
3670 struct fw_event_work, list);
3671 list_del_init(&fw_event->list);
3672 }
3673 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3674
3675 return fw_event;
3676}
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walks the firmware event queue (plus any event currently being
 * processed) and, for each event, cancels its pending work and drops
 * the outstanding references.
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;

	/* flag checked by the event worker so in-flight processing can
	 * bail out early — NOTE(review): presumably; confirm in
	 * _firmware_event_work
	 */
	ioc->fw_events_cleanup = 1;
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	     (fw_event = ioc->current_event)) {

		/* cancel_work_sync() returns true only when the work was
		 * still pending; in that case the work item's reference
		 * (taken in _scsih_fw_event_add) must be dropped here
		 * since the work function will never run to drop it.
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

		/* drop the reference inherited from the queue/list */
		fw_event_work_put(fw_event);
	}
	ioc->fw_events_cleanup = 0;
}
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723static void
3724_scsih_internal_device_block(struct scsi_device *sdev,
3725 struct MPT3SAS_DEVICE *sas_device_priv_data)
3726{
3727 int r = 0;
3728
3729 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3730 sas_device_priv_data->sas_target->handle);
3731 sas_device_priv_data->block = 1;
3732
3733 r = scsi_internal_device_block_nowait(sdev);
3734 if (r == -EINVAL)
3735 sdev_printk(KERN_WARNING, sdev,
3736 "device_block failed with return(%d) for handle(0x%04x)\n",
3737 r, sas_device_priv_data->sas_target->handle);
3738}
3739
3740
3741
3742
3743
3744
3745
3746
3747
/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Clears the driver's block flag and asks the SCSI midlayer to set
 * the device running. If the unblock fails with -EINVAL, the device
 * is forced through a block/unblock cycle to get the midlayer state
 * machine back in sync.
 */
static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
	struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped;
		 * force a block followed by an unblock to recover.
		 * NOTE(review): this rationale is inferred from the retry
		 * sequence below — confirm against midlayer behavior.
		 */
		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}
3783
3784
3785
3786
3787
3788
3789
3790static void
3791_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3792{
3793 struct MPT3SAS_DEVICE *sas_device_priv_data;
3794 struct scsi_device *sdev;
3795
3796 shost_for_each_device(sdev, ioc->shost) {
3797 sas_device_priv_data = sdev->hostdata;
3798 if (!sas_device_priv_data)
3799 continue;
3800 if (!sas_device_priv_data->block)
3801 continue;
3802
3803 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3804 "device_running, handle(0x%04x)\n",
3805 sas_device_priv_data->sas_target->handle));
3806 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3807 }
3808}
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819static void
3820_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3821 u64 sas_address, struct hba_port *port)
3822{
3823 struct MPT3SAS_DEVICE *sas_device_priv_data;
3824 struct scsi_device *sdev;
3825
3826 shost_for_each_device(sdev, ioc->shost) {
3827 sas_device_priv_data = sdev->hostdata;
3828 if (!sas_device_priv_data)
3829 continue;
3830 if (sas_device_priv_data->sas_target->sas_address
3831 != sas_address)
3832 continue;
3833 if (sas_device_priv_data->sas_target->port != port)
3834 continue;
3835 if (sas_device_priv_data->block)
3836 _scsih_internal_device_unblock(sdev,
3837 sas_device_priv_data);
3838 }
3839}
3840
3841
3842
3843
3844
3845
3846
3847static void
3848_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3849{
3850 struct MPT3SAS_DEVICE *sas_device_priv_data;
3851 struct scsi_device *sdev;
3852
3853 shost_for_each_device(sdev, ioc->shost) {
3854 sas_device_priv_data = sdev->hostdata;
3855 if (!sas_device_priv_data)
3856 continue;
3857 if (sas_device_priv_data->block)
3858 continue;
3859 if (sas_device_priv_data->ignore_delay_remove) {
3860 sdev_printk(KERN_INFO, sdev,
3861 "%s skip device_block for SES handle(0x%04x)\n",
3862 __func__, sas_device_priv_data->sas_target->handle);
3863 continue;
3864 }
3865 _scsih_internal_device_block(sdev, sas_device_priv_data);
3866 }
3867}
3868
3869
3870
3871
3872
3873
3874
3875
/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Blocks every not-yet-blocked sdev belonging to @handle, skipping
 * devices whose sas rphy addition is still pending and SES devices
 * marked ignore_delay_remove.
 */
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* don't block while transport-layer rphy add is pending */
		if (sas_device && sas_device->pend_sas_rphy_add)
			continue;
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}

	/* release the lookup reference from mpt3sas_get_sdev_by_handle */
	if (sas_device)
		sas_device_put(sas_device);
}
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled: end devices directly below the expander get their handle
 * recorded in ioc->blocking_handles, and child expanders are handled
 * recursively.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* first pass: mark every directly attached end device */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
				    ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* second pass: recurse into attached child expanders */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971static void
3972_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3973 Mpi2EventDataSasTopologyChangeList_t *event_data)
3974{
3975 int i;
3976 u16 handle;
3977 u16 reason_code;
3978
3979 for (i = 0; i < event_data->NumEntries; i++) {
3980 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3981 if (!handle)
3982 continue;
3983 reason_code = event_data->PHY[i].PhyStatus &
3984 MPI2_EVENT_SAS_TOPO_RC_MASK;
3985 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3986 _scsih_block_io_device(ioc, handle);
3987 }
3988}
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998static void
3999_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4000 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4001{
4002 int i;
4003 u16 handle;
4004 u16 reason_code;
4005
4006 for (i = 0; i < event_data->NumEntries; i++) {
4007 handle =
4008 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4009 if (!handle)
4010 continue;
4011 reason_code = event_data->PortEntry[i].PortStatus;
4012 if (reason_code ==
4013 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4014 _scsih_block_io_device(ioc, handle);
4015 }
4016}
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
/**
 * _scsih_tm_tr_send - send a target-reset TM for a removed device
 * @ioc: per adapter object
 * @handle: firmware device handle being removed
 *
 * Issue a high-priority SCSI task management TARGET_RESET for @handle
 * as the first step of device removal.  If no hi-priority smid is
 * available the request is queued on ioc->delayed_tr_list and retried
 * from _scsih_check_for_pending_tm().  Hidden raid members
 * (pd_handles) are skipped.  Any matching starget is flagged deleted
 * and its handle invalidated before the TM is sent.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;
	struct hba_port *port = NULL;

	/* no TM traffic while the PCI layer is recovering the adapter */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}

	/* hidden raid member: removal is handled via the volume, not here */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* first try to resolve the handle as a SAS device */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	/* not SAS: try the PCIe device list and pick the TM reset method */
	if (!sas_device) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
		    pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		/*
		 * NVMe (non SCSI) PCIe devices without custom TM handling
		 * use a protocol-level reset; everything else a link reset.
		 */
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
				    handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
						    (u64)sas_device->enclosure_logical_id,
						    sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
						    sas_device->enclosure_level,
						    sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
						    (u64)pcie_device->enclosure_logical_id,
						    pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
						    pcie_device->enclosure_level,
						    pcie_device->connector_name));
		}
		/* unblock so queued commands fail fast instead of hanging */
		_scsih_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		/* out of hi-priority smids: defer; GFP_ATOMIC (may be in irq) */
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		goto out;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	/* cleared by _scsih_sas_control_complete() on success */
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

out:
	/* drop references taken by the lookup helpers above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
/**
 * _scsih_tm_tr_complete - target-reset TM completion handler
 * @ioc: per adapter object
 * @smid: system request message index of the completed TM
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame address
 *
 * Runs in interrupt context.  On a valid reply, chains the second
 * stage of device removal: a SAS IO unit control REMOVE_DEVICE
 * request.  If no free smid is available the removal is queued on
 * ioc->delayed_sc_list.  Finally kicks any pending delayed TMs.
 *
 * Return: 1 when the caller should free the reply frame, 0 when the
 * smid was reused for a delayed request.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* cross-check the reply against the original request frame */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle,
				   le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* stage two: SAS IO unit control REMOVE_DEVICE for this handle */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		/* no free smid: defer; GFP_ATOMIC (interrupt context) */
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
				    handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	/* already little-endian; copied from the request frame as-is */
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	return _scsih_check_for_pending_tm(ioc, smid);
}
4250
4251
4252
4253
4254
4255
4256
4257
4258inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4259 struct scsi_cmnd *scmd)
4260{
4261
4262 if (ioc->pci_error_recovery)
4263 return false;
4264
4265 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4266 if (ioc->remove_host)
4267 return false;
4268
4269 return true;
4270 }
4271
4272 if (ioc->remove_host) {
4273
4274 switch (scmd->cmnd[0]) {
4275 case SYNCHRONIZE_CACHE:
4276 case START_STOP:
4277 return true;
4278 default:
4279 return false;
4280 }
4281 }
4282
4283 return true;
4284}
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301static u8
4302_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4303 u8 msix_index, u32 reply)
4304{
4305 Mpi2SasIoUnitControlReply_t *mpi_reply =
4306 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4307
4308 if (likely(mpi_reply)) {
4309 dewtprintk(ioc,
4310 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4311 le16_to_cpu(mpi_reply->DevHandle), smid,
4312 le16_to_cpu(mpi_reply->IOCStatus),
4313 le32_to_cpu(mpi_reply->IOCLogInfo)));
4314 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4315 MPI2_IOCSTATUS_SUCCESS) {
4316 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4317 ioc->device_remove_in_progress);
4318 }
4319 } else {
4320 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4321 __FILE__, __LINE__, __func__);
4322 }
4323 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4324}
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
/**
 * _scsih_tm_tr_volume_send - send a target-reset TM for a raid volume
 * @ioc: per adapter object
 * @handle: volume device handle
 *
 * Issue a high-priority TARGET_RESET for a deleted raid volume.  If no
 * hi-priority smid is free the request is queued on
 * ioc->delayed_tr_volume_list and replayed from
 * _scsih_check_for_pending_tm().
 */
static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _tr_list *delayed_tr;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
	if (!smid) {
		/* no free hi-priority smid: defer; allocation failure is
		 * silently dropped here (no goto-out cleanup needed) */
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		return;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_volume_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	ioc->put_smid_hi_priority(ioc, smid, 0);
}
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
/**
 * _scsih_tm_volume_tr_complete - volume target-reset completion handler
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame address
 *
 * Runs in interrupt context.  Validates the reply against the original
 * request, logs it, then drains any pending delayed TM requests.
 *
 * Return: 1 when the caller should free the reply frame, 0 when the
 * smid was reused for a delayed request.
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* cross-check the reply against the original request frame */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle, le16_to_cpu(mpi_reply->DevHandle),
				   smid));
		return 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	return _scsih_check_for_pending_tm(ioc, smid);
}
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
/**
 * _scsih_issue_delayed_event_ack - send a deferred EVENT_ACK
 * @ioc: per adapter object
 * @smid: freed internal smid being reused for this request
 * @event: firmware event code (little-endian, as received)
 * @event_context: event context from the original notification
 *
 * Replays an event acknowledgment that could not be sent earlier for
 * lack of a free smid.  The smid's callback index is repointed to the
 * base driver handler so the completion is routed correctly.
 */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
	U32 event_context)
{
	Mpi2EventAckRequest_t *ack_request;
	/* index into ioc->internal_lookup[] for this internal smid */
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* route this smid's completion to the base driver callback */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
			    le16_to_cpu(event), smid, ioc->base_cb_idx));
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	/* event/event_context already little-endian; stored verbatim */
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0;
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - send a deferred REMOVE_DEVICE
 * @ioc: per adapter object
 * @smid: freed internal smid being reused for this request
 * @handle: device handle to remove
 *
 * Replays a SAS IO unit control REMOVE_DEVICE that was queued on
 * ioc->delayed_sc_list when no smid was free.  Bails out if the host
 * has been removed, is in PCI error recovery, or the IOC is no longer
 * operational.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	/* index into ioc->internal_lookup[] for this internal smid */
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	if (ioc->remove_host) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host has been removed\n",
				    __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return;
	}

	/* route this smid's completion to the sas-control callback */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533u8
4534mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4535{
4536 struct _sc_list *delayed_sc;
4537 struct _event_ack_list *delayed_event_ack;
4538
4539 if (!list_empty(&ioc->delayed_event_ack_list)) {
4540 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4541 struct _event_ack_list, list);
4542 _scsih_issue_delayed_event_ack(ioc, smid,
4543 delayed_event_ack->Event, delayed_event_ack->EventContext);
4544 list_del(&delayed_event_ack->list);
4545 kfree(delayed_event_ack);
4546 return 0;
4547 }
4548
4549 if (!list_empty(&ioc->delayed_sc_list)) {
4550 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4551 struct _sc_list, list);
4552 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4553 delayed_sc->handle);
4554 list_del(&delayed_sc->list);
4555 kfree(delayed_sc);
4556 return 0;
4557 }
4558 return 1;
4559}
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572static u8
4573_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4574{
4575 struct _tr_list *delayed_tr;
4576
4577 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4578 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4579 struct _tr_list, list);
4580 mpt3sas_base_free_smid(ioc, smid);
4581 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4582 list_del(&delayed_tr->list);
4583 kfree(delayed_tr);
4584 return 0;
4585 }
4586
4587 if (!list_empty(&ioc->delayed_tr_list)) {
4588 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4589 struct _tr_list, list);
4590 mpt3sas_base_free_smid(ioc, smid);
4591 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4592 list_del(&delayed_tr->list);
4593 kfree(delayed_tr);
4594 return 0;
4595 }
4596
4597 return 1;
4598}
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
/**
 * _scsih_check_topo_delete_events - fast-path SAS topology processing
 * @ioc: per adapter object
 * @event_data: SAS topology change list event payload
 *
 * Runs ahead of the firmware-event worker: sends target resets for
 * devices reported not-responding, blocks I/O on devices in the delay
 * window, and when an expander is going away marks any queued "added/
 * responding" events for the same expander to be ignored.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* kick off device removal for every not-responding phy entry */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	/* handle below num_phys means devices hang off the HBA directly */
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* expander in delay window: collect every child handle into
		 * blocking_handles under the node lock, then block each one */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* expander is gone: flag queued add/responding events for it so
	 * the worker thread skips them */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
/**
 * _scsih_check_pcie_topo_remove_events - fast-path PCIe topology processing
 * @ioc: per adapter object
 * @event_data: PCIe topology change list event payload
 *
 * PCIe counterpart of _scsih_check_topo_delete_events(): sends target
 * resets for not-responding ports, blocks I/O on delayed ports, and
 * marks queued add/responding events for a vanished switch as ignored.
 * NOTE(review): the SwitchStatus comparisons below use the SAS
 * MPI2_EVENT_SAS_TOPO_ES_* constants — presumably the numeric values
 * match the PCIe switch-status codes; confirm against the MPI 2.6 spec.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* kick off device removal for every not-responding port entry */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	/* no switch: devices are attached to the controller directly */
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);
		return;
	}

	if ((event_data->SwitchStatus
	    == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
	    (event_data->SwitchStatus ==
	    MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);

	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* switch is gone: flag queued add/responding events for it so
	 * the worker thread skips them */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data =
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
			    switch_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4752
4753
4754
4755
4756
4757
4758
4759
/**
 * _scsih_set_volume_delete_flag - mark a raid volume's target deleted
 * @ioc: per adapter object
 * @handle: raid volume device handle
 *
 * Under the raid device lock, find the volume by handle and set the
 * deleted flag on its starget private data so queued commands are
 * failed instead of retried.  No-op if the volume or its starget is
 * not (yet) present.
 */
static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device && raid_device->starget &&
	    raid_device->starget->hostdata) {
		sas_target_priv_data =
		    raid_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
				    handle, (u64)raid_device->wwid));
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
/**
 * _scsih_set_volume_handle_for_tr - stash up to two volume handles
 * @handle: candidate volume handle
 * @a: first slot (filled first)
 * @b: second slot (filled only after @a)
 *
 * Records @handle into the first free slot of the two-slot set {a, b}.
 * Zero handles and handles already present in either slot are ignored;
 * if both slots are occupied the handle is dropped.
 */
static void
_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
{
	/* ignore empty handles and duplicates of either slot */
	if (handle == 0 || *a == handle || *b == handle)
		return;
	if (*a == 0)
		*a = handle;
	else if (*b == 0)
		*b = handle;
}
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
/**
 * _scsih_check_ir_config_unhide_events - fast-path IR config processing
 * @ioc: per adapter object
 * @event_data: IR configuration change list event payload
 *
 * Three ordered passes over the config elements: (1) flag deleted/
 * removed volumes and remember up to two of their handles, (2) also
 * remember volumes whose members are being unhidden, then send volume
 * target resets for the remembered handles, (3) for each unhidden
 * physical disk, clear it from pd_handles and either send its device
 * reset immediately or queue it behind the pending volume reset.
 * Skipped entirely on warpdrive controllers.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	a = 0;
	b = 0;

	if (ioc->is_warpdrive)
		return;

	/* pass 1: mark deleted/removed volumes and collect their handles */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		/* foreign configs are not ours to tear down */
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* pass 2: collect volumes whose members are being unhidden */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* reset the (at most two) affected volumes before their members */
	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* pass 3: handle each unhidden physical disk */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		/* no longer a hidden raid member */
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/* parent volume reset is in flight: queue the disk
			 * reset behind it; BUG_ON on GFP_ATOMIC failure */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
					    handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
4895
4896
4897static void
4898_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4899 Mpi2EventDataIrVolume_t *event_data)
4900{
4901 u32 state;
4902
4903 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4904 return;
4905 state = le32_to_cpu(event_data->NewValue);
4906 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4907 MPI2_RAID_VOL_STATE_FAILED)
4908 _scsih_set_volume_delete_flag(ioc,
4909 le16_to_cpu(event_data->VolDevHandle));
4910}
4911
4912
4913
4914
4915
4916
4917
/**
 * _scsih_temp_threshold_events - log a temperature threshold event
 * @ioc: per adapter object
 * @event_data: temperature event payload
 *
 * Logs which of the four threshold bits tripped and the current
 * temperature.  On MPI2.5+ controllers, also reports a fault or
 * coredump if the IOC doorbell shows it entered one of those states.
 */
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataTemperature_t *event_data)
{
	u32 doorbell;
	/* only report sensors this adapter actually has */
	if (ioc->temp_sensors_count >= event_data->SensorNum) {
		/* Status bits 0-3 map to thresholds 0-3 */
		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
			event_data->SensorNum);
		ioc_err(ioc, "Current Temp In Celsius: %d\n",
			event_data->CurrentTemperature);
		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
			/* peek at the IOC state without waiting */
			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			}
		}
	}
}
4946
4947static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4948{
4949 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4950
4951 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4952 return 0;
4953
4954 if (pending)
4955 return test_and_set_bit(0, &priv->ata_command_pending);
4956
4957 clear_bit(0, &priv->ata_command_pending);
4958 return 0;
4959}
4960
4961
4962
4963
4964
4965
4966
4967
/**
 * _scsih_flush_running_cmds - complete all outstanding SCSI commands
 * @ioc: per adapter object
 *
 * Walk every scsiio smid, tear down the tracker and DMA mappings, and
 * complete the command back to the midlayer: DID_NO_CONNECT during
 * PCI error recovery or host removal, DID_RESET otherwise (so the
 * midlayer retries after the reset).
 */
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 smid;
	int count = 0;

	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		count++;
		/* release the ATA pass-through serialization bit */
		_scsih_set_satl_pending(scmd, false);
		st = scsi_cmd_priv(scmd);
		mpt3sas_base_clear_st(ioc, st);
		scsi_dma_unmap(scmd);
		if (ioc->pci_error_recovery || ioc->remove_host)
			scmd->result = DID_NO_CONNECT << 16;
		else
			scmd->result = DID_RESET << 16;
		scmd->scsi_done(scmd);
	}
	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
4993
4994
4995
4996
4997
4998
4999
5000
5001
/**
 * _scsih_setup_eedp - fill in protection-information (DIF) IO fields
 * @ioc: per adapter object
 * @scmd: command from the midlayer
 * @mpi_request: SCSI IO request frame being built
 *
 * Translate the midlayer's protection op/type into MPI EEDP flags.
 * Only READ_STRIP and WRITE_INSERT ops are handled (the controller
 * strips/inserts PI); anything else, or DIF type 0 / normal I/O,
 * leaves the frame untouched.
 */
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request)
{
	u16 eedp_flags;
	unsigned char prot_op = scsi_get_prot_op(scmd);
	unsigned char prot_type = scsi_get_prot_type(scmd);
	Mpi25SCSIIORequest_t *mpi_request_3v =
	    (Mpi25SCSIIORequest_t *)mpi_request;

	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
		return;

	if (prot_op == SCSI_PROT_READ_STRIP)
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
	else if (prot_op == SCSI_PROT_WRITE_INSERT)
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
	else
		return;

	switch (prot_type) {
	case SCSI_PROT_DIF_TYPE1:
	case SCSI_PROT_DIF_TYPE2:

		/* types 1/2 carry a reference tag: check it, increment it,
		 * and verify the guard; seed with the request's ref tag
		 * (big-endian per the T10 PI wire format) */
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
		break;

	case SCSI_PROT_DIF_TYPE3:

		/* type 3 has no meaningful reference tag: guard check only */
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;

		break;
	}

	mpi_request_3v->EEDPBlockSize =
	    cpu_to_le16(scmd->device->sector_size);

	if (ioc->is_gen35_ioc)
		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
5054
5055
5056
5057
5058
5059
5060static void
5061_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5062{
5063 u8 ascq;
5064
5065 switch (ioc_status) {
5066 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5067 ascq = 0x01;
5068 break;
5069 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5070 ascq = 0x02;
5071 break;
5072 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5073 ascq = 0x03;
5074 break;
5075 default:
5076 ascq = 0x00;
5077 break;
5078 }
5079 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
5080 ascq);
5081 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
5082 SAM_STAT_CHECK_CONDITION;
5083}
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096static int
5097scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5098{
5099 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5100 struct MPT3SAS_DEVICE *sas_device_priv_data;
5101 struct MPT3SAS_TARGET *sas_target_priv_data;
5102 struct _raid_device *raid_device;
5103 struct request *rq = scmd->request;
5104 int class;
5105 Mpi25SCSIIORequest_t *mpi_request;
5106 struct _pcie_device *pcie_device = NULL;
5107 u32 mpi_control;
5108 u16 smid;
5109 u16 handle;
5110
5111 if (ioc->logging_level & MPT_DEBUG_SCSI)
5112 scsi_print_command(scmd);
5113
5114 sas_device_priv_data = scmd->device->hostdata;
5115 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5116 scmd->result = DID_NO_CONNECT << 16;
5117 scmd->scsi_done(scmd);
5118 return 0;
5119 }
5120
5121 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5122 scmd->result = DID_NO_CONNECT << 16;
5123 scmd->scsi_done(scmd);
5124 return 0;
5125 }
5126
5127 sas_target_priv_data = sas_device_priv_data->sas_target;
5128
5129
5130 handle = sas_target_priv_data->handle;
5131 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5132 scmd->result = DID_NO_CONNECT << 16;
5133 scmd->scsi_done(scmd);
5134 return 0;
5135 }
5136
5137
5138 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5139
5140 return SCSI_MLQUEUE_HOST_BUSY;
5141 } else if (sas_target_priv_data->deleted) {
5142
5143 scmd->result = DID_NO_CONNECT << 16;
5144 scmd->scsi_done(scmd);
5145 return 0;
5146 } else if (sas_target_priv_data->tm_busy ||
5147 sas_device_priv_data->block) {
5148
5149 return SCSI_MLQUEUE_DEVICE_BUSY;
5150 }
5151
5152
5153
5154
5155
5156
5157 do {
5158 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5159 return SCSI_MLQUEUE_DEVICE_BUSY;
5160 } while (_scsih_set_satl_pending(scmd, true));
5161
5162 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5163 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5164 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5165 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5166 else
5167 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5168
5169
5170 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5171
5172 if (sas_device_priv_data->ncq_prio_enable) {
5173 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5174 if (class == IOPRIO_CLASS_RT)
5175 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5176 }
5177
5178
5179
5180 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5181 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5182 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5183 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5184
5185 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5186 if (!smid) {
5187 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5188 _scsih_set_satl_pending(scmd, false);
5189 goto out;
5190 }
5191 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5192 memset(mpi_request, 0, ioc->request_sz);
5193 _scsih_setup_eedp(ioc, scmd, mpi_request);
5194
5195 if (scmd->cmd_len == 32)
5196 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5197 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5198 if (sas_device_priv_data->sas_target->flags &
5199 MPT_TARGET_FLAGS_RAID_COMPONENT)
5200 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5201 else
5202 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5203 mpi_request->DevHandle = cpu_to_le16(handle);
5204 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5205 mpi_request->Control = cpu_to_le32(mpi_control);
5206 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5207 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5208 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5209 mpi_request->SenseBufferLowAddress =
5210 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5211 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5212 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5213 mpi_request->LUN);
5214 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5215
5216 if (mpi_request->DataLength) {
5217 pcie_device = sas_target_priv_data->pcie_dev;
5218 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5219 mpt3sas_base_free_smid(ioc, smid);
5220 _scsih_set_satl_pending(scmd, false);
5221 goto out;
5222 }
5223 } else
5224 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5225
5226 raid_device = sas_target_priv_data->raid_device;
5227 if (raid_device && raid_device->direct_io_enabled)
5228 mpt3sas_setup_direct_io(ioc, scmd,
5229 raid_device, mpi_request);
5230
5231 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5232 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5233 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5234 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5235 ioc->put_smid_fast_path(ioc, smid, handle);
5236 } else
5237 ioc->put_smid_scsi_io(ioc, smid,
5238 le16_to_cpu(mpi_request->DevHandle));
5239 } else
5240 ioc->put_smid_default(ioc, smid);
5241 return 0;
5242
5243 out:
5244 return SCSI_MLQUEUE_HOST_BUSY;
5245}
5246
5247
5248
5249
5250
5251
5252static void
5253_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5254{
5255 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5256
5257 data->skey = sense_buffer[1] & 0x0F;
5258 data->asc = sense_buffer[2];
5259 data->ascq = sense_buffer[3];
5260 } else {
5261
5262 data->skey = sense_buffer[2] & 0x0F;
5263 data->asc = sense_buffer[12];
5264 data->ascq = sense_buffer[13];
5265 }
5266}
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
/**
 * _scsih_scsi_ioc_info - translate a SCSI_IO reply into console warnings
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
 *
 * Decodes IOCStatus, SCSIStatus and SCSIState into descriptive strings
 * and prints the command, the addressed device, and the reply details.
 * Purely diagnostic: only printing; no driver state is modified.
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	/* scratch string owned by the adapter; see strcat usage below */
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* NOTE(review): loginfo 0x31170000 is silently suppressed --
	 * presumably a benign/expected firmware loginfo; confirm against
	 * the firmware loginfo tables.
	 */
	if (log_info == 0x31170000)
		return;

	/* map IOCStatus to a human-readable string */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* map SCSIStatus to a human-readable string */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* SCSIState is a bit mask; concatenate one token per set bit */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* identify the addressed device: volume, NVMe, or SAS */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	/* dump normalized sense data if the firmware captured any */
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}
5479
5480
5481
5482
5483
5484
5485
5486static void
5487_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5488{
5489 Mpi2SepReply_t mpi_reply;
5490 Mpi2SepRequest_t mpi_request;
5491 struct _sas_device *sas_device;
5492
5493 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5494 if (!sas_device)
5495 return;
5496
5497 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5498 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5499 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5500 mpi_request.SlotStatus =
5501 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5502 mpi_request.DevHandle = cpu_to_le16(handle);
5503 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5504 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5505 &mpi_request)) != 0) {
5506 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5507 __FILE__, __LINE__, __func__);
5508 goto out;
5509 }
5510 sas_device->pfa_led_on = 1;
5511
5512 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5513 dewtprintk(ioc,
5514 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5515 le16_to_cpu(mpi_reply.IOCStatus),
5516 le32_to_cpu(mpi_reply.IOCLogInfo)));
5517 goto out;
5518 }
5519out:
5520 sas_device_put(sas_device);
5521}
5522
5523
5524
5525
5526
5527
5528
5529static void
5530_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5531 struct _sas_device *sas_device)
5532{
5533 Mpi2SepReply_t mpi_reply;
5534 Mpi2SepRequest_t mpi_request;
5535
5536 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5537 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5538 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5539 mpi_request.SlotStatus = 0;
5540 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5541 mpi_request.DevHandle = 0;
5542 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5543 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5544 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5545 &mpi_request)) != 0) {
5546 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5547 __FILE__, __LINE__, __func__);
5548 return;
5549 }
5550
5551 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5552 dewtprintk(ioc,
5553 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5554 le16_to_cpu(mpi_reply.IOCStatus),
5555 le32_to_cpu(mpi_reply.IOCLogInfo)));
5556 return;
5557 }
5558}
5559
5560
5561
5562
5563
5564
5565
5566static void
5567_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5568{
5569 struct fw_event_work *fw_event;
5570
5571 fw_event = alloc_fw_event_work(0);
5572 if (!fw_event)
5573 return;
5574 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5575 fw_event->device_handle = handle;
5576 fw_event->ioc = ioc;
5577 _scsih_fw_event_add(ioc, fw_event);
5578 fw_event_work_put(fw_event);
5579}
5580
5581
5582
5583
5584
5585
5586
/**
 * _scsih_smart_predicted_fault - handle a SMART predicted-fault report
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks up the SAS device under sas_device_lock, skips RAID components
 * and volumes, optionally queues a PFA-LED-on event (IBM subsystem
 * vendor only), and inserts a synthesized
 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (SMART data) event into the ctl
 * event log so user space can observe it.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* device lookup and flag checks are done under sas_device_lock */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	/* only handle plain SAS devices; not raid components or volumes */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert a synthesized event into the ctl event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	    sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	/* ASC 0x5D = failure prediction threshold exceeded (SMART) */
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
out:
	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}
5651
5652
5653
5654
5655
5656
5657
5658
5659
5660
5661
5662
5663
/**
 * _scsih_io_done - scsi request completion callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame address (lower 32 bits)
 *
 * Completion handler for requests issued by scsih_qcmd().  Translates
 * the firmware's IOCStatus/SCSIStatus/SCSIState into a scmd->result,
 * copies sense data, and completes the command.
 *
 * Return: 1 meaning the message frame should be freed by the caller
 *         (no matching scmd), 0 meaning it is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	/* clear the SATL-pending workaround bit set in scsih_qcmd() */
	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* no reply frame means success with no status to decode */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * A RAID direct-IO that fails (other than task-terminated) is
	 * retried here through the normal (non-direct) path by reissuing
	 * the original CDB to the volume handle.
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	     != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}

	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* one-shot TLR snoop: disable TLR if the target rejected it */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/* zero-transfer underrun with a busy-class status is really a
	 * device-busy condition; let the SCSI status drive the result
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* ASC 0x5D = SMART failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* translate the masked IOCStatus into scmd->result */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/* NOTE(review): loginfo 0x31110630 treated as a limited-retry
		 * open failure; confirm meaning against firmware loginfo
		 * tables.  After 2 retries the device is offlined.
		 */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				     SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/* fabricate an ILLEGAL REQUEST / invalid opcode sense
			 * for a zero-length REPORT LUNS response
			 */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scmd->result = (DRIVER_SENSE << 24) |
			    SAM_STAT_CHECK_CONDITION;
			scmd->sense_buffer[0] = 0x70;
			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
			scmd->sense_buffer[12] = 0x20;
			scmd->sense_buffer[13] = 0;
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scmd->scsi_done(scmd);
	return 0;
}
5894
5895
5896
5897
5898
5899
5900
5901
/**
 * _scsih_update_vphys_after_reset - rebuild virtual-phy bookkeeping after
 *	a host reset
 * @ioc: per adapter object
 *
 * Marks every known virtual phy dirty, then walks SAS IO Unit page 0 to
 * rediscover which HBA phy each virtual phy now sits on, moving vphys
 * between hba_port entries (and creating entries) as needed so the
 * driver's port table matches the post-reset firmware view.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA phy's current state.
	 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
	    (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;

	/* scan each HBA phy looking for enabled virtual SES phys */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {

		/* skip phys with no negotiated link */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;

		/*
		 * Virtual phys are attached to the firmware's internal SES
		 * device, so only phys whose attached device reports the
		 * SEP device-info bit are candidates.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;

		/* resolve the SAS address of the attached SES device */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;

		/*
		 * Match this phy against the dirty vphy objects by SAS
		 * address, then reconcile its port membership.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {

				/* only dirty (not-yet-reconciled) vphys */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Identity is the SES SAS address the vphy
				 * was previously attached to.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;

				/* record the phy bit this vphy now occupies */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);

				/*
				 * Find (or create) the hba_port entry for the
				 * port id the firmware now reports for this
				 * phy.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
						&ioc->port_table_list);
				}

				/*
				 * If the vphy moved to a different port,
				 * transfer the phy bit, relink the vphy,
				 * and retarget the attached sas_device.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}

				/*
				 * A still-dirty target port gets reset so
				 * later discovery repopulates its address
				 * and phy mask.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}

				/* this vphy is now reconciled */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
out:
	kfree(sas_iounit_pg0);
}
6092
6093
6094
6095
6096
6097
6098
6099
6100static int
6101_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6102 struct hba_port *port_table)
6103{
6104 u16 sz, ioc_status;
6105 int i, j;
6106 Mpi2ConfigReply_t mpi_reply;
6107 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6108 u16 attached_handle;
6109 u64 attached_sas_addr;
6110 u8 found = 0, port_count = 0, port_id;
6111
6112 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6113 * sizeof(Mpi2SasIOUnit0PhyData_t));
6114 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6115 if (!sas_iounit_pg0) {
6116 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6117 __FILE__, __LINE__, __func__);
6118 return port_count;
6119 }
6120
6121 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6122 sas_iounit_pg0, sz)) != 0)
6123 goto out;
6124 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6125 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6126 goto out;
6127 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6128 found = 0;
6129 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6130 MPI2_SAS_NEG_LINK_RATE_1_5)
6131 continue;
6132 attached_handle =
6133 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6134 if (_scsih_get_sas_address(
6135 ioc, attached_handle, &attached_sas_addr) != 0) {
6136 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6137 __FILE__, __LINE__, __func__);
6138 continue;
6139 }
6140
6141 for (j = 0; j < port_count; j++) {
6142 port_id = sas_iounit_pg0->PhyData[i].Port;
6143 if (port_table[j].port_id == port_id &&
6144 port_table[j].sas_address == attached_sas_addr) {
6145 port_table[j].phy_mask |= (1 << i);
6146 found = 1;
6147 break;
6148 }
6149 }
6150
6151 if (found)
6152 continue;
6153
6154 port_id = sas_iounit_pg0->PhyData[i].Port;
6155 port_table[port_count].port_id = port_id;
6156 port_table[port_count].phy_mask = (1 << i);
6157 port_table[port_count].sas_address = attached_sas_addr;
6158 port_count++;
6159 }
6160out:
6161 kfree(sas_iounit_pg0);
6162 return port_count;
6163}
6164
/*
 * enum hba_port_matched_codes - result of matching a pre-reset hba_port
 * entry against the post-reset port table, in decreasing match strength.
 */
enum hba_port_matched_codes {
	NOT_MATCHED = 0,	/* no dirty entry shares the SAS address */
	MATCHED_WITH_ADDR_AND_PHYMASK,	/* address and full phy mask match */
	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,	/* address, phy-mask subset and port id match */
	MATCHED_WITH_ADDR_AND_SUBPHYMASK,	/* address and phy-mask subset match */
	MATCHED_WITH_ADDR,	/* only the address matches */
};
6172
6173
6174
6175
6176
6177
6178
6179
6180
6181
6182
6183
/**
 * _scsih_look_and_get_matched_port_entry - find the dirty hba_port entry
 *	that best matches a post-reset port entry
 * @ioc: per adapter object
 * @port_entry: post-reset port entry to match
 * @matched_port_entry: best-matching dirty entry, or NULL if none
 * @count: number of address-only matches (set only for MATCHED_WITH_ADDR)
 *
 * Scans dirty entries in ioc->port_table_list and applies four match
 * levels in decreasing strength: address+full phymask (terminates the
 * scan), address+phymask-subset+port-id, address+phymask-subset, and
 * address only.  A weaker match never overrides a stronger one already
 * found.
 *
 * Return: the strongest match level found.
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;
	*matched_port_entry = NULL;

	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		/* only dirty (not-yet-reconciled) entries are candidates */
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* exact match: address and identical phy mask */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* address, overlapping phy mask and same port id */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* address and overlapping phy mask only */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* weakest: address only; count how many such entries exist */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code ==  MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}
6240
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250
6251static void
6252_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6253 struct hba_port *port_table,
6254 int index, u8 port_count, int offset)
6255{
6256 struct _sas_node *sas_node = &ioc->sas_hba;
6257 u32 i, found = 0;
6258
6259 for (i = 0; i < port_count; i++) {
6260 if (i == index)
6261 continue;
6262
6263 if (port_table[i].phy_mask & (1 << offset)) {
6264 mpt3sas_transport_del_phy_from_an_existing_port(
6265 ioc, sas_node, &sas_node->phy[offset]);
6266 found = 1;
6267 break;
6268 }
6269 }
6270 if (!found)
6271 port_table[index].phy_mask |= (1 << offset);
6272}
6273
6274
6275
6276
6277
6278
6279
6280
6281
6282
6283
6284static void
6285_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6286 struct hba_port *hba_port_entry, struct hba_port *port_table,
6287 int index, int port_count)
6288{
6289 u32 phy_mask, offset = 0;
6290 struct _sas_node *sas_node = &ioc->sas_hba;
6291
6292 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6293
6294 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6295 if (phy_mask & (1 << offset)) {
6296 if (!(port_table[index].phy_mask & (1 << offset))) {
6297 _scsih_del_phy_part_of_anther_port(
6298 ioc, port_table, index, port_count,
6299 offset);
6300 continue;
6301 }
6302 if (sas_node->phy[offset].phy_belongs_to_port)
6303 mpt3sas_transport_del_phy_from_an_existing_port(
6304 ioc, sas_node, &sas_node->phy[offset]);
6305 mpt3sas_transport_add_phy_to_an_existing_port(
6306 ioc, sas_node, &sas_node->phy[offset],
6307 hba_port_entry->sas_address,
6308 hba_port_entry);
6309 }
6310 }
6311}
6312
6313
6314
6315
6316
6317
6318
6319static void
6320_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6321{
6322 struct hba_port *port, *port_next;
6323 struct virtual_phy *vphy, *vphy_next;
6324
6325 list_for_each_entry_safe(port, port_next,
6326 &ioc->port_table_list, list) {
6327 if (!port->vphys_mask)
6328 continue;
6329 list_for_each_entry_safe(vphy, vphy_next,
6330 &port->vphys_list, list) {
6331 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6332 drsprintk(ioc, ioc_info(ioc,
6333 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6334 vphy, port->port_id,
6335 vphy->phy_mask));
6336 port->vphys_mask &= ~vphy->phy_mask;
6337 list_del(&vphy->list);
6338 kfree(vphy);
6339 }
6340 }
6341 if (!port->vphys_mask && !port->sas_address)
6342 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6343 }
6344}
6345
6346
6347
6348
6349
6350
6351
6352static void
6353_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6354{
6355 struct hba_port *port, *port_next;
6356
6357 list_for_each_entry_safe(port, port_next,
6358 &ioc->port_table_list, list) {
6359 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6360 port->flags & HBA_PORT_FLAG_NEW_PORT)
6361 continue;
6362
6363 drsprintk(ioc, ioc_info(ioc,
6364 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6365 port, port->port_id, port->phy_mask));
6366 list_del(&port->list);
6367 kfree(port);
6368 }
6369}
6370
6371
6372
6373
6374
6375static void
6376_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6377{
6378 u32 port_count = 0;
6379 struct hba_port *port_table;
6380 struct hba_port *port_table_entry;
6381 struct hba_port *port_entry = NULL;
6382 int i, j, count = 0, lcount = 0;
6383 int ret;
6384 u64 sas_addr;
6385
6386 drsprintk(ioc, ioc_info(ioc,
6387 "updating ports for sas_host(0x%016llx)\n",
6388 (unsigned long long)ioc->sas_hba.sas_address));
6389
6390 port_table = kcalloc(ioc->sas_hba.num_phys,
6391 sizeof(struct hba_port), GFP_KERNEL);
6392 if (!port_table)
6393 return;
6394
6395 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6396 if (!port_count)
6397 return;
6398
6399 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6400 for (j = 0; j < port_count; j++)
6401 drsprintk(ioc, ioc_info(ioc,
6402 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6403 port_table[j].port_id,
6404 port_table[j].phy_mask, port_table[j].sas_address));
6405
6406 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6407 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6408
6409 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6410 port_table_entry = NULL;
6411 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6412 drsprintk(ioc, ioc_info(ioc,
6413 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6414 port_table_entry->port_id,
6415 port_table_entry->phy_mask,
6416 port_table_entry->sas_address));
6417 }
6418
6419 for (j = 0; j < port_count; j++) {
6420 ret = _scsih_look_and_get_matched_port_entry(ioc,
6421 &port_table[j], &port_entry, &count);
6422 if (!port_entry) {
6423 drsprintk(ioc, ioc_info(ioc,
6424 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6425 port_table[j].sas_address,
6426 port_table[j].port_id));
6427 continue;
6428 }
6429
6430 switch (ret) {
6431 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6432 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6433 _scsih_add_or_del_phys_from_existing_port(ioc,
6434 port_entry, port_table, j, port_count);
6435 break;
6436 case MATCHED_WITH_ADDR:
6437 sas_addr = port_table[j].sas_address;
6438 for (i = 0; i < port_count; i++) {
6439 if (port_table[i].sas_address == sas_addr)
6440 lcount++;
6441 }
6442
6443 if (count > 1 || lcount > 1)
6444 port_entry = NULL;
6445 else
6446 _scsih_add_or_del_phys_from_existing_port(ioc,
6447 port_entry, port_table, j, port_count);
6448 }
6449
6450 if (!port_entry)
6451 continue;
6452
6453 if (port_entry->port_id != port_table[j].port_id)
6454 port_entry->port_id = port_table[j].port_id;
6455 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6456 port_entry->phy_mask = port_table[j].phy_mask;
6457 }
6458
6459 port_table_entry = NULL;
6460}
6461
6462
6463
6464
6465
6466
6467
6468
6469
6470static struct virtual_phy *
6471_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6472{
6473 struct virtual_phy *vphy;
6474 struct hba_port *port;
6475
6476 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6477 if (!port)
6478 return NULL;
6479
6480 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6481 if (!vphy) {
6482 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6483 if (!vphy)
6484 return NULL;
6485
6486 if (!port->vphys_mask)
6487 INIT_LIST_HEAD(&port->vphys_list);
6488
6489
6490
6491
6492
6493 port->vphys_mask |= (1 << phy_num);
6494 vphy->phy_mask |= (1 << phy_num);
6495
6496 list_add_tail(&vphy->list, &port->vphys_list);
6497
6498 ioc_info(ioc,
6499 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6500 vphy, port->port_id, phy_num);
6501 }
6502 return vphy;
6503}
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
/**
 * _scsih_sas_host_refresh - refresh the controller's phy/port state
 * @ioc: per adapter object
 *
 * Re-reads SAS IO Unit page 0 and, for each HBA phy: refreshes the
 * controller device handle, creates an hba_port entry for any port id
 * not yet in the port table, (re)allocates a virtual_phy for phys the
 * firmware reports as virtual, and pushes the attached handle and link
 * rate to the transport layer.
 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	/* Page size scales with the number of HBA phys. */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		/* High nibble of NegotiatedLinkRate is the logical rate. */
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* First sighting of this port id: create an hba_port. */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * A phy with an attached SEP-capable device and an active
		 * link may be a virtual phy; confirm via SAS PHY page 0.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate (or find) the virtual_phy entry for this
			 * phy on its port.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* Clamp to the minimum rate when something is attached. */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
 out:
	kfree(sas_iounit_pg0);
}
6604
6605
6606
6607
6608
6609
6610
/**
 * _scsih_sas_host_add - create the sas host object
 * @ioc: per adapter object
 *
 * Populates ioc->sas_hba from SAS IO Unit pages 0/1 and per-phy PHY
 * page 0: allocates the phy array, records device-missing delays,
 * creates hba_port entries, registers each phy with the transport
 * layer, and finally reads the host's own device/enclosure pages.
 */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	ioc->sas_hba.phy = kcalloc(num_phys,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	/* Delay is either in units of 16 seconds or of 1 second. */
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}

		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* First sighting of this port id: create an hba_port. */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * A phy flagged virtual (with an active link) gets a
		 * virtual_phy entry on its port.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/* Allocate a virtual_phy object for this phy. */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 ioc->sas_hba.handle,
		 (u64)ioc->sas_hba.sas_address,
		 ioc->sas_hba.num_phys);

	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		    ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}
6781
6782
6783
6784
6785
6786
6787
6788
6789
6790
6791static int
6792_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6793{
6794 struct _sas_node *sas_expander;
6795 struct _enclosure_node *enclosure_dev;
6796 Mpi2ConfigReply_t mpi_reply;
6797 Mpi2ExpanderPage0_t expander_pg0;
6798 Mpi2ExpanderPage1_t expander_pg1;
6799 u32 ioc_status;
6800 u16 parent_handle;
6801 u64 sas_address, sas_address_parent = 0;
6802 int i;
6803 unsigned long flags;
6804 struct _sas_port *mpt3sas_port = NULL;
6805 u8 port_id;
6806
6807 int rc = 0;
6808
6809 if (!handle)
6810 return -1;
6811
6812 if (ioc->shost_recovery || ioc->pci_error_recovery)
6813 return -1;
6814
6815 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6816 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6817 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6818 __FILE__, __LINE__, __func__);
6819 return -1;
6820 }
6821
6822 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6823 MPI2_IOCSTATUS_MASK;
6824 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6825 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6826 __FILE__, __LINE__, __func__);
6827 return -1;
6828 }
6829
6830
6831 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6832 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6833 != 0) {
6834 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6835 __FILE__, __LINE__, __func__);
6836 return -1;
6837 }
6838
6839 port_id = expander_pg0.PhysicalPort;
6840 if (sas_address_parent != ioc->sas_hba.sas_address) {
6841 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6842 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6843 sas_address_parent,
6844 mpt3sas_get_port_by_id(ioc, port_id, 0));
6845 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6846 if (!sas_expander) {
6847 rc = _scsih_expander_add(ioc, parent_handle);
6848 if (rc != 0)
6849 return rc;
6850 }
6851 }
6852
6853 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6854 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6855 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6856 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6857 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6858
6859 if (sas_expander)
6860 return 0;
6861
6862 sas_expander = kzalloc(sizeof(struct _sas_node),
6863 GFP_KERNEL);
6864 if (!sas_expander) {
6865 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6866 __FILE__, __LINE__, __func__);
6867 return -1;
6868 }
6869
6870 sas_expander->handle = handle;
6871 sas_expander->num_phys = expander_pg0.NumPhys;
6872 sas_expander->sas_address_parent = sas_address_parent;
6873 sas_expander->sas_address = sas_address;
6874 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6875 if (!sas_expander->port) {
6876 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6877 __FILE__, __LINE__, __func__);
6878 rc = -1;
6879 goto out_fail;
6880 }
6881
6882 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6883 handle, parent_handle,
6884 (u64)sas_expander->sas_address, sas_expander->num_phys);
6885
6886 if (!sas_expander->num_phys)
6887 goto out_fail;
6888 sas_expander->phy = kcalloc(sas_expander->num_phys,
6889 sizeof(struct _sas_phy), GFP_KERNEL);
6890 if (!sas_expander->phy) {
6891 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6892 __FILE__, __LINE__, __func__);
6893 rc = -1;
6894 goto out_fail;
6895 }
6896
6897 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6898 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6899 sas_address_parent, sas_expander->port);
6900 if (!mpt3sas_port) {
6901 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6902 __FILE__, __LINE__, __func__);
6903 rc = -1;
6904 goto out_fail;
6905 }
6906 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6907 sas_expander->rphy = mpt3sas_port->rphy;
6908
6909 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6910 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6911 &expander_pg1, i, handle))) {
6912 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6913 __FILE__, __LINE__, __func__);
6914 rc = -1;
6915 goto out_fail;
6916 }
6917 sas_expander->phy[i].handle = handle;
6918 sas_expander->phy[i].phy_id = i;
6919 sas_expander->phy[i].port =
6920 mpt3sas_get_port_by_id(ioc, port_id, 0);
6921
6922 if ((mpt3sas_transport_add_expander_phy(ioc,
6923 &sas_expander->phy[i], expander_pg1,
6924 sas_expander->parent_dev))) {
6925 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6926 __FILE__, __LINE__, __func__);
6927 rc = -1;
6928 goto out_fail;
6929 }
6930 }
6931
6932 if (sas_expander->enclosure_handle) {
6933 enclosure_dev =
6934 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6935 sas_expander->enclosure_handle);
6936 if (enclosure_dev)
6937 sas_expander->enclosure_logical_id =
6938 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6939 }
6940
6941 _scsih_expander_node_add(ioc, sas_expander);
6942 return 0;
6943
6944 out_fail:
6945
6946 if (mpt3sas_port)
6947 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6948 sas_address_parent, sas_expander->port);
6949 kfree(sas_expander);
6950 return rc;
6951}
6952
6953
6954
6955
6956
6957
6958void
6959mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6960 struct hba_port *port)
6961{
6962 struct _sas_node *sas_expander;
6963 unsigned long flags;
6964
6965 if (ioc->shost_recovery)
6966 return;
6967
6968 if (!port)
6969 return;
6970
6971 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6972 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6973 sas_address, port);
6974 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6975 if (sas_expander)
6976 _scsih_expander_node_remove(ioc, sas_expander);
6977}
6978
6979
6980
6981
6982
6983
6984
6985
6986
6987
6988
6989
6990
6991
6992static u8
6993_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6994{
6995 MPI2DefaultReply_t *mpi_reply;
6996
6997 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
6998 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
6999 return 1;
7000 if (ioc->scsih_cmds.smid != smid)
7001 return 1;
7002 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7003 if (mpi_reply) {
7004 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7005 mpi_reply->MsgLength*4);
7006 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7007 }
7008 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7009 complete(&ioc->scsih_cmds.done);
7010 return 1;
7011}
7012
7013
7014
7015
/* Upper bound on LUNs handled by this driver. */
#define MPT3_MAX_LUNS (255)
7017
7018
7019
7020
7021
7022
7023
7024
7025
7026
7027
7028static u8
7029_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7030 u16 handle, u8 access_status)
7031{
7032 u8 rc = 1;
7033 char *desc = NULL;
7034
7035 switch (access_status) {
7036 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7037 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7038 rc = 0;
7039 break;
7040 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7041 desc = "sata capability failed";
7042 break;
7043 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7044 desc = "sata affiliation conflict";
7045 break;
7046 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7047 desc = "route not addressable";
7048 break;
7049 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7050 desc = "smp error not addressable";
7051 break;
7052 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7053 desc = "device blocked";
7054 break;
7055 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7056 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7057 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7058 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7059 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7060 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7061 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7062 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7063 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7064 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7065 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7066 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7067 desc = "sata initialization failed";
7068 break;
7069 default:
7070 desc = "unknown";
7071 break;
7072 }
7073
7074 if (!rc)
7075 return 0;
7076
7077 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7078 desc, (u64)sas_address, handle);
7079 return rc;
7080}
7081
7082
7083
7084
7085
7086
7087
7088
7089
/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 *
 * Re-reads SAS device page 0 for @handle; if the device matches an
 * existing sas_device entry (possibly under a new handle, which is
 * then updated) and is still present and accessible, un-blocks its
 * I/O.
 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/*
	 * Wide port handling: only act on the phy recorded in the
	 * device page, so the device is handled once.
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	/* Firmware may re-assign handles; track the change. */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
			"handle changed from(0x%04x) to (0x%04x)!!!\n",
			sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
				sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
				sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
			sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}
7198
7199
7200
7201
7202
7203
7204
7205
7206
7207
7208
7209
7210static int
7211_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7212 u8 is_pd)
7213{
7214 Mpi2ConfigReply_t mpi_reply;
7215 Mpi2SasDevicePage0_t sas_device_pg0;
7216 struct _sas_device *sas_device;
7217 struct _enclosure_node *enclosure_dev = NULL;
7218 u32 ioc_status;
7219 u64 sas_address;
7220 u32 device_info;
7221 u8 port_id;
7222
7223 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7224 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7225 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7226 __FILE__, __LINE__, __func__);
7227 return -1;
7228 }
7229
7230 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7231 MPI2_IOCSTATUS_MASK;
7232 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7233 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7234 __FILE__, __LINE__, __func__);
7235 return -1;
7236 }
7237
7238
7239 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7240 if (!(_scsih_is_end_device(device_info)))
7241 return -1;
7242 set_bit(handle, ioc->pend_os_device_add);
7243 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7244
7245
7246 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7247 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7248 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7249 handle);
7250 return -1;
7251 }
7252
7253
7254 if (_scsih_check_access_status(ioc, sas_address, handle,
7255 sas_device_pg0.AccessStatus))
7256 return -1;
7257
7258 port_id = sas_device_pg0.PhysicalPort;
7259 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7260 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7261 if (sas_device) {
7262 clear_bit(handle, ioc->pend_os_device_add);
7263 sas_device_put(sas_device);
7264 return -1;
7265 }
7266
7267 if (sas_device_pg0.EnclosureHandle) {
7268 enclosure_dev =
7269 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7270 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7271 if (enclosure_dev == NULL)
7272 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7273 sas_device_pg0.EnclosureHandle);
7274 }
7275
7276 sas_device = kzalloc(sizeof(struct _sas_device),
7277 GFP_KERNEL);
7278 if (!sas_device) {
7279 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7280 __FILE__, __LINE__, __func__);
7281 return 0;
7282 }
7283
7284 kref_init(&sas_device->refcount);
7285 sas_device->handle = handle;
7286 if (_scsih_get_sas_address(ioc,
7287 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7288 &sas_device->sas_address_parent) != 0)
7289 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7290 __FILE__, __LINE__, __func__);
7291 sas_device->enclosure_handle =
7292 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7293 if (sas_device->enclosure_handle != 0)
7294 sas_device->slot =
7295 le16_to_cpu(sas_device_pg0.Slot);
7296 sas_device->device_info = device_info;
7297 sas_device->sas_address = sas_address;
7298 sas_device->phy = sas_device_pg0.PhyNum;
7299 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7300 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7301 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7302 if (!sas_device->port) {
7303 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7304 __FILE__, __LINE__, __func__);
7305 goto out;
7306 }
7307
7308 if (le16_to_cpu(sas_device_pg0.Flags)
7309 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7310 sas_device->enclosure_level =
7311 sas_device_pg0.EnclosureLevel;
7312 memcpy(sas_device->connector_name,
7313 sas_device_pg0.ConnectorName, 4);
7314 sas_device->connector_name[4] = '\0';
7315 } else {
7316 sas_device->enclosure_level = 0;
7317 sas_device->connector_name[0] = '\0';
7318 }
7319
7320 sas_device->is_chassis_slot_valid = 0;
7321 if (enclosure_dev) {
7322 sas_device->enclosure_logical_id =
7323 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7324 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7325 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7326 sas_device->is_chassis_slot_valid = 1;
7327 sas_device->chassis_slot =
7328 enclosure_dev->pg0.ChassisSlot;
7329 }
7330 }
7331
7332
7333 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7334
7335 if (ioc->wait_for_discovery_to_complete)
7336 _scsih_sas_device_init_add(ioc, sas_device);
7337 else
7338 _scsih_sas_device_add(ioc, sas_device);
7339
7340out:
7341 sas_device_put(sas_device);
7342 return 0;
7343}
7344
7345
7346
7347
7348
7349
/**
 * _scsih_remove_device - removing sas device object
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Marks the target deleted, unblocks any pending I/O so it can fail
 * back, and removes the device's transport port (unless drives are
 * hidden, e.g. behind a raid volume).
 */
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* Turn off the IBM-specific predictive-failure LED if it is on. */
	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
	     (sas_device->pfa_led_on)) {
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Flag the target deleted, then release any blocked I/O. */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}
7397
7398
7399
7400
7401
7402
7403
7404static void
7405_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7406 Mpi2EventDataSasTopologyChangeList_t *event_data)
7407{
7408 int i;
7409 u16 handle;
7410 u16 reason_code;
7411 u8 phy_number;
7412 char *status_str = NULL;
7413 u8 link_rate, prev_link_rate;
7414
7415 switch (event_data->ExpStatus) {
7416 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7417 status_str = "add";
7418 break;
7419 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7420 status_str = "remove";
7421 break;
7422 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7423 case 0:
7424 status_str = "responding";
7425 break;
7426 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7427 status_str = "remove delay";
7428 break;
7429 default:
7430 status_str = "unknown status";
7431 break;
7432 }
7433 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7434 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7435 "start_phy(%02d), count(%d)\n",
7436 le16_to_cpu(event_data->ExpanderDevHandle),
7437 le16_to_cpu(event_data->EnclosureHandle),
7438 event_data->StartPhyNum, event_data->NumEntries);
7439 for (i = 0; i < event_data->NumEntries; i++) {
7440 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7441 if (!handle)
7442 continue;
7443 phy_number = event_data->StartPhyNum + i;
7444 reason_code = event_data->PHY[i].PhyStatus &
7445 MPI2_EVENT_SAS_TOPO_RC_MASK;
7446 switch (reason_code) {
7447 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7448 status_str = "target add";
7449 break;
7450 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7451 status_str = "target remove";
7452 break;
7453 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7454 status_str = "delay target remove";
7455 break;
7456 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7457 status_str = "link rate change";
7458 break;
7459 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7460 status_str = "target responding";
7461 break;
7462 default:
7463 status_str = "unknown";
7464 break;
7465 }
7466 link_rate = event_data->PHY[i].LinkRate >> 4;
7467 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7468 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7469 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7470 handle, status_str, link_rate, prev_link_rate);
7471
7472 }
7473}
7474
7475
7476
7477
7478
7479
7480
7481
/**
 * _scsih_sas_topology_change_event - handle a SAS topology change list event
 * @ioc: per adapter object
 * @fw_event: firmware event work object carrying the event payload
 *
 * Walks each PHY entry of the event and adds devices, removes devices,
 * or updates link rates accordingly.  Also adds/removes the parent
 * expander when the expander-level status says so.
 *
 * Return: always 0 (the event is consumed either way).
 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	struct hba_port *port;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	/* Nothing to do while the host is being reset or torn down. */
	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	/* Lazily create the host's SAS node on the first event, otherwise
	 * refresh its phy state.
	 */
	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	/* fw_event->ignore is set when a later event supersedes this one. */
	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	/* Resolve the parent (expander or direct-attached HBA phy) under
	 * the node lock; copies are taken so the lock can be dropped for
	 * the per-phy processing below.
	 */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
		port = sas_expander->port;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* Re-check cancellation inside the loop: the event may be
		 * invalidated while earlier entries are being processed.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* Skip vacant phys unless the device behind them is going
		 * away (removal must still be processed).
		 */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		/* LinkRate: new rate in high nibble, previous in low */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			/* If the OS has not seen this device yet, fall
			 * through and treat the rate change as an add.
			 */
			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address, port);

	return 0;
}
7610
7611
7612
7613
7614
7615
7616
7617static void
7618_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7619 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7620{
7621 char *reason_str = NULL;
7622
7623 switch (event_data->ReasonCode) {
7624 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7625 reason_str = "smart data";
7626 break;
7627 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7628 reason_str = "unsupported device discovered";
7629 break;
7630 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7631 reason_str = "internal device reset";
7632 break;
7633 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7634 reason_str = "internal task abort";
7635 break;
7636 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7637 reason_str = "internal task abort set";
7638 break;
7639 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7640 reason_str = "internal clear task set";
7641 break;
7642 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7643 reason_str = "internal query task";
7644 break;
7645 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7646 reason_str = "sata init failure";
7647 break;
7648 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7649 reason_str = "internal device reset complete";
7650 break;
7651 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7652 reason_str = "internal task abort complete";
7653 break;
7654 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7655 reason_str = "internal async notification";
7656 break;
7657 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7658 reason_str = "expander reduced functionality";
7659 break;
7660 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7661 reason_str = "expander reduced functionality complete";
7662 break;
7663 default:
7664 reason_str = "unknown reason";
7665 break;
7666 }
7667 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7668 reason_str, le16_to_cpu(event_data->DevHandle),
7669 (u64)le64_to_cpu(event_data->SASAddress),
7670 le16_to_cpu(event_data->TaskTag));
7671 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7672 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7673 event_data->ASC, event_data->ASCQ);
7674 pr_cont("\n");
7675}
7676
7677
7678
7679
7680
7681
7682
/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @event_data: SAS device status change event payload
 *
 * Tracks firmware-internal device resets by toggling the target's
 * tm_busy flag: set when an internal reset starts, cleared when the
 * reset completes.  All other reason codes are ignored here.
 */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* Older firmware (header version < 0xC in the major byte) does not
	 * report these reason codes reliably; skip the event entirely.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	/* Lookup and flag update are done under sas_device_lock; the
	 * reference taken by the lookup is dropped at "out".
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address,
	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy blocks new task management while an internal reset runs */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
		    "%s tm_busy flag for handle(0x%04x)\n",
		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
		    target_priv_data->handle);

out:
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
7735
7736
7737
7738
7739
7740
7741
7742
7743
7744
7745
7746static u8
7747_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7748 u16 handle, u8 access_status)
7749{
7750 u8 rc = 1;
7751 char *desc = NULL;
7752
7753 switch (access_status) {
7754 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7755 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7756 rc = 0;
7757 break;
7758 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7759 desc = "PCIe device capability failed";
7760 break;
7761 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7762 desc = "PCIe device blocked";
7763 ioc_info(ioc,
7764 "Device with Access Status (%s): wwid(0x%016llx), "
7765 "handle(0x%04x)\n ll only be added to the internal list",
7766 desc, (u64)wwid, handle);
7767 rc = 0;
7768 break;
7769 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7770 desc = "PCIe device mem space access failed";
7771 break;
7772 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7773 desc = "PCIe device unsupported";
7774 break;
7775 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7776 desc = "PCIe device MSIx Required";
7777 break;
7778 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7779 desc = "PCIe device init fail max";
7780 break;
7781 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7782 desc = "PCIe device status unknown";
7783 break;
7784 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7785 desc = "nvme ready timeout";
7786 break;
7787 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7788 desc = "nvme device configuration unsupported";
7789 break;
7790 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7791 desc = "nvme identify failed";
7792 break;
7793 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7794 desc = "nvme qconfig failed";
7795 break;
7796 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7797 desc = "nvme qcreation failed";
7798 break;
7799 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7800 desc = "nvme eventcfg failed";
7801 break;
7802 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7803 desc = "nvme get feature stat failed";
7804 break;
7805 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7806 desc = "nvme idle timeout";
7807 break;
7808 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7809 desc = "nvme failure status";
7810 break;
7811 default:
7812 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7813 access_status, (u64)wwid, handle);
7814 return rc;
7815 }
7816
7817 if (!rc)
7818 return rc;
7819
7820 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7821 desc, (u64)wwid, handle);
7822 return rc;
7823}
7824
7825
7826
7827
7828
7829
7830
/**
 * _scsih_pcie_device_remove_from_sml - remove a PCIe device from the SML
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object being removed
 *
 * Marks the target deleted, unblocks pending I/O (so it can complete or
 * be failed back), then removes the scsi target.  Enclosure/connector
 * details are logged only when present.  Does not free @pcie_device
 * itself; only its serial_number allocation is released here.
 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Invalidate the firmware handle and unblock queued I/O before the
	 * target is removed from the midlayer.
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* Blocked devices were never exposed to the SML, so there is no
	 * scsi target to remove for them.
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	kfree(pcie_device->serial_number);
}
7894
7895
7896
7897
7898
7899
7900
/**
 * _scsih_pcie_check_device - re-validate a known PCIe device after an event
 * @ioc: per adapter object
 * @handle: firmware device handle
 *
 * Re-reads PCIe Device Page 0 for @handle.  If the device is already in
 * the driver's list under a different handle (handles can change across
 * firmware events), the cached handle and enclosure info are refreshed.
 * If the device is still present and accessible, its I/O is unblocked.
 */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u32 ioc_status;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
		return;

	/* Lookup by WWID (stable across handle changes); the reference is
	 * dropped before returning on every path below.
	 */
	wwid = le64_to_cpu(pcie_device_pg0.WWID);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return;
	}

	/* Firmware re-assigned the handle: refresh cached state.
	 * NOTE(review): starget is dereferenced here without a NULL check —
	 * presumed always valid once the device was announced; confirm.
	 */
	if (unlikely(pcie_device->handle != handle)) {
		starget = pcie_device->starget;
		sas_target_priv_data = starget->hostdata;
		pcie_device->access_status = pcie_device_pg0.AccessStatus;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    pcie_device->handle, handle);
		sas_target_priv_data->handle = handle;
		pcie_device->handle = handle;

		if (le32_to_cpu(pcie_device_pg0.Flags) &
		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
			pcie_device->enclosure_level =
			    pcie_device_pg0.EnclosureLevel;
			memcpy(&pcie_device->connector_name[0],
			    &pcie_device_pg0.ConnectorName[0], 4);
		} else {
			pcie_device->enclosure_level = 0;
			pcie_device->connector_name[0] = '\0';
		}
	}

	/* check if device is present */
	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
			 handle);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
	    pcie_device_pg0.AccessStatus)) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	pcie_device_put(pcie_device);

	/* Device checks out: let any blocked I/O flow again. */
	_scsih_ublock_io_device(ioc, wwid, NULL);

	return;
}
7983
7984
7985
7986
7987
7988
7989
7990
7991
7992
7993static int
7994_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7995{
7996 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7997 Mpi26PCIeDevicePage2_t pcie_device_pg2;
7998 Mpi2ConfigReply_t mpi_reply;
7999 struct _pcie_device *pcie_device;
8000 struct _enclosure_node *enclosure_dev;
8001 u32 ioc_status;
8002 u64 wwid;
8003
8004 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8005 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8006 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8007 __FILE__, __LINE__, __func__);
8008 return 0;
8009 }
8010 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8011 MPI2_IOCSTATUS_MASK;
8012 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8013 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8014 __FILE__, __LINE__, __func__);
8015 return 0;
8016 }
8017
8018 set_bit(handle, ioc->pend_os_device_add);
8019 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8020
8021
8022 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8023 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8024 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8025 handle);
8026 return 0;
8027 }
8028
8029
8030 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8031 pcie_device_pg0.AccessStatus))
8032 return 0;
8033
8034 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8035 (pcie_device_pg0.DeviceInfo))))
8036 return 0;
8037
8038 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8039 if (pcie_device) {
8040 clear_bit(handle, ioc->pend_os_device_add);
8041 pcie_device_put(pcie_device);
8042 return 0;
8043 }
8044
8045
8046
8047
8048
8049 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8050 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8051 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8052 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8053 handle)) {
8054 ioc_err(ioc,
8055 "failure at %s:%d/%s()!\n", __FILE__,
8056 __LINE__, __func__);
8057 return 0;
8058 }
8059
8060 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8061 MPI2_IOCSTATUS_MASK;
8062 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8063 ioc_err(ioc,
8064 "failure at %s:%d/%s()!\n", __FILE__,
8065 __LINE__, __func__);
8066 return 0;
8067 }
8068 }
8069
8070 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8071 if (!pcie_device) {
8072 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8073 __FILE__, __LINE__, __func__);
8074 return 0;
8075 }
8076
8077 kref_init(&pcie_device->refcount);
8078 pcie_device->id = ioc->pcie_target_id++;
8079 pcie_device->channel = PCIE_CHANNEL;
8080 pcie_device->handle = handle;
8081 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8082 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8083 pcie_device->wwid = wwid;
8084 pcie_device->port_num = pcie_device_pg0.PortNum;
8085 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8086 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8087
8088 pcie_device->enclosure_handle =
8089 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8090 if (pcie_device->enclosure_handle != 0)
8091 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8092
8093 if (le32_to_cpu(pcie_device_pg0.Flags) &
8094 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8095 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8096 memcpy(&pcie_device->connector_name[0],
8097 &pcie_device_pg0.ConnectorName[0], 4);
8098 } else {
8099 pcie_device->enclosure_level = 0;
8100 pcie_device->connector_name[0] = '\0';
8101 }
8102
8103
8104 if (pcie_device->enclosure_handle) {
8105 enclosure_dev =
8106 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8107 pcie_device->enclosure_handle);
8108 if (enclosure_dev)
8109 pcie_device->enclosure_logical_id =
8110 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8111 }
8112
8113 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8114 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8115 pcie_device->nvme_mdts =
8116 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8117 pcie_device->shutdown_latency =
8118 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8119
8120
8121
8122
8123
8124 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8125 ioc->max_shutdown_latency =
8126 pcie_device->shutdown_latency;
8127 if (pcie_device_pg2.ControllerResetTO)
8128 pcie_device->reset_timeout =
8129 pcie_device_pg2.ControllerResetTO;
8130 else
8131 pcie_device->reset_timeout = 30;
8132 } else
8133 pcie_device->reset_timeout = 30;
8134
8135 if (ioc->wait_for_discovery_to_complete)
8136 _scsih_pcie_device_init_add(ioc, pcie_device);
8137 else
8138 _scsih_pcie_device_add(ioc, pcie_device);
8139
8140 pcie_device_put(pcie_device);
8141 return 0;
8142}
8143
8144
8145
8146
8147
8148
8149
8150
8151static void
8152_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8153 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8154{
8155 int i;
8156 u16 handle;
8157 u16 reason_code;
8158 u8 port_number;
8159 char *status_str = NULL;
8160 u8 link_rate, prev_link_rate;
8161
8162 switch (event_data->SwitchStatus) {
8163 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8164 status_str = "add";
8165 break;
8166 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8167 status_str = "remove";
8168 break;
8169 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8170 case 0:
8171 status_str = "responding";
8172 break;
8173 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8174 status_str = "remove delay";
8175 break;
8176 default:
8177 status_str = "unknown status";
8178 break;
8179 }
8180 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8181 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8182 "start_port(%02d), count(%d)\n",
8183 le16_to_cpu(event_data->SwitchDevHandle),
8184 le16_to_cpu(event_data->EnclosureHandle),
8185 event_data->StartPortNum, event_data->NumEntries);
8186 for (i = 0; i < event_data->NumEntries; i++) {
8187 handle =
8188 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8189 if (!handle)
8190 continue;
8191 port_number = event_data->StartPortNum + i;
8192 reason_code = event_data->PortEntry[i].PortStatus;
8193 switch (reason_code) {
8194 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8195 status_str = "target add";
8196 break;
8197 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8198 status_str = "target remove";
8199 break;
8200 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8201 status_str = "delay target remove";
8202 break;
8203 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8204 status_str = "link rate change";
8205 break;
8206 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8207 status_str = "target responding";
8208 break;
8209 default:
8210 status_str = "unknown";
8211 break;
8212 }
8213 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8214 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8215 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8216 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8217 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8218 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8219 handle, status_str, link_rate, prev_link_rate);
8220 }
8221}
8222
8223
8224
8225
8226
8227
8228
8229
8230
/**
 * _scsih_pcie_topology_change_event - handle a PCIe topology change list event
 * @ioc: per adapter object
 * @fw_event: firmware event work object carrying the event payload
 *
 * Walks each port entry of the event and adds devices, removes devices,
 * or re-validates them on link-rate changes.  A rate change for a device
 * the OS has not seen yet is converted in place into a device-add.
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* Nothing to do while the host is being reset or torn down. */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	/* fw_event->ignore is set when a later event supersedes this one. */
	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* Re-check cancellation inside the loop: the event may be
		 * invalidated while earlier entries are being processed.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* If the device is already known, the rate change
			 * needs no further action; otherwise, when its add
			 * is still pending toward the OS, rewrite this
			 * entry's status so it is handled as a device add.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* Mark the entry as no-change so a retry of
				 * this event does not add the device twice.
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}
8336
8337
8338
8339
8340
8341
8342
8343static void
8344_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8345 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8346{
8347 char *reason_str = NULL;
8348
8349 switch (event_data->ReasonCode) {
8350 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8351 reason_str = "smart data";
8352 break;
8353 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8354 reason_str = "unsupported device discovered";
8355 break;
8356 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8357 reason_str = "internal device reset";
8358 break;
8359 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8360 reason_str = "internal task abort";
8361 break;
8362 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8363 reason_str = "internal task abort set";
8364 break;
8365 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8366 reason_str = "internal clear task set";
8367 break;
8368 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8369 reason_str = "internal query task";
8370 break;
8371 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8372 reason_str = "device init failure";
8373 break;
8374 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8375 reason_str = "internal device reset complete";
8376 break;
8377 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8378 reason_str = "internal task abort complete";
8379 break;
8380 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8381 reason_str = "internal async notification";
8382 break;
8383 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8384 reason_str = "pcie hot reset failed";
8385 break;
8386 default:
8387 reason_str = "unknown reason";
8388 break;
8389 }
8390
8391 ioc_info(ioc, "PCIE device status change: (%s)\n"
8392 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8393 reason_str, le16_to_cpu(event_data->DevHandle),
8394 (u64)le64_to_cpu(event_data->WWID),
8395 le16_to_cpu(event_data->TaskTag));
8396 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8397 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8398 event_data->ASC, event_data->ASCQ);
8399 pr_cont("\n");
8400}
8401
8402
8403
8404
8405
8406
8407
8408
/**
 * _scsih_pcie_device_status_change_event - handle PCIe device status change
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user (firmware event worker thread).
 *
 * Sets the target's tm_busy flag while the firmware performs an internal
 * device reset and clears it when the reset completes.  All other reason
 * codes are ignored (beyond the optional debug print).
 */
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_device_status_change_event_debug(ioc,
			event_data);

	/* only the internal-device-reset start/complete pair is acted upon */
	if (event_data->ReasonCode !=
		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
		event_data->ReasonCode !=
		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	wwid = le64_to_cpu(event_data->WWID);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device || !pcie_device->starget)
		goto out;

	target_priv_data = pcie_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy set while the firmware reset is in flight */
	if (event_data->ReasonCode ==
		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
out:
	/* drop the lookup reference taken by __mpt3sas_get_pdev_by_wwid() */
	if (pcie_device)
		pcie_device_put(pcie_device);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
8451
8452
8453
8454
8455
8456
8457
8458
8459static void
8460_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8461 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8462{
8463 char *reason_str = NULL;
8464
8465 switch (event_data->ReasonCode) {
8466 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8467 reason_str = "enclosure add";
8468 break;
8469 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8470 reason_str = "enclosure remove";
8471 break;
8472 default:
8473 reason_str = "unknown reason";
8474 break;
8475 }
8476
8477 ioc_info(ioc, "enclosure status change: (%s)\n"
8478 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8479 reason_str,
8480 le16_to_cpu(event_data->EnclosureHandle),
8481 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8482 le16_to_cpu(event_data->StartSlot));
8483}
8484
8485
8486
8487
8488
8489
8490
/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure add/remove
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user (firmware event worker thread).
 *
 * On an "added" event, reads enclosure page 0 from the controller and
 * caches it on ioc->enclosure_list; on "not responding", unlinks and frees
 * the cached entry.
 */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2ConfigReply_t mpi_reply;
	struct _enclosure_node *enclosure_dev = NULL;
	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
	int rc;
	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
		     (Mpi2EventDataSasEnclDevStatusChange_t *)
		     fw_event->event_data);
	/* skip list manipulation while the host is being reset */
	if (ioc->shost_recovery)
		return;

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
						enclosure_handle);
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		if (!enclosure_dev) {
			enclosure_dev =
				kzalloc(sizeof(struct _enclosure_node),
					GFP_KERNEL);
			if (!enclosure_dev) {
				ioc_info(ioc, "failure at %s:%d/%s()!\n",
					 __FILE__, __LINE__, __func__);
				return;
			}
			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
				enclosure_handle);

			/* discard the new entry if the config read failed */
			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
				     MPI2_IOCSTATUS_MASK)) {
				kfree(enclosure_dev);
				return;
			}

			list_add_tail(&enclosure_dev->list,
							&ioc->enclosure_list);
		}
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		if (enclosure_dev) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		}
		break;
	default:
		break;
	}
}
8549
8550
8551
8552
8553
8554
8555
/**
 * _scsih_sas_broadcast_primitive_event - handle SAS broadcast primitive AEN
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user (firmware event worker thread); holds ioc->tm_cmds.mutex.
 *
 * Walks every outstanding SCSI I/O, sends a QUERY_TASK TM for each, and
 * issues ABORT_TASK for commands the firmware no longer claims to own.
 * The whole scan restarts (up to 5 passes) whenever a TM fails or another
 * broadcast AEN arrives while scanning.  All I/O is blocked for the
 * duration and unblocked on exit (unless host recovery is in progress).
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* give up after 5 full passes over the outstanding commands */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip raid volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip NVMe (PCIe) devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* TM request sleeps: drop the lookup lock across it */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* firmware still owns the I/O: nothing to abort */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		/* cb_idx != 0xFF means the command has not completed yet */
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* another broadcast AEN arrived while scanning: scan again */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
8714
8715
8716
8717
8718
8719
8720
/**
 * _scsih_sas_discovery_event - handle SAS discovery start/stop events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user (firmware event worker thread).
 *
 * On the first discovery-start (no host phys known yet), exposes the SAS
 * host via _scsih_sas_host_add().
 */
static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventDataSasDiscovery_t *event_data =
		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
		ioc_info(ioc, "discovery event: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont("discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
	}

	/* first discovery on this adapter: the SAS host is not exposed yet */
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
	    !ioc->sas_hba.num_phys) {
		if (disable_discovery > 0 && ioc->shost_recovery) {
			/* wait until host recovery completes before adding */
			while (ioc->shost_recovery)
				ssleep(1);
		}
		_scsih_sas_host_add(ioc);
	}
}
8748
8749
8750
8751
8752
8753
8754
8755
8756static void
8757_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8758 struct fw_event_work *fw_event)
8759{
8760 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8761 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8762
8763 switch (event_data->ReasonCode) {
8764 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8765 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8766 le16_to_cpu(event_data->DevHandle),
8767 (u64)le64_to_cpu(event_data->SASAddress),
8768 event_data->PhysicalPort);
8769 break;
8770 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8771 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8772 le16_to_cpu(event_data->DevHandle),
8773 (u64)le64_to_cpu(event_data->SASAddress),
8774 event_data->PhysicalPort);
8775 break;
8776 default:
8777 break;
8778 }
8779}
8780
8781
8782
8783
8784
8785
8786
8787static void
8788_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8789 struct fw_event_work *fw_event)
8790{
8791 Mpi26EventDataPCIeEnumeration_t *event_data =
8792 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8793
8794 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8795 return;
8796
8797 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8798 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8799 "started" : "completed",
8800 event_data->Flags);
8801 if (event_data->EnumerationStatus)
8802 pr_cont("enumeration_status(0x%08x)",
8803 le32_to_cpu(event_data->EnumerationStatus));
8804 pr_cont("\n");
8805}
8806
8807
8808
8809
8810
8811
8812
8813
8814
/**
 * _scsih_ir_fastpath - turn on fastpath for a hidden raid component
 * @ioc: per adapter object
 * @handle: device handle of the physical disk
 * @phys_disk_num: physical disk number reported by the IR config element
 *
 * Sends a MPI2_RAID_ACTION_PHYSDISK_HIDDEN RAID action so the firmware
 * enables the fast path for this hidden member disk.  Skipped entirely on
 * MPI2 (SAS 2.0) generation controllers.
 *
 * Return: 0 on success, -EAGAIN when the scsih command slot or a smid is
 * unavailable, -EFAULT on timeout or firmware-reported failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/* NOTE(review): issue_reset is presumably set by
		 * mpt3sas_check_cmd_timeout() (macro in mpt3sas_base.h) —
		 * confirm; if it were a plain function taking u8 by value,
		 * the hard reset below could never trigger.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
8897
8898
8899
8900
8901
8902
8903
8904static void
8905_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8906{
8907 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8908 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8909 sdev->no_uld_attach ? "hiding" : "exposing");
8910 WARN_ON(scsi_device_reprobe(sdev));
8911}
8912
8913
8914
8915
8916
8917
8918
/**
 * _scsih_sas_volume_add - add a new RAID volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user (firmware event worker thread).
 *
 * Looks the volume up by WWID; if unknown, allocates a _raid_device,
 * registers it, and either exposes it to the SCSI mid layer immediately
 * or (during initial discovery) only records it as a boot-device
 * candidate.
 */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	/* nothing to do when the volume is already known */
	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		/* initial discovery: defer exposure, just note boot device */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}
8966
8967
8968
8969
8970
8971
8972
/**
 * _scsih_sas_volume_delete - delete a RAID volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user (firmware event worker thread).
 *
 * Marks the target deleted, unlinks and frees the _raid_device, then
 * removes the scsi target outside the spinlock (removal can sleep).
 */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			starget = raid_device->starget;
			sas_target_priv_data = starget->hostdata;
			/* block further I/O submission to this target */
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	/* scsi_remove_target() may sleep: must be outside the lock */
	if (starget)
		scsi_remove_target(&starget->dev);
}
8998
8999
9000
9001
9002
9003
9004
/**
 * _scsih_sas_pd_expose - expose a previously hidden raid component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user (firmware event worker thread).
 *
 * Clears the RAID-component flag and pd_handles bit, then reprobes the
 * LUN so upper-layer drivers attach to the now-exposed physical disk.
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
				~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component: reprobe outside the lock (can sleep) */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	/* drop the lookup reference from __mpt3sas_get_sdev_by_handle() */
	sas_device_put(sas_device);
}
9038
9039
9040
9041
9042
9043
9044
/**
 * _scsih_sas_pd_hide - hide a physical disk that became a raid component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user (firmware event worker thread).
 *
 * Records the owning volume's handle/WWID, sets the RAID-component flag
 * and pd_handles bit, enables fastpath for the member, and reprobes the
 * LUN so upper-layer drivers detach.
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
				MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component: fastpath + reprobe outside the lock */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	/* drop the lookup reference from __mpt3sas_get_sdev_by_handle() */
	sas_device_put(sas_device);
}
9087
9088
9089
9090
9091
9092
9093
9094static void
9095_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9096 Mpi2EventIrConfigElement_t *element)
9097{
9098 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9099
9100 _scsih_device_remove_by_handle(ioc, handle);
9101}
9102
9103
9104
9105
9106
9107
9108
/**
 * _scsih_sas_pd_add - add a physical disk as a hidden raid component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user (firmware event worker thread).
 *
 * Marks the handle in pd_handles.  If the device is already known, only
 * fastpath is (re)enabled; otherwise the device page is read, the parent
 * link state updated, and the disk added as a hidden (is_pd) device.
 */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	set_bit(handle, ioc->pd_handles);

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* already discovered: just re-enable fastpath */
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* refresh the transport-layer link to the parent before adding */
	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	_scsih_add_device(ioc, handle, 0, 1);
}
9155
9156
9157
9158
9159
9160
9161
9162static void
9163_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9164 Mpi2EventDataIrConfigChangeList_t *event_data)
9165{
9166 Mpi2EventIrConfigElement_t *element;
9167 u8 element_type;
9168 int i;
9169 char *reason_str = NULL, *element_str = NULL;
9170
9171 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9172
9173 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9174 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9175 "foreign" : "native",
9176 event_data->NumElements);
9177 for (i = 0; i < event_data->NumElements; i++, element++) {
9178 switch (element->ReasonCode) {
9179 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9180 reason_str = "add";
9181 break;
9182 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9183 reason_str = "remove";
9184 break;
9185 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9186 reason_str = "no change";
9187 break;
9188 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9189 reason_str = "hide";
9190 break;
9191 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9192 reason_str = "unhide";
9193 break;
9194 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9195 reason_str = "volume_created";
9196 break;
9197 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9198 reason_str = "volume_deleted";
9199 break;
9200 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9201 reason_str = "pd_created";
9202 break;
9203 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9204 reason_str = "pd_deleted";
9205 break;
9206 default:
9207 reason_str = "unknown reason";
9208 break;
9209 }
9210 element_type = le16_to_cpu(element->ElementFlags) &
9211 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9212 switch (element_type) {
9213 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9214 element_str = "volume";
9215 break;
9216 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9217 element_str = "phys disk";
9218 break;
9219 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9220 element_str = "hot spare";
9221 break;
9222 default:
9223 element_str = "unknown element";
9224 break;
9225 }
9226 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9227 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9228 reason_str, le16_to_cpu(element->VolDevHandle),
9229 le16_to_cpu(element->PhysDiskDevHandle),
9230 element->PhysDiskNum);
9231 }
9232}
9233
9234
9235
9236
9237
9238
9239
/**
 * _scsih_sas_ir_config_change_event - dispatch IR configuration changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user (firmware event worker thread).
 *
 * Walks the element list and adds/removes volumes and hides/exposes
 * physical-disk components.  Foreign configurations are never exposed.
 * During host recovery (on MPI2.5+) only the fastpath re-enable for
 * hidden components is performed.
 */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
		(Mpi2EventDataIrConfigChangeList_t *)
		fw_event->event_data;

	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	    (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	/* during recovery, only refresh fastpath for hidden members */
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
					le16_to_cpu(element->PhysDiskDevHandle),
					element->PhysDiskNum);
		}
		return;
	}

	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			/* warpdrive keeps member disks visible */
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}
9303
9304
9305
9306
9307
9308
9309
/**
 * _scsih_sas_ir_volume_event - handle IR volume state change events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user (firmware event worker thread).
 *
 * Deletes the volume when it goes missing/failed; creates and exposes it
 * when it becomes online/degraded/optimal and is not already known.
 */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		/* already exposed: nothing more to do */
		if (raid_device)
			break;

		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}
9384
9385
9386
9387
9388
9389
9390
/**
 * _scsih_sas_ir_physical_disk_event - handle IR physical disk state changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user (firmware event worker thread).
 *
 * When a physical disk becomes an active raid member (online, degraded,
 * rebuilding, optimal, hot spare), records it in pd_handles and, if not
 * already discovered, reads its device page, refreshes the parent link,
 * and adds it as a hidden device.
 */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		/* warpdrive keeps member disks exposed, so no pd bit */
		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			/* already discovered: drop lookup reference */
			sas_device_put(sas_device);
			return;
		}

		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}
9471
9472
9473
9474
9475
9476
9477
9478static void
9479_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9480 Mpi2EventDataIrOperationStatus_t *event_data)
9481{
9482 char *reason_str = NULL;
9483
9484 switch (event_data->RAIDOperation) {
9485 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9486 reason_str = "resync";
9487 break;
9488 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9489 reason_str = "online capacity expansion";
9490 break;
9491 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9492 reason_str = "consistency check";
9493 break;
9494 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9495 reason_str = "background init";
9496 break;
9497 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9498 reason_str = "make data consistent";
9499 break;
9500 }
9501
9502 if (!reason_str)
9503 return;
9504
9505 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9506 reason_str,
9507 le16_to_cpu(event_data->VolDevHandle),
9508 event_data->PercentComplete);
9509}
9510
9511
9512
9513
9514
9515
9516
9517static void
9518_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9519 struct fw_event_work *fw_event)
9520{
9521 Mpi2EventDataIrOperationStatus_t *event_data =
9522 (Mpi2EventDataIrOperationStatus_t *)
9523 fw_event->event_data;
9524 static struct _raid_device *raid_device;
9525 unsigned long flags;
9526 u16 handle;
9527
9528 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9529 (!ioc->hide_ir_msg))
9530 _scsih_sas_ir_operation_status_event_debug(ioc,
9531 event_data);
9532
9533
9534 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9535
9536 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9537 handle = le16_to_cpu(event_data->VolDevHandle);
9538 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9539 if (raid_device)
9540 raid_device->percent_complete =
9541 event_data->PercentComplete;
9542 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9543 }
9544}
9545
9546
9547
9548
9549
9550
9551
9552
9553static void
9554_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9555{
9556 struct MPT3SAS_DEVICE *sas_device_priv_data;
9557 struct scsi_device *sdev;
9558
9559 shost_for_each_device(sdev, ioc->shost) {
9560 sas_device_priv_data = sdev->hostdata;
9561 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9562 sas_device_priv_data->sas_target->deleted = 1;
9563 }
9564}
9565
9566
9567
9568
9569
9570
9571
9572
9573
/**
 * _scsih_mark_responding_sas_device - mark a sas_device as responding
 * @ioc: per adapter object
 * @sas_device_pg0: SAS Device page 0 as read back from firmware
 *
 * After host reset, look up the driver's cached sas_device matching this
 * firmware page (by SAS address, slot and hba port) and mark it
 * responding so _scsih_remove_unresponding_devices() does not tear it
 * down.  Also refreshes enclosure information and the device handle,
 * both of which may have changed across the reset.
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	/* look up the cached enclosure page for this device, if any */
	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* match on SAS address, slot AND port */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			sas_target_priv_data = starget->hostdata;
			/* device answered after reset: clear stale flags */
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    sas_device->enclosure_logical_id,
				    sas_device->slot);
		}
		/* refresh enclosure level / connector name from page 0 */
		if (le16_to_cpu(sas_device_pg0->Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			    sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
			    &sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
		    le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
			    enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* firmware may have assigned a new handle across reset */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
		    sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
9665
9666
9667
9668
9669
9670
9671static void
9672_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9673{
9674 struct _enclosure_node *enclosure_dev;
9675 Mpi2ConfigReply_t mpi_reply;
9676 u16 enclosure_handle;
9677 int rc;
9678
9679
9680 mpt3sas_free_enclosure_list(ioc);
9681
9682
9683 enclosure_handle = 0xFFFF;
9684 do {
9685 enclosure_dev =
9686 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9687 if (!enclosure_dev) {
9688 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9689 __FILE__, __LINE__, __func__);
9690 return;
9691 }
9692 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9693 &enclosure_dev->pg0,
9694 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9695 enclosure_handle);
9696
9697 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9698 MPI2_IOCSTATUS_MASK)) {
9699 kfree(enclosure_dev);
9700 return;
9701 }
9702 list_add_tail(&enclosure_dev->list,
9703 &ioc->enclosure_list);
9704 enclosure_handle =
9705 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9706 } while (1);
9707}
9708
9709
9710
9711
9712
9713
9714
9715
9716static void
9717_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9718{
9719 Mpi2SasDevicePage0_t sas_device_pg0;
9720 Mpi2ConfigReply_t mpi_reply;
9721 u16 ioc_status;
9722 u16 handle;
9723 u32 device_info;
9724
9725 ioc_info(ioc, "search for end-devices: start\n");
9726
9727 if (list_empty(&ioc->sas_device_list))
9728 goto out;
9729
9730 handle = 0xFFFF;
9731 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9732 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9733 handle))) {
9734 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9735 MPI2_IOCSTATUS_MASK;
9736 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9737 break;
9738 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9739 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9740 if (!(_scsih_is_end_device(device_info)))
9741 continue;
9742 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9743 }
9744
9745 out:
9746 ioc_info(ioc, "search for end-devices: complete\n");
9747}
9748
9749
9750
9751
9752
9753
9754
9755
9756
/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0 as read back from firmware
 *
 * After host reset, look up the driver's cached pcie_device matching
 * this firmware page (by WWID and slot) and mark it responding so
 * _scsih_remove_unresponding_devices() does not tear it down.  Also
 * refreshes the access status, enclosure data and the device handle,
 * which may have changed across the reset.
 */
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		/* match on WWID and slot */
		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
		    && (pcie_device->slot == le16_to_cpu(
		    pcie_device_pg0->Slot))) {
			pcie_device->access_status =
					pcie_device_pg0->AccessStatus;
			pcie_device->responding = 1;
			starget = pcie_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* device answered: clear stale flags */
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), wwid(0x%016llx) ",
				    pcie_device->handle,
				    (unsigned long long)pcie_device->wwid);
				if (pcie_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
					    "enclosure logical id(0x%016llx), "
					    "slot(%d)\n",
					    (unsigned long long)
					    pcie_device->enclosure_logical_id,
					    pcie_device->slot);
			}

			/* enclosure level not reported on MPI2 parts */
			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
				pcie_device->enclosure_level =
				    pcie_device_pg0->EnclosureLevel;
				memcpy(&pcie_device->connector_name[0],
				    &pcie_device_pg0->ConnectorName[0], 4);
			} else {
				pcie_device->enclosure_level = 0;
				pcie_device->connector_name[0] = '\0';
			}

			/* firmware may assign a new handle across reset */
			if (pcie_device->handle == le16_to_cpu(
			    pcie_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    pcie_device->handle);
			pcie_device->handle = le16_to_cpu(
			    pcie_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
				    le16_to_cpu(pcie_device_pg0->DevHandle);
			goto out;
		}
	}

 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
9824
9825
9826
9827
9828
9829
9830
9831
9832static void
9833_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9834{
9835 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9836 Mpi2ConfigReply_t mpi_reply;
9837 u16 ioc_status;
9838 u16 handle;
9839 u32 device_info;
9840
9841 ioc_info(ioc, "search for end-devices: start\n");
9842
9843 if (list_empty(&ioc->pcie_device_list))
9844 goto out;
9845
9846 handle = 0xFFFF;
9847 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9848 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9849 handle))) {
9850 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9851 MPI2_IOCSTATUS_MASK;
9852 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9853 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9854 __func__, ioc_status,
9855 le32_to_cpu(mpi_reply.IOCLogInfo));
9856 break;
9857 }
9858 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9859 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9860 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9861 continue;
9862 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9863 }
9864out:
9865 ioc_info(ioc, "search for PCIe end-devices: complete\n");
9866}
9867
9868
9869
9870
9871
9872
9873
9874
9875
9876
/**
 * _scsih_mark_responding_raid_device - mark a raid_device as responding
 * @ioc: per adapter object
 * @wwid: world wide identifier for the volume
 * @handle: device handle reported by firmware after reset
 *
 * After host reset, look up the cached volume by WWID and mark it
 * responding so it is not deleted by
 * _scsih_remove_unresponding_devices().  The handle is refreshed if the
 * firmware assigned a new one across the reset.
 *
 * NOTE: raid_device_lock is deliberately dropped around
 * mpt3sas_init_warpdrive_properties() and re-taken afterwards; the list
 * entry found before the drop is reused, so the lock juggling order here
 * must not be changed.
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* volume answered: clear the deleted flag */
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: refresh manufacture-page10 derived
			 * properties (may sleep / issue config requests,
			 * hence done with the lock dropped).
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
9924
9925
9926
9927
9928
9929
9930
9931
/**
 * _scsih_search_responding_raid_devices - search for responding volumes
 *	after host reset
 * @ioc: per adapter object
 *
 * Walks firmware RAID volume pages and marks cached volumes that are in
 * a usable state as responding; then (except on warpdrive controllers)
 * rebuilds the pd_handles bitmap from the firmware's physical disk
 * pages.  No-op when the controller has no IR firmware.
 */
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	if (!ioc->ir_firmware)
		return;

	ioc_info(ioc, "search for raid volumes: start\n");

	if (list_empty(&ioc->raid_device_list))
		goto out;

	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(volume_pg1.DevHandle);

		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		     sizeof(Mpi2RaidVolPage0_t)))
			continue;

		/* only usable volume states are marked responding */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
			_scsih_mark_responding_raid_device(ioc,
			    le64_to_cpu(volume_pg1.WWID), handle);
	}

	/* refresh the pd_handles bitmap from firmware */
	if (!ioc->is_warpdrive) {
		phys_disk_num = 0xFF;
		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
		    phys_disk_num))) {
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
				break;
			phys_disk_num = pd_pg0.PhysDiskNum;
			handle = le16_to_cpu(pd_pg0.DevHandle);
			set_bit(handle, ioc->pd_handles);
		}
	}
 out:
	ioc_info(ioc, "search for responding raid volumes: complete\n");
}
9991
9992
9993
9994
9995
9996
9997
9998
9999
/**
 * _scsih_mark_responding_expander - mark an expander as responding
 * @ioc: per adapter object
 * @expander_pg0: Expander page 0 as read back from firmware
 *
 * After host reset, look up the cached expander matching this firmware
 * page (by SAS address and hba port) and mark it responding so it is
 * not removed by _scsih_remove_unresponding_devices().  Enclosure data
 * and the expander handle (including all per-phy handles) are refreshed
 * in case they changed across the reset.
 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
	Mpi2ExpanderPage0_t *expander_pg0)
{
	struct _sas_node *sas_expander = NULL;
	unsigned long flags;
	int i;
	struct _enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, expander_pg0->PhysicalPort, 0);

	/* look up the cached enclosure page for this expander, if any */
	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
							enclosure_handle);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		if (sas_expander->sas_address != sas_address)
			continue;
		if (sas_expander->port != port)
			continue;
		sas_expander->responding = 1;

		if (enclosure_dev) {
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			sas_expander->enclosure_handle =
			    le16_to_cpu(expander_pg0->EnclosureHandle);
		}

		/* firmware may assign a new handle across the reset */
		if (sas_expander->handle == handle)
			goto out;
		pr_info("\texpander(0x%016llx): handle changed" \
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}
10048
10049
10050
10051
10052
10053
10054
10055
10056static void
10057_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10058{
10059 Mpi2ExpanderPage0_t expander_pg0;
10060 Mpi2ConfigReply_t mpi_reply;
10061 u16 ioc_status;
10062 u64 sas_address;
10063 u16 handle;
10064 u8 port;
10065
10066 ioc_info(ioc, "search for expanders: start\n");
10067
10068 if (list_empty(&ioc->sas_expander_list))
10069 goto out;
10070
10071 handle = 0xFFFF;
10072 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10073 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10074
10075 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10076 MPI2_IOCSTATUS_MASK;
10077 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10078 break;
10079
10080 handle = le16_to_cpu(expander_pg0.DevHandle);
10081 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10082 port = expander_pg0.PhysicalPort;
10083 pr_info(
10084 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10085 handle, (unsigned long long)sas_address,
10086 (ioc->multipath_on_hba ?
10087 port : MULTIPATH_DISABLED_PORT_ID));
10088 _scsih_mark_responding_expander(ioc, &expander_pg0);
10089 }
10090
10091 out:
10092 ioc_info(ioc, "search for expanders: complete\n");
10093}
10094
10095
10096
10097
10098
/**
 * _scsih_remove_unresponding_devices - remove devices that did not
 *	answer the post-reset responding scans
 * @ioc: per adapter object
 *
 * For each device class (SAS end devices, PCIe end devices, RAID
 * volumes, expanders): devices still flagged responding have the flag
 * cleared for the next reset cycle; devices not flagged are torn down.
 * List entries are first moved to a private list under the class lock,
 * then removed outside the lock (removal may sleep).  Finally all
 * remaining devices are unblocked for I/O.
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding SAS end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");

	/*
	 * Move non-responding entries onto a local list under the lock;
	 * actual removal happens below without the lock held.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* tear down the moved entries and drop their list reference */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes (IR firmware only) */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}
10190
10191static void
10192_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10193 struct _sas_node *sas_expander, u16 handle)
10194{
10195 Mpi2ExpanderPage1_t expander_pg1;
10196 Mpi2ConfigReply_t mpi_reply;
10197 int i;
10198
10199 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10200 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10201 &expander_pg1, i, handle))) {
10202 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10203 __FILE__, __LINE__, __func__);
10204 return;
10205 }
10206
10207 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10208 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10209 expander_pg1.NegotiatedLinkRate >> 4,
10210 sas_expander->port);
10211 }
10212}
10213
10214
10215
10216
10217
10218static void
10219_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10220{
10221 Mpi2ExpanderPage0_t expander_pg0;
10222 Mpi2SasDevicePage0_t sas_device_pg0;
10223 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10224 Mpi2RaidVolPage1_t *volume_pg1;
10225 Mpi2RaidVolPage0_t *volume_pg0;
10226 Mpi2RaidPhysDiskPage0_t pd_pg0;
10227 Mpi2EventIrConfigElement_t element;
10228 Mpi2ConfigReply_t mpi_reply;
10229 u8 phys_disk_num, port_id;
10230 u16 ioc_status;
10231 u16 handle, parent_handle;
10232 u64 sas_address;
10233 struct _sas_device *sas_device;
10234 struct _pcie_device *pcie_device;
10235 struct _sas_node *expander_device;
10236 static struct _raid_device *raid_device;
10237 u8 retry_count;
10238 unsigned long flags;
10239
10240 volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
10241 if (!volume_pg0)
10242 return;
10243
10244 volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
10245 if (!volume_pg1) {
10246 kfree(volume_pg0);
10247 return;
10248 }
10249
10250 ioc_info(ioc, "scan devices: start\n");
10251
10252 _scsih_sas_host_refresh(ioc);
10253
10254 ioc_info(ioc, "\tscan devices: expanders start\n");
10255
10256
10257 handle = 0xFFFF;
10258 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10259 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10260 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10261 MPI2_IOCSTATUS_MASK;
10262 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10263 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10264 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10265 break;
10266 }
10267 handle = le16_to_cpu(expander_pg0.DevHandle);
10268 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10269 port_id = expander_pg0.PhysicalPort;
10270 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10271 ioc, le64_to_cpu(expander_pg0.SASAddress),
10272 mpt3sas_get_port_by_id(ioc, port_id, 0));
10273 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10274 if (expander_device)
10275 _scsih_refresh_expander_links(ioc, expander_device,
10276 handle);
10277 else {
10278 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10279 handle,
10280 (u64)le64_to_cpu(expander_pg0.SASAddress));
10281 _scsih_expander_add(ioc, handle);
10282 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10283 handle,
10284 (u64)le64_to_cpu(expander_pg0.SASAddress));
10285 }
10286 }
10287
10288 ioc_info(ioc, "\tscan devices: expanders complete\n");
10289
10290 if (!ioc->ir_firmware)
10291 goto skip_to_sas;
10292
10293 ioc_info(ioc, "\tscan devices: phys disk start\n");
10294
10295
10296 phys_disk_num = 0xFF;
10297 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10298 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10299 phys_disk_num))) {
10300 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10301 MPI2_IOCSTATUS_MASK;
10302 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10303 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10304 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10305 break;
10306 }
10307 phys_disk_num = pd_pg0.PhysDiskNum;
10308 handle = le16_to_cpu(pd_pg0.DevHandle);
10309 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10310 if (sas_device) {
10311 sas_device_put(sas_device);
10312 continue;
10313 }
10314 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10315 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10316 handle) != 0)
10317 continue;
10318 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10319 MPI2_IOCSTATUS_MASK;
10320 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10321 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10322 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10323 break;
10324 }
10325 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10326 if (!_scsih_get_sas_address(ioc, parent_handle,
10327 &sas_address)) {
10328 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10329 handle,
10330 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10331 port_id = sas_device_pg0.PhysicalPort;
10332 mpt3sas_transport_update_links(ioc, sas_address,
10333 handle, sas_device_pg0.PhyNum,
10334 MPI2_SAS_NEG_LINK_RATE_1_5,
10335 mpt3sas_get_port_by_id(ioc, port_id, 0));
10336 set_bit(handle, ioc->pd_handles);
10337 retry_count = 0;
10338
10339
10340
10341
10342 while (_scsih_add_device(ioc, handle, retry_count++,
10343 1)) {
10344 ssleep(1);
10345 }
10346 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10347 handle,
10348 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10349 }
10350 }
10351
10352 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10353
10354 ioc_info(ioc, "\tscan devices: volumes start\n");
10355
10356
10357 handle = 0xFFFF;
10358 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10359 volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10360 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10361 MPI2_IOCSTATUS_MASK;
10362 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10363 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10364 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10365 break;
10366 }
10367 handle = le16_to_cpu(volume_pg1->DevHandle);
10368 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10369 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10370 le64_to_cpu(volume_pg1->WWID));
10371 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10372 if (raid_device)
10373 continue;
10374 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10375 volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10376 sizeof(Mpi2RaidVolPage0_t)))
10377 continue;
10378 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10379 MPI2_IOCSTATUS_MASK;
10380 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10381 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10382 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10383 break;
10384 }
10385 if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10386 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10387 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10388 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10389 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10390 element.VolDevHandle = volume_pg1->DevHandle;
10391 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10392 volume_pg1->DevHandle);
10393 _scsih_sas_volume_add(ioc, &element);
10394 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10395 volume_pg1->DevHandle);
10396 }
10397 }
10398
10399 ioc_info(ioc, "\tscan devices: volumes complete\n");
10400
10401 skip_to_sas:
10402
10403 ioc_info(ioc, "\tscan devices: end devices start\n");
10404
10405
10406 handle = 0xFFFF;
10407 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10408 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10409 handle))) {
10410 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10411 MPI2_IOCSTATUS_MASK;
10412 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10413 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10414 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10415 break;
10416 }
10417 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10418 if (!(_scsih_is_end_device(
10419 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10420 continue;
10421 port_id = sas_device_pg0.PhysicalPort;
10422 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10423 le64_to_cpu(sas_device_pg0.SASAddress),
10424 mpt3sas_get_port_by_id(ioc, port_id, 0));
10425 if (sas_device) {
10426 sas_device_put(sas_device);
10427 continue;
10428 }
10429 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10430 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10431 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10432 handle,
10433 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10434 mpt3sas_transport_update_links(ioc, sas_address, handle,
10435 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10436 mpt3sas_get_port_by_id(ioc, port_id, 0));
10437 retry_count = 0;
10438
10439
10440
10441
10442 while (_scsih_add_device(ioc, handle, retry_count++,
10443 0)) {
10444 ssleep(1);
10445 }
10446 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10447 handle,
10448 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10449 }
10450 }
10451 ioc_info(ioc, "\tscan devices: end devices complete\n");
10452 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10453
10454
10455 handle = 0xFFFF;
10456 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10457 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10458 handle))) {
10459 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10460 & MPI2_IOCSTATUS_MASK;
10461 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10462 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10463 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10464 break;
10465 }
10466 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10467 if (!(_scsih_is_nvme_pciescsi_device(
10468 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10469 continue;
10470 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10471 le64_to_cpu(pcie_device_pg0.WWID));
10472 if (pcie_device) {
10473 pcie_device_put(pcie_device);
10474 continue;
10475 }
10476 retry_count = 0;
10477 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10478 _scsih_pcie_add_device(ioc, handle);
10479
10480 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10481 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10482 }
10483
10484 kfree(volume_pg0);
10485 kfree(volume_pg1);
10486
10487 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10488 ioc_info(ioc, "scan devices: complete\n");
10489}
10490
10491
10492
10493
10494
10495
10496
/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * Invoked at the start of host reset; currently only emits a debug
 * message (dtmprintk is gated on task-management debug logging).
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
10501
10502
10503
10504
10505
10506
10507
10508
/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clear outstanding
 *	scsih internal and task-management commands during host reset
 * @ioc: per adapter object
 *
 * Completes any pending internal scsih/tm commands with the RESET
 * status so their waiters wake, clears the device add/remove
 * bookkeeping bitmaps, then flushes the firmware event queue and all
 * running SCSI commands.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}
10531
10532
10533
10534
10535
10536
10537
/**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * Invoked after host reset completes.  Unless the driver is still
 * loading (or discovery is disabled on a phy-less HBA), refreshes port
 * and vphy state (multipath only), marks all targets deleted, rebuilds
 * the enclosure list, runs the responding-device scans for every device
 * class, and schedules deletion of devices that did not respond.
 */
void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
					   !ioc->sas_hba.num_phys)) {
		if (ioc->multipath_on_hba) {
			_scsih_sas_port_refresh(ioc);
			_scsih_update_vphys_after_reset(ioc);
		}
		_scsih_prep_device_scan(ioc);
		_scsih_create_enclosure_list_after_reset(ioc);
		_scsih_search_responding_sas_devices(ioc);
		_scsih_search_responding_pcie_devices(ioc);
		_scsih_search_responding_raid_devices(ioc);
		_scsih_search_responding_expanders(ioc);
		_scsih_error_recovery_delete_devices(ioc);
	}
}
10557
10558
10559
10560
10561
10562
10563
/**
 * _mpt3sas_fw_work - dispatch a dequeued firmware event to its handler
 * @ioc: per adapter object
 * @fw_event: the fw_event_work object
 * Context: user (firmware event worker thread).
 *
 * Removes @fw_event from the adapter's event list, routes it to the
 * matching handler, and drops the list's reference via
 * fw_event_work_put() on every exit path except the PCIe topology
 * case (see note below).
 */
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	ioc->current_event = fw_event;
	_scsih_fw_event_del_from_list(ioc, fw_event);

	/* Adapter is going away; discard the event. */
	if (ioc->remove_host || ioc->pci_error_recovery) {
		fw_event_work_put(fw_event);
		ioc->current_event = NULL;
		return;
	}

	switch (fw_event->event) {
	case MPT3SAS_PROCESS_TRIGGER_DIAG:
		mpt3sas_process_trigger_data(ioc,
			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
			fw_event->event_data);
		break;
	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
		/*
		 * Wait for any in-flight host recovery to complete before
		 * reconciling the device lists; bail out if the host is
		 * being removed or event cleanup has started meanwhile.
		 */
		while (scsi_host_in_recovery(ioc->shost) ||
					 ioc->shost_recovery) {
			if (ioc->remove_host || ioc->fw_events_cleanup)
				goto out;
			ssleep(1);
		}
		_scsih_remove_unresponding_devices(ioc);
		_scsih_del_dirty_vphy(ioc);
		_scsih_del_dirty_port_entries(ioc);
		_scsih_scan_for_devices_after_reset(ioc);
		_scsih_set_nvme_max_shutdown_latency(ioc);
		break;
	case MPT3SAS_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		/* Apply the module-parameter missing delays, if provided. */
		if (missing_delay[0] != -1 && missing_delay[1] != -1)
			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
			    missing_delay[1]);
		dewtprintk(ioc,
			   ioc_info(ioc, "port enable: complete from worker thread\n"));
		break;
	case MPT3SAS_TURN_ON_PFA_LED:
		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_sas_topology_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		/* Debug-only trace; the real handling occurred at ISR time. */
		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
			_scsih_sas_device_status_change_event_debug(ioc,
			    (Mpi2EventDataSasDeviceStatusChange_t *)
			    fw_event->event_data);
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
		_scsih_sas_discovery_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		_scsih_sas_device_discovery_error_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		_scsih_sas_enclosure_dev_status_change_event(ioc,
		    fw_event);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_sas_ir_config_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_sas_ir_volume_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		_scsih_sas_ir_operation_status_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		_scsih_pcie_device_status_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
		_scsih_pcie_enumeration_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_pcie_topology_change_event(ioc, fw_event);
		ioc->current_event = NULL;
		/*
		 * NOTE(review): this path returns without calling
		 * fw_event_work_put(); presumably the PCIe topology handler
		 * consumes the list's reference itself - confirm, otherwise
		 * this leaks @fw_event. The break below is unreachable.
		 */
		return;
	break;
	}
out:
	fw_event_work_put(fw_event);
	ioc->current_event = NULL;
}
10661
10662
10663
10664
10665
10666
10667
10668
10669
10670static void
10671_firmware_event_work(struct work_struct *work)
10672{
10673 struct fw_event_work *fw_event = container_of(work,
10674 struct fw_event_work, work);
10675
10676 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10677}
10678
10679
10680
10681
10682
10683
10684
10685
10686
10687
10688
10689
10690
10691
/**
 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 * Context: interrupt.
 *
 * Performs any handling that must happen immediately (event filtering,
 * pre-processing of topology/volume delete events), then queues a
 * fw_event_work item so the bulk of the work runs later in
 * _firmware_event_work() in user context.
 *
 * Return: 1 - the caller frees the reply message frame.
 */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* Events are ignored while PCI error recovery is in progress. */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	/* Feed the diag-trigger engine (log entries are excluded). */
	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		/* Only the asynchronous-event primitive is processed. */
		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/*
		 * Coalesce broadcast AENs: if one is already being worked,
		 * just count the new arrival instead of queueing another.
		 */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* Pre-process device-not-responding entries at ISR time. */
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 *log_code;

		/* WarpDrive log entries only; other HBAs ignore these. */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = (u32 *)log_entry->LogData;

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (le32_to_cpu(*log_code)) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	/* Deferred entirely to the worker thread; nothing to do here. */
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
		    (Mpi2EventDataTemperature_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		/* Fully handled here: only log the condition, don't queue. */
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
				  le32_to_cpu(
				  ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* Queue the event for deferred processing in the worker thread. */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	/* Drop our reference; the event list holds its own. */
	fw_event_work_put(fw_event);
	return 1;
}
10856
10857
10858
10859
10860
10861
10862
10863
10864
/**
 * _scsih_expander_node_remove - remove an expander and its attached devices
 * @ioc: per adapter object
 * @sas_expander: the sas expander node object
 *
 * Removes every device/expander hanging off @sas_expander's ports, then
 * removes the expander's own transport port, unlinks it from
 * ioc->sas_expander_list and frees it.
 */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	   &sas_expander->sas_port_list, port_list) {
		/* Recovery tears things down on its own path; stop here. */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* Downstream expanders are removed recursively. */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);

	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    sas_expander->port->port_id);

	/* Unlink under the node lock before freeing. */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}
10907
10908
10909
10910
10911
10912
10913
10914
10915
/**
 * _scsih_nvme_shutdown - send an IO Unit Control shutdown to NVMe drives
 * @ioc: per adapter object
 *
 * Issues MPI26_CTRL_OP_SHUTDOWN so attached NVMe devices can flush and
 * shut down cleanly. Waits up to ioc->max_shutdown_latency seconds for
 * the firmware reply. Serialized through ioc->scsih_cmds.
 */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* Nothing to do when no NVMe (PCIe) devices are attached. */
	if (list_empty(&ioc->pcie_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);

	ioc_info(ioc,
	    "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
	    ioc->max_shutdown_latency);
	/* Latency is device-reported; see _scsih_set_nvme_max_shutdown_latency. */
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
			ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo));
	}
 out:
	/* Always release the command slot and the mutex. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
10975
10976
10977
10978
10979
10980
10981
10982
10983
/**
 * _scsih_ir_shutdown - notify IR firmware of a system shutdown
 * @ioc: per adapter object
 *
 * Sends MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED so the RAID firmware
 * can flush its caches. Waits up to 10 seconds for the reply.
 * Serialized through ioc->scsih_cmds.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes? */
	if (list_empty(&ioc->raid_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/* hide_ir_msg is set for WarpDrive; suppress RAID chatter there. */
	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	/* Always release the command slot and the mutex. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
11044
11045
11046
11047
11048
11049
11050
11051
11052
11053
11054static int
11055_scsih_get_shost_and_ioc(struct pci_dev *pdev,
11056 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11057{
11058 *shost = pci_get_drvdata(pdev);
11059 if (*shost == NULL) {
11060 dev_err(&pdev->dev, "pdev's driver data is null\n");
11061 return -ENXIO;
11062 }
11063
11064 *ioc = shost_priv(*shost);
11065 if (*ioc == NULL) {
11066 dev_err(&pdev->dev, "shost's private data is null\n");
11067 return -ENXIO;
11068 }
11069
11070 return 0;
11071}
11072
11073
11074
11075
11076
11077
11078
/**
 * scsih_remove - detach and remove the host
 * @pdev: PCI device struct
 *
 * PCI driver .remove callback. Flushes outstanding work, tears down all
 * attached devices (RAID volumes, PCIe/NVMe devices, SAS ports and
 * expanders), then detaches the adapter and releases the Scsi_Host.
 */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* Surprise removal: fail all in-flight commands immediately. */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* Detach and destroy the firmware event workqueue. */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	/*
	 * Aero controllers: restore the saved IOC page 1 settings before
	 * handing the controller back.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);

	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
		list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	mpt3sas_base_detach(ioc);
	/* Unlink from the global adapter list before dropping the host. */
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}
11174
11175
11176
11177
11178
/**
 * scsih_shutdown - routine for system shutdown
 * @pdev: PCI device struct
 *
 * PCI driver .shutdown callback. Quiesces firmware events, notifies IR
 * firmware and NVMe devices of the shutdown, then detaches the adapter.
 * Unlike scsih_remove(), the SCSI host itself is not torn down.
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* Surprise removal: fail all in-flight commands immediately. */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* Detach and destroy the firmware event workqueue. */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	/*
	 * Aero controllers: restore the saved IOC page 1 settings before
	 * the system goes down.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);

	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_detach(ioc);
}
11216
11217
11218
11219
11220
11221
11222
11223
11224
11225
/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * Determines the BIOS-preferred boot device (requested, then alternate,
 * then current) and reports it to the SCSI midlayer first so it gets the
 * lowest target id.
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	/* No valid BIOS page 3 - boot device ordering does not apply. */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	device = NULL;
	/* Preference order: requested, alternate requested, current. */
	if (ioc->req_boot_device.device) {
		device =  ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device =  ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device =  ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	if (channel == RAID_CHANNEL) {
		raid_device = device;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		/* Promote from the init list to the active list first. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = device;
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		/* SAS end device: snapshot fields under the lock, then add. */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = device;
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		/* WarpDrive hides member drives from the OS. */
		if (ioc->hide_drives)
			return;

		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			/*
			 * Port added but no scsi target appeared; clean up
			 * unless the initial driver load will retry later.
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}
11305
11306
11307
11308
11309
11310
11311
11312static void
11313_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11314{
11315 struct _raid_device *raid_device, *raid_next;
11316 int rc;
11317
11318 list_for_each_entry_safe(raid_device, raid_next,
11319 &ioc->raid_device_list, list) {
11320 if (raid_device->starget)
11321 continue;
11322 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11323 raid_device->id, 0);
11324 if (rc)
11325 _scsih_raid_device_remove(ioc, raid_device);
11326 }
11327}
11328
11329static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11330{
11331 struct _sas_device *sas_device = NULL;
11332 unsigned long flags;
11333
11334 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11335 if (!list_empty(&ioc->sas_device_init_list)) {
11336 sas_device = list_first_entry(&ioc->sas_device_init_list,
11337 struct _sas_device, list);
11338 sas_device_get(sas_device);
11339 }
11340 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11341
11342 return sas_device;
11343}
11344
/**
 * sas_device_make_active - move a SAS device onto the active device list
 * @ioc: per adapter object
 * @sas_device: the device to activate
 *
 * Transfers @sas_device from whichever list currently holds it onto
 * ioc->sas_device_list, fixing up the list's reference counts.
 */
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * If the device is still on a list (e.g. the init list), unlink it
	 * and drop the reference that list held. list_del_init() keeps the
	 * node self-linked so a later list_empty() check stays valid.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* The active list takes its own reference. */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
11370
11371
11372
11373
11374
11375
11376
/**
 * _scsih_probe_sas - reporting sas devices to sas transport
 * @ioc: per adapter object
 *
 * Drains ioc->sas_device_init_list, adding each device to the SAS
 * transport layer and promoting successful ones to the active list.
 * Skipped entirely for WarpDrive (hidden member drives).
 */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	if (ioc->hide_drives)
		return;

	/* get_next_sas_device() returns a referenced entry or NULL. */
	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent, sas_device->port)) {
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * Port added but no scsi target appeared; clean up
			 * unless the initial driver load will retry later.
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent,
				    sas_device->port);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		sas_device_make_active(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
11412
11413
11414
11415
11416
11417
11418
11419
11420
11421
11422static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11423{
11424 struct _pcie_device *pcie_device = NULL;
11425 unsigned long flags;
11426
11427 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11428 if (!list_empty(&ioc->pcie_device_init_list)) {
11429 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11430 struct _pcie_device, list);
11431 pcie_device_get(pcie_device);
11432 }
11433 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11434
11435 return pcie_device;
11436}
11437
11438
11439
11440
11441
11442
11443
11444
11445
/**
 * pcie_device_make_active - move a PCIe device onto the active device list
 * @ioc: per adapter object
 * @pcie_device: the device to activate
 *
 * Transfers @pcie_device from whichever list currently holds it onto
 * ioc->pcie_device_list, fixing up the list's reference counts.
 */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/* Drop the old list's reference before re-homing the entry. */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	/* The active list takes its own reference. */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
11462
11463
11464
11465
11466
11467
11468
/**
 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
 * @ioc: per adapter object
 *
 * Drains ioc->pcie_device_init_list, calling scsi_add_device() for each
 * entry and promoting successful ones to the active list.
 */
static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	int rc;

	/* get_next_pcie_device() returns a referenced entry or NULL. */
	while ((pcie_device = get_next_pcie_device(ioc))) {
		if (pcie_device->starget) {
			pcie_device_put(pcie_device);
			continue;
		}
		/* Blocked devices go on the active list but are not exposed. */
		if (pcie_device->access_status ==
		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
			pcie_device_make_active(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		}
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
		    pcie_device->id, 0);
		if (rc) {
			_scsih_pcie_device_remove(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		} else if (!pcie_device->starget) {
			/*
			 * Added but no scsi target appeared; clean up unless
			 * the initial driver load will retry later.
			 */
			if (!ioc->is_driver_loading) {
				/*
				 * TODO -- only need to remove from the
				 * pcie_device_list
				 */
				_scsih_pcie_device_remove(ioc, pcie_device);
				pcie_device_put(pcie_device);
				continue;
			}
		}
		pcie_device_make_active(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}
}
11513
11514
11515
11516
11517
11518
11519
/**
 * _scsih_probe_devices - probing for devices
 * @ioc: per adapter object
 *
 * Reports the BIOS boot device first, then the remaining devices. With
 * IR firmware, IOC page 8's volume mapping mode decides whether volumes
 * or bare SAS drives are reported first (which affects target-id
 * ordering).
 *
 * NOTE(review): PCIe devices are probed only in the non-IR branch -
 * presumably IR-capable firmware never exposes NVMe devices; confirm.
 */
static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u16 volume_mapping_flags;

	/* SCSI initiator protocol not supported - nothing to probe. */
	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
		return;

	_scsih_probe_boot_devices(ioc);

	if (ioc->ir_firmware) {
		volume_mapping_flags =
		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
		if (volume_mapping_flags ==
		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
			_scsih_probe_raid(ioc);
			_scsih_probe_sas(ioc);
		} else {
			_scsih_probe_sas(ioc);
			_scsih_probe_raid(ioc);
		}
	} else {
		_scsih_probe_sas(ioc);
		_scsih_probe_pcie(ioc);
	}
}
11547
11548
11549
11550
11551
11552
11553
11554
11555
11556static void
11557scsih_scan_start(struct Scsi_Host *shost)
11558{
11559 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11560 int rc;
11561 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11562 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11563 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11564 mpt3sas_enable_diag_buffer(ioc, 1);
11565
11566 if (disable_discovery > 0)
11567 return;
11568
11569 ioc->start_scan = 1;
11570 rc = mpt3sas_port_enable(ioc);
11571
11572 if (rc != 0)
11573 ioc_info(ioc, "port enable: FAILED\n");
11574}
11575
11576
11577
11578
11579
11580
11581
11582
11583
11584
/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * Polled by the midlayer while waiting for port enable to complete.
 *
 * Return: 1 when the scan is finished (success, failure, or 300s
 * timeout), 0 to keep polling.
 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* Discovery disabled via module parameter - report done at once. */
	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* Give up on port enable after 300 seconds. */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* Port enable still in flight (cleared by the worker thread). */
	if (ioc->start_scan)
		return 0;

	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* First load: report discovered devices now that discovery is done. */
	if (ioc->wait_for_discovery_to_complete) {
		ioc->wait_for_discovery_to_complete = 0;
		_scsih_probe_devices(ioc);
	}
	mpt3sas_base_start_watchdog(ioc);
	ioc->is_driver_loading = 0;
	return 1;
}
11626
11627
11628
11629
11630
11631static int scsih_map_queues(struct Scsi_Host *shost)
11632{
11633 struct MPT3SAS_ADAPTER *ioc =
11634 (struct MPT3SAS_ADAPTER *)shost->hostdata;
11635
11636 if (ioc->shost->nr_hw_queues == 1)
11637 return 0;
11638
11639 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
11640 ioc->pdev, ioc->high_iops_queues);
11641}
11642
11643
/* shost template for SAS 2.0 HBA devices (driver name "mpt2sas") */
static struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* can_queue is overridden at attach time from IOC facts. */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};
11673
11674
/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11681
11682
/* shost template for SAS 3.0 & above HBA devices (driver name "mpt3sas") */
static struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* can_queue is overridden at attach time from IOC facts. */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	/* SAS 3.x parts support multiple hardware queues. */
	.map_queues			= scsih_map_queues,
};
11713
11714
/* raid transport support for SAS 3.0 & above HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11721
11722
11723
11724
11725
11726
11727
11728
11729
11730
/**
 * _scsih_determine_hba_mpi_version - classify an HBA by PCI device id
 * @pdev: PCI device struct
 *
 * Return: MPI2_VERSION for SAS 2.0 parts, MPI25_VERSION for SAS 3.0
 * parts, MPI26_VERSION for SAS 3.5 / tri-mode parts, or 0 for an
 * unrecognized device id.
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{
	/* SAS 2.0 (MPI 2.0) generation */
	switch (pdev->device) {
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 (MPI 2.5) generation */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* SAS 3.5 / tri-mode (MPI 2.6) generation */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	/* Unknown device id - caller treats 0 as "not ours". */
	return 0;
}
11793
11794
11795
11796
11797
11798
11799
11800
11801static int
11802_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
11803{
11804 struct MPT3SAS_ADAPTER *ioc;
11805 struct Scsi_Host *shost = NULL;
11806 int rv;
11807 u16 hba_mpi_version;
11808
11809
11810 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
11811 if (hba_mpi_version == 0)
11812 return -ENODEV;
11813
11814
11815
11816
11817 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
11818 return -ENODEV;
11819
11820
11821
11822
11823 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
11824 || hba_mpi_version == MPI26_VERSION)))
11825 return -ENODEV;
11826
11827 switch (hba_mpi_version) {
11828 case MPI2_VERSION:
11829 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
11830 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
11831
11832 shost = scsi_host_alloc(&mpt2sas_driver_template,
11833 sizeof(struct MPT3SAS_ADAPTER));
11834 if (!shost)
11835 return -ENODEV;
11836 ioc = shost_priv(shost);
11837 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11838 ioc->hba_mpi_version_belonged = hba_mpi_version;
11839 ioc->id = mpt2_ids++;
11840 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
11841 switch (pdev->device) {
11842 case MPI2_MFGPAGE_DEVID_SSS6200:
11843 ioc->is_warpdrive = 1;
11844 ioc->hide_ir_msg = 1;
11845 break;
11846 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11847 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11848 ioc->is_mcpu_endpoint = 1;
11849 break;
11850 default:
11851 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
11852 break;
11853 }
11854
11855 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11856 ioc->multipath_on_hba = 0;
11857 else
11858 ioc->multipath_on_hba = 1;
11859
11860 break;
11861 case MPI25_VERSION:
11862 case MPI26_VERSION:
11863
11864 shost = scsi_host_alloc(&mpt3sas_driver_template,
11865 sizeof(struct MPT3SAS_ADAPTER));
11866 if (!shost)
11867 return -ENODEV;
11868 ioc = shost_priv(shost);
11869 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11870 ioc->hba_mpi_version_belonged = hba_mpi_version;
11871 ioc->id = mpt3_ids++;
11872 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
11873 switch (pdev->device) {
11874 case MPI26_MFGPAGE_DEVID_SAS3508:
11875 case MPI26_MFGPAGE_DEVID_SAS3508_1:
11876 case MPI26_MFGPAGE_DEVID_SAS3408:
11877 case MPI26_MFGPAGE_DEVID_SAS3516:
11878 case MPI26_MFGPAGE_DEVID_SAS3516_1:
11879 case MPI26_MFGPAGE_DEVID_SAS3416:
11880 case MPI26_MFGPAGE_DEVID_SAS3616:
11881 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11882 ioc->is_gen35_ioc = 1;
11883 break;
11884 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11885 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11886 dev_err(&pdev->dev,
11887 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
11888 pdev->device, pdev->subsystem_vendor,
11889 pdev->subsystem_device);
11890 return 1;
11891 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11892 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11893 dev_err(&pdev->dev,
11894 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
11895 pdev->device, pdev->subsystem_vendor,
11896 pdev->subsystem_device);
11897 return 1;
11898 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11899 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11900 dev_info(&pdev->dev,
11901 "HBA is in Configurable Secure mode\n");
11902 fallthrough;
11903 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11904 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11905 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
11906 break;
11907 default:
11908 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
11909 }
11910 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
11911 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
11912 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
11913 ioc->combined_reply_queue = 1;
11914 if (ioc->is_gen35_ioc)
11915 ioc->combined_reply_index_count =
11916 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
11917 else
11918 ioc->combined_reply_index_count =
11919 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
11920 }
11921
11922 switch (ioc->is_gen35_ioc) {
11923 case 0:
11924 if (multipath_on_hba == -1 || multipath_on_hba == 0)
11925 ioc->multipath_on_hba = 0;
11926 else
11927 ioc->multipath_on_hba = 1;
11928 break;
11929 case 1:
11930 if (multipath_on_hba == -1 || multipath_on_hba > 0)
11931 ioc->multipath_on_hba = 1;
11932 else
11933 ioc->multipath_on_hba = 0;
11934 default:
11935 break;
11936 }
11937
11938 break;
11939 default:
11940 return -ENODEV;
11941 }
11942
11943 INIT_LIST_HEAD(&ioc->list);
11944 spin_lock(&gioc_lock);
11945 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
11946 spin_unlock(&gioc_lock);
11947 ioc->shost = shost;
11948 ioc->pdev = pdev;
11949 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
11950 ioc->tm_cb_idx = tm_cb_idx;
11951 ioc->ctl_cb_idx = ctl_cb_idx;
11952 ioc->base_cb_idx = base_cb_idx;
11953 ioc->port_enable_cb_idx = port_enable_cb_idx;
11954 ioc->transport_cb_idx = transport_cb_idx;
11955 ioc->scsih_cb_idx = scsih_cb_idx;
11956 ioc->config_cb_idx = config_cb_idx;
11957 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
11958 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
11959 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
11960 ioc->logging_level = logging_level;
11961 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
11962
11963 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
11964
11965
11966
11967 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
11968
11969 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
11970
11971 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
11972
11973
11974 mutex_init(&ioc->reset_in_progress_mutex);
11975
11976 mutex_init(&ioc->pci_access_mutex);
11977 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
11978 spin_lock_init(&ioc->scsi_lookup_lock);
11979 spin_lock_init(&ioc->sas_device_lock);
11980 spin_lock_init(&ioc->sas_node_lock);
11981 spin_lock_init(&ioc->fw_event_lock);
11982 spin_lock_init(&ioc->raid_device_lock);
11983 spin_lock_init(&ioc->pcie_device_lock);
11984 spin_lock_init(&ioc->diag_trigger_lock);
11985
11986 INIT_LIST_HEAD(&ioc->sas_device_list);
11987 INIT_LIST_HEAD(&ioc->sas_device_init_list);
11988 INIT_LIST_HEAD(&ioc->sas_expander_list);
11989 INIT_LIST_HEAD(&ioc->enclosure_list);
11990 INIT_LIST_HEAD(&ioc->pcie_device_list);
11991 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
11992 INIT_LIST_HEAD(&ioc->fw_event_list);
11993 INIT_LIST_HEAD(&ioc->raid_device_list);
11994 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
11995 INIT_LIST_HEAD(&ioc->delayed_tr_list);
11996 INIT_LIST_HEAD(&ioc->delayed_sc_list);
11997 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
11998 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
11999 INIT_LIST_HEAD(&ioc->reply_queue_list);
12000 INIT_LIST_HEAD(&ioc->port_table_list);
12001
12002 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12003
12004
12005 shost->max_cmd_len = 32;
12006 shost->max_lun = max_lun;
12007 shost->transportt = mpt3sas_transport_template;
12008 shost->unique_id = ioc->id;
12009
12010 if (ioc->is_mcpu_endpoint) {
12011
12012 shost->max_sectors = 128;
12013 ioc_info(ioc, "The max_sectors value is set to %d\n",
12014 shost->max_sectors);
12015 } else {
12016 if (max_sectors != 0xFFFF) {
12017 if (max_sectors < 64) {
12018 shost->max_sectors = 64;
12019 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12020 max_sectors);
12021 } else if (max_sectors > 32767) {
12022 shost->max_sectors = 32767;
12023 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12024 max_sectors);
12025 } else {
12026 shost->max_sectors = max_sectors & 0xFFFE;
12027 ioc_info(ioc, "The max_sectors value is set to %d\n",
12028 shost->max_sectors);
12029 }
12030 }
12031 }
12032
12033 if (prot_mask >= 0)
12034 scsi_host_set_prot(shost, (prot_mask & 0x07));
12035 else
12036 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12037 | SHOST_DIF_TYPE2_PROTECTION
12038 | SHOST_DIF_TYPE3_PROTECTION);
12039
12040 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12041
12042
12043 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12044 "fw_event_%s%d", ioc->driver_name, ioc->id);
12045 ioc->firmware_event_thread = alloc_ordered_workqueue(
12046 ioc->firmware_event_name, 0);
12047 if (!ioc->firmware_event_thread) {
12048 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12049 __FILE__, __LINE__, __func__);
12050 rv = -ENODEV;
12051 goto out_thread_fail;
12052 }
12053
12054 ioc->is_driver_loading = 1;
12055 if ((mpt3sas_base_attach(ioc))) {
12056 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12057 __FILE__, __LINE__, __func__);
12058 rv = -ENODEV;
12059 goto out_attach_fail;
12060 }
12061
12062 if (ioc->is_warpdrive) {
12063 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12064 ioc->hide_drives = 0;
12065 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12066 ioc->hide_drives = 1;
12067 else {
12068 if (mpt3sas_get_num_volumes(ioc))
12069 ioc->hide_drives = 1;
12070 else
12071 ioc->hide_drives = 0;
12072 }
12073 } else
12074 ioc->hide_drives = 0;
12075
12076 shost->host_tagset = 0;
12077 shost->nr_hw_queues = 1;
12078
12079 if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
12080 host_tagset_enable && ioc->smp_affinity_enable) {
12081
12082 shost->host_tagset = 1;
12083 shost->nr_hw_queues =
12084 ioc->reply_queue_count - ioc->high_iops_queues;
12085
12086 dev_info(&ioc->pdev->dev,
12087 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12088 shost->can_queue, shost->nr_hw_queues);
12089 }
12090
12091 rv = scsi_add_host(shost, &pdev->dev);
12092 if (rv) {
12093 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12094 __FILE__, __LINE__, __func__);
12095 goto out_add_shost_fail;
12096 }
12097
12098 scsi_scan_host(shost);
12099 mpt3sas_setup_debugfs(ioc);
12100 return 0;
12101out_add_shost_fail:
12102 mpt3sas_base_detach(ioc);
12103 out_attach_fail:
12104 destroy_workqueue(ioc->firmware_event_thread);
12105 out_thread_fail:
12106 spin_lock(&gioc_lock);
12107 list_del(&ioc->list);
12108 spin_unlock(&gioc_lock);
12109 scsi_host_put(shost);
12110 return rv;
12111}
12112
12113
12114
12115
12116
12117
12118
/**
 * scsih_suspend - power management suspend main entry point
 * @dev: Device struct
 *
 * Return: 0 success, anything else error.
 */
static int __maybe_unused
scsih_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	int rc;

	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (rc)
		return rc;

	/* Stop the watchdog and quiesce I/O before tearing down resources. */
	mpt3sas_base_stop_watchdog(ioc);
	flush_scheduled_work();
	scsi_block_requests(shost);
	/* shut down attached NVMe devices before powering down */
	_scsih_nvme_shutdown(ioc);
	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
	    pdev, pci_name(pdev));

	mpt3sas_base_free_resources(ioc);
	return 0;
}
12141
12142
12143
12144
12145
12146
12147
/**
 * scsih_resume - power management resume main entry point
 * @dev: Device struct
 *
 * Return: 0 success, anything else error.
 */
static int __maybe_unused
scsih_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (r)
		return r;

	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);

	ioc->pdev = pdev;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		return r;
	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
	/* NOTE(review): the log message says "Hard Reset" but the reset type
	 * requested here is SOFT_RESET — confirm this mismatch is intended.
	 */
	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
	scsi_unblock_requests(shost);
	mpt3sas_base_start_watchdog(ioc);
	return 0;
}
12174
12175
12176
12177
12178
12179
12180
12181
12182
12183
/**
 * scsih_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a driver or device is injured in some way.
 *
 * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or
 *	PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error: quiesce I/O, stop the watchdog and release
		 * resources so the slot can be reset.
		 */
		ioc->pci_error_recovery = 1;
		scsi_block_requests(ioc->shost);
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_free_resources(ioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error: prepare for device removal by failing
		 * all outstanding commands.
		 */
		ioc->pci_error_recovery = 1;
		mpt3sas_base_stop_watchdog(ioc);
		_scsih_flush_running_cmds(ioc);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
12214
12215
12216
12217
12218
12219
12220
12221
12222
/**
 * scsih_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	int rc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: slot reset callback!!\n");

	ioc->pci_error_recovery = 0;
	ioc->pdev = pdev;
	pci_restore_state(pdev);
	rc = mpt3sas_base_map_resources(ioc);
	if (rc)
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

	ioc_warn(ioc, "hard reset: %s\n",
		 (rc == 0) ? "success" : "failed");

	if (!rc)
		return PCI_ERS_RESULT_RECOVERED;
	else
		return PCI_ERS_RESULT_DISCONNECT;
}
12253
12254
12255
12256
12257
12258
12259
12260
12261
/**
 * scsih_pci_resume - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its
 * OK to resume normal operation: restart the watchdog and
 * allow halted scsi ops to resume.
 */
static void
scsih_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc_info(ioc, "PCI error: resume callback!!\n");

	mpt3sas_base_start_watchdog(ioc);
	scsi_unblock_requests(ioc->shost);
}
12276
12277
12278
12279
12280
/**
 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This called only if scsih_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
12300
12301
12302
12303
12304
12305
12306
12307
12308bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12309{
12310 unsigned char *buf;
12311 bool ncq_prio_supp = false;
12312
12313 if (!scsi_device_supports_vpd(sdev))
12314 return ncq_prio_supp;
12315
12316 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12317 if (!buf)
12318 return ncq_prio_supp;
12319
12320 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12321 ncq_prio_supp = (buf[213] >> 4) & 1;
12322
12323 kfree(buf);
12324 return ncq_prio_supp;
12325}
12326
12327
12328
12329static const struct pci_device_id mpt3sas_pci_table[] = {
12330
12331 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12332 PCI_ANY_ID, PCI_ANY_ID },
12333 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12334 PCI_ANY_ID, PCI_ANY_ID },
12335 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12336 PCI_ANY_ID, PCI_ANY_ID },
12337 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12338 PCI_ANY_ID, PCI_ANY_ID },
12339 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12340 PCI_ANY_ID, PCI_ANY_ID },
12341 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12342 PCI_ANY_ID, PCI_ANY_ID },
12343
12344 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12345 PCI_ANY_ID, PCI_ANY_ID },
12346 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12347 PCI_ANY_ID, PCI_ANY_ID },
12348 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12349 PCI_ANY_ID, PCI_ANY_ID },
12350 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12351 PCI_ANY_ID, PCI_ANY_ID },
12352 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12353 PCI_ANY_ID, PCI_ANY_ID },
12354
12355 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12356 PCI_ANY_ID, PCI_ANY_ID },
12357 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12358 PCI_ANY_ID, PCI_ANY_ID },
12359
12360 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12361 PCI_ANY_ID, PCI_ANY_ID },
12362 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12363 PCI_ANY_ID, PCI_ANY_ID },
12364 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12365 PCI_ANY_ID, PCI_ANY_ID },
12366 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12367 PCI_ANY_ID, PCI_ANY_ID },
12368
12369 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12370 PCI_ANY_ID, PCI_ANY_ID },
12371 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12372 PCI_ANY_ID, PCI_ANY_ID },
12373
12374 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12375 PCI_ANY_ID, PCI_ANY_ID },
12376 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12377 PCI_ANY_ID, PCI_ANY_ID },
12378 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12379 PCI_ANY_ID, PCI_ANY_ID },
12380 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12381 PCI_ANY_ID, PCI_ANY_ID },
12382 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12383 PCI_ANY_ID, PCI_ANY_ID },
12384 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12385 PCI_ANY_ID, PCI_ANY_ID },
12386 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12387 PCI_ANY_ID, PCI_ANY_ID },
12388 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12389 PCI_ANY_ID, PCI_ANY_ID },
12390
12391 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12392 PCI_ANY_ID, PCI_ANY_ID },
12393 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12394 PCI_ANY_ID, PCI_ANY_ID },
12395 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12396 PCI_ANY_ID, PCI_ANY_ID },
12397 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12398 PCI_ANY_ID, PCI_ANY_ID },
12399 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12400 PCI_ANY_ID, PCI_ANY_ID },
12401 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12402 PCI_ANY_ID, PCI_ANY_ID },
12403
12404 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12405 PCI_ANY_ID, PCI_ANY_ID },
12406
12407
12408
12409
12410 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12411 PCI_ANY_ID, PCI_ANY_ID },
12412 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12413 PCI_ANY_ID, PCI_ANY_ID },
12414
12415
12416
12417
12418 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12419 PCI_ANY_ID, PCI_ANY_ID },
12420 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12421 PCI_ANY_ID, PCI_ANY_ID },
12422
12423
12424 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12425 PCI_ANY_ID, PCI_ANY_ID },
12426
12427
12428
12429
12430 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12431 PCI_ANY_ID, PCI_ANY_ID },
12432 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12433 PCI_ANY_ID, PCI_ANY_ID },
12434
12435
12436
12437
12438 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12439 PCI_ANY_ID, PCI_ANY_ID },
12440 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12441 PCI_ANY_ID, PCI_ANY_ID },
12442
12443 {0}
12444};
12445MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12446
/* PCI error-recovery (AER) callbacks */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected	= scsih_pci_error_detected,
	.mmio_enabled	= scsih_pci_mmio_enabled,
	.slot_reset	= scsih_pci_slot_reset,
	.resume		= scsih_pci_resume,
};

/* Power-management hooks for system suspend/resume */
static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

/* PCI driver registration: ties the device table to probe/remove/shutdown */
static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};
12465
12466
12467
12468
12469
12470
/**
 * scsih_init - main entry point for this driver.
 *
 * Registers all MPT callback handlers with the base driver.
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback hander */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* target-reset / SAS-control completion handlers */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}
12516
12517
12518
12519
12520
12521
/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases all callback handlers registered by scsih_init() and the
 * raid/sas transport templates attached in _mpt3sas_init().
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}
12547
12548
12549
12550
12551
12552
12553static int __init
12554_mpt3sas_init(void)
12555{
12556 int error;
12557
12558 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12559 MPT3SAS_DRIVER_VERSION);
12560
12561 mpt3sas_transport_template =
12562 sas_attach_transport(&mpt3sas_transport_functions);
12563 if (!mpt3sas_transport_template)
12564 return -ENODEV;
12565
12566
12567
12568
12569 if (hbas_to_enumerate != 1) {
12570 mpt3sas_raid_template =
12571 raid_class_attach(&mpt3sas_raid_functions);
12572 if (!mpt3sas_raid_template) {
12573 sas_release_transport(mpt3sas_transport_template);
12574 return -ENODEV;
12575 }
12576 }
12577
12578
12579
12580
12581 if (hbas_to_enumerate != 2) {
12582 mpt2sas_raid_template =
12583 raid_class_attach(&mpt2sas_raid_functions);
12584 if (!mpt2sas_raid_template) {
12585 sas_release_transport(mpt3sas_transport_template);
12586 return -ENODEV;
12587 }
12588 }
12589
12590 error = scsih_init();
12591 if (error) {
12592 scsih_exit();
12593 return error;
12594 }
12595
12596 mpt3sas_ctl_init(hbas_to_enumerate);
12597
12598 error = pci_register_driver(&mpt3sas_driver);
12599 if (error)
12600 scsih_exit();
12601
12602 return error;
12603}
12604
12605
12606
12607
12608
/**
 * _mpt3sas_exit - exit point for this driver (when it is a module).
 *
 * Tears down in reverse order of _mpt3sas_init(): ctl interface,
 * PCI driver, then the scsih callback handlers and templates.
 */
static void __exit
_mpt3sas_exit(void)
{
	pr_info("mpt3sas version %s unloading\n",
				MPT3SAS_DRIVER_VERSION);

	mpt3sas_ctl_exit(hbas_to_enumerate);

	pci_unregister_driver(&mpt3sas_driver);

	scsih_exit();
}
12621
12622module_init(_mpt3sas_init);
12623module_exit(_mpt3sas_exit);
12624