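/*
 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
 */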
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
#include <linux/blk-mq-pci.h>
#include <asm/unaligned.h>

#include "mpt3sas_base.h"

#define RAID_CHANNEL 1

#define PCIE_CHANNEL 2

67static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69static void _firmware_event_work(struct work_struct *work);
70
71static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
78static void
79_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
82
83
84LIST_HEAD(mpt3sas_ioc_list);
85
86DEFINE_SPINLOCK(gioc_lock);
87
88MODULE_AUTHOR(MPT3SAS_AUTHOR);
89MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
90MODULE_LICENSE("GPL");
91MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
92MODULE_ALIAS("mpt2sas");
93
94
95static u8 scsi_io_cb_idx = -1;
96static u8 tm_cb_idx = -1;
97static u8 ctl_cb_idx = -1;
98static u8 base_cb_idx = -1;
99static u8 port_enable_cb_idx = -1;
100static u8 transport_cb_idx = -1;
101static u8 scsih_cb_idx = -1;
102static u8 config_cb_idx = -1;
103static int mpt2_ids;
104static int mpt3_ids;
105
106static u8 tm_tr_cb_idx = -1 ;
107static u8 tm_tr_volume_cb_idx = -1 ;
108static u8 tm_sas_control_cb_idx = -1;
109
110
111static u32 logging_level;
112MODULE_PARM_DESC(logging_level,
113 " bits for enabling additional logging info (default=0)");
114
115
116static ushort max_sectors = 0xFFFF;
117module_param(max_sectors, ushort, 0444);
118MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
119
120
121static int missing_delay[2] = {-1, -1};
122module_param_array(missing_delay, int, NULL, 0444);
123MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
124
125
126#define MPT3SAS_MAX_LUN (16895)
127static u64 max_lun = MPT3SAS_MAX_LUN;
128module_param(max_lun, ullong, 0444);
129MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130
131static ushort hbas_to_enumerate;
132module_param(hbas_to_enumerate, ushort, 0444);
133MODULE_PARM_DESC(hbas_to_enumerate,
134 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
135 1 - enumerates only SAS 2.0 generation HBAs\n \
136 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
145static int diag_buffer_enable = -1;
146module_param(diag_buffer_enable, int, 0444);
147MODULE_PARM_DESC(diag_buffer_enable,
148 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
149static int disable_discovery = -1;
150module_param(disable_discovery, int, 0444);
151MODULE_PARM_DESC(disable_discovery, " disable discovery ");
152
153
154
155static int prot_mask = -1;
156module_param(prot_mask, int, 0444);
157MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
158
159static bool enable_sdev_max_qd;
160module_param(enable_sdev_max_qd, bool, 0444);
161MODULE_PARM_DESC(enable_sdev_max_qd,
162 "Enable sdev max qd as can_queue, def=disabled(0)");
163
164static int multipath_on_hba = -1;
165module_param(multipath_on_hba, int, 0);
166MODULE_PARM_DESC(multipath_on_hba,
167 "Multipath support to add same target device\n\t\t"
168 "as many times as it is visible to HBA from various paths\n\t\t"
169 "(by default:\n\t\t"
170 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
171 "\t SAS 3.5 HBA - This will be enabled)");
172
173static int host_tagset_enable = 1;
174module_param(host_tagset_enable, int, 0444);
175MODULE_PARM_DESC(host_tagset_enable,
176 "Shared host tagset enable/disable Default: enable(1)");
177
178
179static struct raid_template *mpt3sas_raid_template;
180static struct raid_template *mpt2sas_raid_template;
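
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */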
189struct sense_info {
190 u8 skey;
191 u8 asc;
192 u8 ascq;
193};
194
195#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
196#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
197#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
198#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
199#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
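
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object, run on the firmware event workqueue
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object is stored on ioc->fw_event_list.
 */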
215struct fw_event_work {
216 struct list_head list;
217 struct work_struct work;
218
219 struct MPT3SAS_ADAPTER *ioc;
220 u16 device_handle;
221 u8 VF_ID;
222 u8 VP_ID;
223 u8 ignore;
224 u16 event;
225 struct kref refcount;
226 char event_data[] __aligned(4);
227};
228
229static void fw_event_work_free(struct kref *r)
230{
231 kfree(container_of(r, struct fw_event_work, refcount));
232}
233
234static void fw_event_work_get(struct fw_event_work *fw_work)
235{
236 kref_get(&fw_work->refcount);
237}
238
239static void fw_event_work_put(struct fw_event_work *fw_work)
240{
241 kref_put(&fw_work->refcount, fw_event_work_free);
242}
243
244static struct fw_event_work *alloc_fw_event_work(int len)
245{
246 struct fw_event_work *fw_event;
247
248 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
249 if (!fw_event)
250 return NULL;
251
252 kref_init(&fw_event->refcount);
253 return fw_event;
254}
255
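
/**
 * struct _scsi_io_transfer - scsi io transfer
 *
 * Used when sending internal scsi commands to devices within this module.
 */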
281struct _scsi_io_transfer {
282 u16 handle;
283 u8 is_raid;
284 enum dma_data_direction dir;
285 u32 data_length;
286 dma_addr_t data_dma;
287 u8 sense[SCSI_SENSE_BUFFERSIZE];
288 u32 lun;
289 u8 cdb_length;
290 u8 cdb[32];
291 u8 timeout;
292 u8 VF_ID;
293 u8 VP_ID;
294 u8 valid_reply;
295
296 u32 sense_length;
297 u16 ioc_status;
298 u8 scsi_state;
299 u8 scsi_status;
300 u32 log_info;
301 u32 transfer_length;
302};
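
/**
 * _scsih_set_debug_level - global setting of ioc->logging_level.
 * @val: string value supplied for the module parameter
 * @kp: kernel parameter descriptor
 *
 * Apply the updated logging_level to every adapter on mpt3sas_ioc_list.
 */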
311static int
312_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
313{
314 int ret = param_set_int(val, kp);
315 struct MPT3SAS_ADAPTER *ioc;
316
317 if (ret)
318 return ret;
319
320 pr_info("setting logging_level(0x%08x)\n", logging_level);
321 spin_lock(&gioc_lock);
322 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
323 ioc->logging_level = logging_level;
324 spin_unlock(&gioc_lock);
325 return 0;
326}
327module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
328 &logging_level, 0644);
329
330
331
332
333
334
335
336
337static inline int
338_scsih_srch_boot_sas_address(u64 sas_address,
339 Mpi2BootDeviceSasWwid_t *boot_device)
340{
341 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
342}
343
344
345
346
347
348
349
350
351static inline int
352_scsih_srch_boot_device_name(u64 device_name,
353 Mpi2BootDeviceDeviceName_t *boot_device)
354{
355 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
356}
357
358
359
360
361
362
363
364
365
366static inline int
367_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
368 Mpi2BootDeviceEnclosureSlot_t *boot_device)
369{
370 return (enclosure_logical_id == le64_to_cpu(boot_device->
371 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
372 SlotNumber)) ? 1 : 0;
373}
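
/**
 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
 *			    port number from the port table list
 * @ioc: per adapter object
 * @port_id: port number
 * @bypass_dirty_port_flag: when set, return the matching port entry even
 *			    if it is marked dirty
 *
 * When multipath_on_hba is disabled, all devices are reported through a
 * single port entry; a missing entry is then allocated and added to the
 * port table list.  Returns the hba_port object or NULL.
 */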
386struct hba_port *
387mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
388 u8 port_id, u8 bypass_dirty_port_flag)
389{
390 struct hba_port *port, *port_next;
391
392
393
394
395
396
397 if (!ioc->multipath_on_hba)
398 port_id = MULTIPATH_DISABLED_PORT_ID;
399
400 list_for_each_entry_safe(port, port_next,
401 &ioc->port_table_list, list) {
402 if (port->port_id != port_id)
403 continue;
404 if (bypass_dirty_port_flag)
405 return port;
406 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
407 continue;
408 return port;
409 }
410
411
412
413
414
415
416 if (!ioc->multipath_on_hba) {
417 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
418 if (!port)
419 return NULL;
420
421 port->port_id = port_id;
422 ioc_info(ioc,
423 "hba_port entry: %p, port: %d is added to hba_port list\n",
424 port, port->port_id);
425 list_add_tail(&port->list,
426 &ioc->port_table_list);
427 return port;
428 }
429 return NULL;
430}
431
432
433
434
435
436
437
438
439
440struct virtual_phy *
441mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
442 struct hba_port *port, u32 phy)
443{
444 struct virtual_phy *vphy, *vphy_next;
445
446 if (!port->vphys_mask)
447 return NULL;
448
449 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
450 if (vphy->phy_mask & (1 << phy))
451 return vphy;
452 }
453 return NULL;
454}
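
/**
 * _scsih_is_boot_device - search for a matching boot device.
 * @sas_address: sas address
 * @device_name: device name specified in the IDENTIFY frame
 * @enclosure_logical_id: enclosure logical id
 * @slot: slot number
 * @form: specifies boot device form
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */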
467static int
468_scsih_is_boot_device(u64 sas_address, u64 device_name,
469 u64 enclosure_logical_id, u16 slot, u8 form,
470 Mpi2BiosPage2BootDevice_t *boot_device)
471{
472 int rc = 0;
473
474 switch (form) {
475 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
476 if (!sas_address)
477 break;
478 rc = _scsih_srch_boot_sas_address(
479 sas_address, &boot_device->SasWwid);
480 break;
481 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
482 if (!enclosure_logical_id)
483 break;
484 rc = _scsih_srch_boot_encl_slot(
485 enclosure_logical_id,
486 slot, &boot_device->EnclosureSlot);
487 break;
488 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
489 if (!device_name)
490 break;
491 rc = _scsih_srch_boot_device_name(
492 device_name, &boot_device->DeviceName);
493 break;
494 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
495 break;
496 }
497
498 return rc;
499}
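
/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address
 *
 * Return: 0 on success, -ENXIO when the config page is invalid, -EIO on
 * any other config page failure.
 */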
509static int
510_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
511 u64 *sas_address)
512{
513 Mpi2SasDevicePage0_t sas_device_pg0;
514 Mpi2ConfigReply_t mpi_reply;
515 u32 ioc_status;
516
517 *sas_address = 0;
518
519 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
520 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
521 ioc_err(ioc, "failure at %s:%d/%s()!\n",
522 __FILE__, __LINE__, __func__);
523 return -ENXIO;
524 }
525
526 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
527 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
528
529
530
531 if ((handle <= ioc->sas_hba.num_phys) &&
532 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
533 MPI2_SAS_DEVICE_INFO_SEP)))
534 *sas_address = ioc->sas_hba.sas_address;
535 else
536 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
537 return 0;
538 }
539
540
541 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
542 return -ENXIO;
543
544
545 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
546 handle, ioc_status, __FILE__, __LINE__, __func__);
547 return -EIO;
548}
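
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device, pcie_device or raid_device object
 * @channel: SAS, PCIe or RAID channel
 *
 * Determine whether this device should be reported first to scsi-ml or
 * the sas transport, for persistent boot device purposes.  There are
 * requested, requested-alternate and current entries in bios page 2;
 * the matching device object and channel are saved on the ioc for use
 * during device probing.
 */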
563static void
564_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
565 u32 channel)
566{
567 struct _sas_device *sas_device;
568 struct _pcie_device *pcie_device;
569 struct _raid_device *raid_device;
570 u64 sas_address;
571 u64 device_name;
572 u64 enclosure_logical_id;
573 u16 slot;
574
575
576 if (!ioc->is_driver_loading)
577 return;
578
579
580 if (!ioc->bios_pg3.BiosVersion)
581 return;
582
583 if (channel == RAID_CHANNEL) {
584 raid_device = device;
585 sas_address = raid_device->wwid;
586 device_name = 0;
587 enclosure_logical_id = 0;
588 slot = 0;
589 } else if (channel == PCIE_CHANNEL) {
590 pcie_device = device;
591 sas_address = pcie_device->wwid;
592 device_name = 0;
593 enclosure_logical_id = 0;
594 slot = 0;
595 } else {
596 sas_device = device;
597 sas_address = sas_device->sas_address;
598 device_name = sas_device->device_name;
599 enclosure_logical_id = sas_device->enclosure_logical_id;
600 slot = sas_device->slot;
601 }
602
603 if (!ioc->req_boot_device.device) {
604 if (_scsih_is_boot_device(sas_address, device_name,
605 enclosure_logical_id, slot,
606 (ioc->bios_pg2.ReqBootDeviceForm &
607 MPI2_BIOSPAGE2_FORM_MASK),
608 &ioc->bios_pg2.RequestedBootDevice)) {
609 dinitprintk(ioc,
610 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
611 __func__, (u64)sas_address));
612 ioc->req_boot_device.device = device;
613 ioc->req_boot_device.channel = channel;
614 }
615 }
616
617 if (!ioc->req_alt_boot_device.device) {
618 if (_scsih_is_boot_device(sas_address, device_name,
619 enclosure_logical_id, slot,
620 (ioc->bios_pg2.ReqAltBootDeviceForm &
621 MPI2_BIOSPAGE2_FORM_MASK),
622 &ioc->bios_pg2.RequestedAltBootDevice)) {
623 dinitprintk(ioc,
624 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
625 __func__, (u64)sas_address));
626 ioc->req_alt_boot_device.device = device;
627 ioc->req_alt_boot_device.channel = channel;
628 }
629 }
630
631 if (!ioc->current_boot_device.device) {
632 if (_scsih_is_boot_device(sas_address, device_name,
633 enclosure_logical_id, slot,
634 (ioc->bios_pg2.CurrentBootDeviceForm &
635 MPI2_BIOSPAGE2_FORM_MASK),
636 &ioc->bios_pg2.CurrentBootDevice)) {
637 dinitprintk(ioc,
638 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
639 __func__, (u64)sas_address));
640 ioc->current_boot_device.device = device;
641 ioc->current_boot_device.channel = channel;
642 }
643 }
644}
645
646static struct _sas_device *
647__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
648 struct MPT3SAS_TARGET *tgt_priv)
649{
650 struct _sas_device *ret;
651
652 assert_spin_locked(&ioc->sas_device_lock);
653
654 ret = tgt_priv->sas_dev;
655 if (ret)
656 sas_device_get(ret);
657
658 return ret;
659}
660
661static struct _sas_device *
662mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
663 struct MPT3SAS_TARGET *tgt_priv)
664{
665 struct _sas_device *ret;
666 unsigned long flags;
667
668 spin_lock_irqsave(&ioc->sas_device_lock, flags);
669 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
670 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
671
672 return ret;
673}
674
675static struct _pcie_device *
676__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
677 struct MPT3SAS_TARGET *tgt_priv)
678{
679 struct _pcie_device *ret;
680
681 assert_spin_locked(&ioc->pcie_device_lock);
682
683 ret = tgt_priv->pcie_dev;
684 if (ret)
685 pcie_device_get(ret);
686
687 return ret;
688}
689
690
691
692
693
694
695
696
697
698
699
700static struct _pcie_device *
701mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
702 struct MPT3SAS_TARGET *tgt_priv)
703{
704 struct _pcie_device *ret;
705 unsigned long flags;
706
707 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
708 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
709 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
710
711 return ret;
712}
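
/**
 * __mpt3sas_get_sdev_by_rphy - sas device search by sas_rphy
 * @ioc: per adapter object
 * @rphy: sas transport layer rphy object
 *
 * Context: Calling function should already have acquired the
 * ioc->sas_device_lock.
 *
 * Search both sas_device_list and sas_device_init_list; a reference is
 * taken on the returned object.
 */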
726struct _sas_device *
727__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
728 struct sas_rphy *rphy)
729{
730 struct _sas_device *sas_device;
731
732 assert_spin_locked(&ioc->sas_device_lock);
733
734 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
735 if (sas_device->rphy != rphy)
736 continue;
737 sas_device_get(sas_device);
738 return sas_device;
739 }
740
741 sas_device = NULL;
742 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
743 if (sas_device->rphy != rphy)
744 continue;
745 sas_device_get(sas_device);
746 return sas_device;
747 }
748
749 return NULL;
750}
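
/**
 * __mpt3sas_get_sdev_by_addr - sas device search by address and port
 * @ioc: per adapter object
 * @sas_address: device sas address
 * @port: hba port entry
 *
 * Context: Calling function should already have acquired the
 * ioc->sas_device_lock.
 *
 * Search both sas_device_list and sas_device_init_list for a device with
 * matching sas address and hba port; a reference is taken on the
 * returned object.
 */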
761struct _sas_device *
762__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
763 u64 sas_address, struct hba_port *port)
764{
765 struct _sas_device *sas_device;
766
767 if (!port)
768 return NULL;
769
770 assert_spin_locked(&ioc->sas_device_lock);
771
772 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
773 if (sas_device->sas_address != sas_address)
774 continue;
775 if (sas_device->port != port)
776 continue;
777 sas_device_get(sas_device);
778 return sas_device;
779 }
780
781 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
782 if (sas_device->sas_address != sas_address)
783 continue;
784 if (sas_device->port != port)
785 continue;
786 sas_device_get(sas_device);
787 return sas_device;
788 }
789
790 return NULL;
791}
792
793
794
795
796
797
798
799
800
801
802
803struct _sas_device *
804mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
805 u64 sas_address, struct hba_port *port)
806{
807 struct _sas_device *sas_device;
808 unsigned long flags;
809
810 spin_lock_irqsave(&ioc->sas_device_lock, flags);
811 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
812 sas_address, port);
813 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
814
815 return sas_device;
816}
817
818static struct _sas_device *
819__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
820{
821 struct _sas_device *sas_device;
822
823 assert_spin_locked(&ioc->sas_device_lock);
824
825 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
826 if (sas_device->handle == handle)
827 goto found_device;
828
829 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
830 if (sas_device->handle == handle)
831 goto found_device;
832
833 return NULL;
834
835found_device:
836 sas_device_get(sas_device);
837 return sas_device;
838}
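
/**
 * mpt3sas_get_sdev_by_handle - sas device search by firmware handle
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 *
 * Acquires the sas_device_lock and returns the matching sas device with
 * an elevated reference count, or NULL.
 */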
849struct _sas_device *
850mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
851{
852 struct _sas_device *sas_device;
853 unsigned long flags;
854
855 spin_lock_irqsave(&ioc->sas_device_lock, flags);
856 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
857 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
858
859 return sas_device;
860}
861
862
863
864
865
866
867
868
869static void
870_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
871 struct _sas_device *sas_device, struct scsi_device *sdev,
872 struct scsi_target *starget)
873{
874 if (sdev) {
875 if (sas_device->enclosure_handle != 0)
876 sdev_printk(KERN_INFO, sdev,
877 "enclosure logical id (0x%016llx), slot(%d) \n",
878 (unsigned long long)
879 sas_device->enclosure_logical_id,
880 sas_device->slot);
881 if (sas_device->connector_name[0] != '\0')
882 sdev_printk(KERN_INFO, sdev,
883 "enclosure level(0x%04x), connector name( %s)\n",
884 sas_device->enclosure_level,
885 sas_device->connector_name);
886 if (sas_device->is_chassis_slot_valid)
887 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
888 sas_device->chassis_slot);
889 } else if (starget) {
890 if (sas_device->enclosure_handle != 0)
891 starget_printk(KERN_INFO, starget,
892 "enclosure logical id(0x%016llx), slot(%d) \n",
893 (unsigned long long)
894 sas_device->enclosure_logical_id,
895 sas_device->slot);
896 if (sas_device->connector_name[0] != '\0')
897 starget_printk(KERN_INFO, starget,
898 "enclosure level(0x%04x), connector name( %s)\n",
899 sas_device->enclosure_level,
900 sas_device->connector_name);
901 if (sas_device->is_chassis_slot_valid)
902 starget_printk(KERN_INFO, starget,
903 "chassis slot(0x%04x)\n",
904 sas_device->chassis_slot);
905 } else {
906 if (sas_device->enclosure_handle != 0)
907 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
908 (u64)sas_device->enclosure_logical_id,
909 sas_device->slot);
910 if (sas_device->connector_name[0] != '\0')
911 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
912 sas_device->enclosure_level,
913 sas_device->connector_name);
914 if (sas_device->is_chassis_slot_valid)
915 ioc_info(ioc, "chassis slot(0x%04x)\n",
916 sas_device->chassis_slot);
917 }
918}
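
/**
 * _scsih_sas_device_remove - remove sas device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and decrement its reference
 * count.
 */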
928static void
929_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
930 struct _sas_device *sas_device)
931{
932 unsigned long flags;
933
934 if (!sas_device)
935 return;
936 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
937 sas_device->handle, (u64)sas_device->sas_address);
938
939 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
940
941
942
943
944
945 spin_lock_irqsave(&ioc->sas_device_lock, flags);
946 if (!list_empty(&sas_device->list)) {
947 list_del_init(&sas_device->list);
948 sas_device_put(sas_device);
949 }
950 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
951}
952
953
954
955
956
957
958static void
959_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
960{
961 struct _sas_device *sas_device;
962 unsigned long flags;
963
964 if (ioc->shost_recovery)
965 return;
966
967 spin_lock_irqsave(&ioc->sas_device_lock, flags);
968 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
969 if (sas_device) {
970 list_del_init(&sas_device->list);
971 sas_device_put(sas_device);
972 }
973 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
974 if (sas_device) {
975 _scsih_remove_device(ioc, sas_device);
976 sas_device_put(sas_device);
977 }
978}
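
/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 *					  sas address and hba port
 * @ioc: per adapter object
 * @sas_address: device sas address
 * @port: hba port entry
 *
 * Remove the matching device from the driver's lists and from the sas
 * transport layer.  No-op while host recovery is in progress.
 */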
989void
990mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
991 u64 sas_address, struct hba_port *port)
992{
993 struct _sas_device *sas_device;
994 unsigned long flags;
995
996 if (ioc->shost_recovery)
997 return;
998
999 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1000 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1001 if (sas_device) {
1002 list_del_init(&sas_device->list);
1003 sas_device_put(sas_device);
1004 }
1005 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1006 if (sas_device) {
1007 _scsih_remove_device(ioc, sas_device);
1008 sas_device_put(sas_device);
1009 }
1010}
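
/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Add the object to ioc->sas_device_list and, unless drives are hidden,
 * register the device with the sas transport layer.
 */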
1020static void
1021_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1022 struct _sas_device *sas_device)
1023{
1024 unsigned long flags;
1025
1026 dewtprintk(ioc,
1027 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1028 __func__, sas_device->handle,
1029 (u64)sas_device->sas_address));
1030
1031 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1032 NULL, NULL));
1033
1034 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1035 sas_device_get(sas_device);
1036 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1037 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1038
1039 if (ioc->hide_drives) {
1040 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1041 return;
1042 }
1043
1044 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1045 sas_device->sas_address_parent, sas_device->port)) {
1046 _scsih_sas_device_remove(ioc, sas_device);
1047 } else if (!sas_device->starget) {
1048
1049
1050
1051
1052
1053 if (!ioc->is_driver_loading) {
1054 mpt3sas_transport_port_remove(ioc,
1055 sas_device->sas_address,
1056 sas_device->sas_address_parent,
1057 sas_device->port);
1058 _scsih_sas_device_remove(ioc, sas_device);
1059 }
1060 } else
1061 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1062}
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072static void
1073_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1074 struct _sas_device *sas_device)
1075{
1076 unsigned long flags;
1077
1078 dewtprintk(ioc,
1079 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1080 __func__, sas_device->handle,
1081 (u64)sas_device->sas_address));
1082
1083 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1084 NULL, NULL));
1085
1086 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1087 sas_device_get(sas_device);
1088 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1089 _scsih_determine_boot_device(ioc, sas_device, 0);
1090 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1091}
1092
1093
1094static struct _pcie_device *
1095__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1096{
1097 struct _pcie_device *pcie_device;
1098
1099 assert_spin_locked(&ioc->pcie_device_lock);
1100
1101 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1102 if (pcie_device->wwid == wwid)
1103 goto found_device;
1104
1105 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1106 if (pcie_device->wwid == wwid)
1107 goto found_device;
1108
1109 return NULL;
1110
1111found_device:
1112 pcie_device_get(pcie_device);
1113 return pcie_device;
1114}
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127static struct _pcie_device *
1128mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1129{
1130 struct _pcie_device *pcie_device;
1131 unsigned long flags;
1132
1133 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1134 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1135 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1136
1137 return pcie_device;
1138}
1139
1140
1141static struct _pcie_device *
1142__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1143 int channel)
1144{
1145 struct _pcie_device *pcie_device;
1146
1147 assert_spin_locked(&ioc->pcie_device_lock);
1148
1149 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1150 if (pcie_device->id == id && pcie_device->channel == channel)
1151 goto found_device;
1152
1153 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1154 if (pcie_device->id == id && pcie_device->channel == channel)
1155 goto found_device;
1156
1157 return NULL;
1158
1159found_device:
1160 pcie_device_get(pcie_device);
1161 return pcie_device;
1162}
1163
1164static struct _pcie_device *
1165__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1166{
1167 struct _pcie_device *pcie_device;
1168
1169 assert_spin_locked(&ioc->pcie_device_lock);
1170
1171 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1172 if (pcie_device->handle == handle)
1173 goto found_device;
1174
1175 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1176 if (pcie_device->handle == handle)
1177 goto found_device;
1178
1179 return NULL;
1180
1181found_device:
1182 pcie_device_get(pcie_device);
1183 return pcie_device;
1184}
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198struct _pcie_device *
1199mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1200{
1201 struct _pcie_device *pcie_device;
1202 unsigned long flags;
1203
1204 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1205 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1206 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1207
1208 return pcie_device;
1209}
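
/**
 * _scsih_set_nvme_max_shutdown_latency - update max_shutdown_latency.
 * @ioc: per adapter object
 *
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Set ioc->max_shutdown_latency to the largest shutdown latency reported
 * by any device on the pcie_device_list, using
 * IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT as the floor.
 */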
1220static void
1221_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1222{
1223 struct _pcie_device *pcie_device;
1224 unsigned long flags;
1225 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1226
1227 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1228 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1229 if (pcie_device->shutdown_latency) {
1230 if (shutdown_latency < pcie_device->shutdown_latency)
1231 shutdown_latency =
1232 pcie_device->shutdown_latency;
1233 }
1234 }
1235 ioc->max_shutdown_latency = shutdown_latency;
1236 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1237}
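
/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 *
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference
 * count.  The maximum shutdown latency is recomputed when this device
 * was the one defining it.
 */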
1247static void
1248_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1249 struct _pcie_device *pcie_device)
1250{
1251 unsigned long flags;
1252 int was_on_pcie_device_list = 0;
1253 u8 update_latency = 0;
1254
1255 if (!pcie_device)
1256 return;
1257 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1258 pcie_device->handle, (u64)pcie_device->wwid);
1259 if (pcie_device->enclosure_handle != 0)
1260 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1261 (u64)pcie_device->enclosure_logical_id,
1262 pcie_device->slot);
1263 if (pcie_device->connector_name[0] != '\0')
1264 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1265 pcie_device->enclosure_level,
1266 pcie_device->connector_name);
1267
1268 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1269 if (!list_empty(&pcie_device->list)) {
1270 list_del_init(&pcie_device->list);
1271 was_on_pcie_device_list = 1;
1272 }
1273 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1274 update_latency = 1;
1275 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1276 if (was_on_pcie_device_list) {
1277 kfree(pcie_device->serial_number);
1278 pcie_device_put(pcie_device);
1279 }
1280
1281
1282
1283
1284
1285
1286 if (update_latency)
1287 _scsih_set_nvme_max_shutdown_latency(ioc);
1288}
1289
1290
1291
1292
1293
1294
1295
1296static void
1297_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1298{
1299 struct _pcie_device *pcie_device;
1300 unsigned long flags;
1301 int was_on_pcie_device_list = 0;
1302 u8 update_latency = 0;
1303
1304 if (ioc->shost_recovery)
1305 return;
1306
1307 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1308 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1309 if (pcie_device) {
1310 if (!list_empty(&pcie_device->list)) {
1311 list_del_init(&pcie_device->list);
1312 was_on_pcie_device_list = 1;
1313 pcie_device_put(pcie_device);
1314 }
1315 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1316 update_latency = 1;
1317 }
1318 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1319 if (was_on_pcie_device_list) {
1320 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1321 pcie_device_put(pcie_device);
1322 }
1323
1324
1325
1326
1327
1328
1329 if (update_latency)
1330 _scsih_set_nvme_max_shutdown_latency(ioc);
1331}
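
/**
 * _scsih_pcie_device_add - add pcie_device object to the list.
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * Add the object to ioc->pcie_device_list and expose it to the scsi
 * midlayer via scsi_add_device(), unless firmware reports the device
 * access status as blocked.
 */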
1340static void
1341_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1342 struct _pcie_device *pcie_device)
1343{
1344 unsigned long flags;
1345
1346 dewtprintk(ioc,
1347 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1348 __func__,
1349 pcie_device->handle, (u64)pcie_device->wwid));
1350 if (pcie_device->enclosure_handle != 0)
1351 dewtprintk(ioc,
1352 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1353 __func__,
1354 (u64)pcie_device->enclosure_logical_id,
1355 pcie_device->slot));
1356 if (pcie_device->connector_name[0] != '\0')
1357 dewtprintk(ioc,
1358 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1359 __func__, pcie_device->enclosure_level,
1360 pcie_device->connector_name));
1361
1362 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1363 pcie_device_get(pcie_device);
1364 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1365 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1366
1367 if (pcie_device->access_status ==
1368 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1369 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1370 return;
1371 }
1372 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1373 _scsih_pcie_device_remove(ioc, pcie_device);
1374 } else if (!pcie_device->starget) {
1375 if (!ioc->is_driver_loading) {
1376
1377 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1378 }
1379 } else
1380 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1381}
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391static void
1392_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1393 struct _pcie_device *pcie_device)
1394{
1395 unsigned long flags;
1396
1397 dewtprintk(ioc,
1398 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1399 __func__,
1400 pcie_device->handle, (u64)pcie_device->wwid));
1401 if (pcie_device->enclosure_handle != 0)
1402 dewtprintk(ioc,
1403 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1404 __func__,
1405 (u64)pcie_device->enclosure_logical_id,
1406 pcie_device->slot));
1407 if (pcie_device->connector_name[0] != '\0')
1408 dewtprintk(ioc,
1409 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1410 __func__, pcie_device->enclosure_level,
1411 pcie_device->connector_name));
1412
1413 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1414 pcie_device_get(pcie_device);
1415 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1416 if (pcie_device->access_status !=
1417 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1418 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1419 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1420}
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431static struct _raid_device *
1432_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1433{
1434 struct _raid_device *raid_device, *r;
1435
1436 r = NULL;
1437 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1438 if (raid_device->id == id && raid_device->channel == channel) {
1439 r = raid_device;
1440 goto out;
1441 }
1442 }
1443
1444 out:
1445 return r;
1446}
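
/**
 * mpt3sas_raid_device_find_by_handle - raid device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 *
 * Context: Calling function should acquire ioc->raid_device_lock.
 *
 * Return: the matching raid_device object, or NULL.
 */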
1457struct _raid_device *
1458mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1459{
1460 struct _raid_device *raid_device, *r;
1461
1462 r = NULL;
1463 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1464 if (raid_device->handle != handle)
1465 continue;
1466 r = raid_device;
1467 goto out;
1468 }
1469
1470 out:
1471 return r;
1472}
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483static struct _raid_device *
1484_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1485{
1486 struct _raid_device *raid_device, *r;
1487
1488 r = NULL;
1489 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1490 if (raid_device->wwid != wwid)
1491 continue;
1492 r = raid_device;
1493 goto out;
1494 }
1495
1496 out:
1497 return r;
1498}
1499
1500
1501
1502
1503
1504
1505
1506
1507static void
1508_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1509 struct _raid_device *raid_device)
1510{
1511 unsigned long flags;
1512
1513 dewtprintk(ioc,
1514 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1515 __func__,
1516 raid_device->handle, (u64)raid_device->wwid));
1517
1518 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1519 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1520 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1521}
1522
1523
1524
1525
1526
1527
1528
1529static void
1530_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1531 struct _raid_device *raid_device)
1532{
1533 unsigned long flags;
1534
1535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1536 list_del(&raid_device->list);
1537 kfree(raid_device);
1538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1539}
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550struct _sas_node *
1551mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1552{
1553 struct _sas_node *sas_expander, *r;
1554
1555 r = NULL;
1556 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1557 if (sas_expander->handle != handle)
1558 continue;
1559 r = sas_expander;
1560 goto out;
1561 }
1562 out:
1563 return r;
1564}
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575static struct _enclosure_node *
1576mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1577{
1578 struct _enclosure_node *enclosure_dev, *r;
1579
1580 r = NULL;
1581 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1582 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1583 continue;
1584 r = enclosure_dev;
1585 goto out;
1586 }
1587out:
1588 return r;
1589}
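
/**
 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 *
 * Context: Calling function should acquire ioc->sas_node_lock.
 *
 * Return: the expander sas_node object matching sas address and port,
 * or NULL.
 */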
1600struct _sas_node *
1601mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1602 u64 sas_address, struct hba_port *port)
1603{
1604 struct _sas_node *sas_expander, *r = NULL;
1605
1606 if (!port)
1607 return r;
1608
1609 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1610 if (sas_expander->sas_address != sas_address)
1611 continue;
1612 if (sas_expander->port != port)
1613 continue;
1614 r = sas_expander;
1615 goto out;
1616 }
1617 out:
1618 return r;
1619}
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629static void
1630_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1631 struct _sas_node *sas_expander)
1632{
1633 unsigned long flags;
1634
1635 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1636 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1637 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1638}
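
/**
 * _scsih_is_end_device - determines if device is an end device
 * @device_info: bitfield providing information about the device
 *
 * Return: 1 if end device, else 0.
 */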
1647static int
1648_scsih_is_end_device(u32 device_info)
1649{
1650 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1651 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1652 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1654 return 1;
1655 else
1656 return 0;
1657}
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667static int
1668_scsih_is_nvme_pciescsi_device(u32 device_info)
1669{
1670 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1671 == MPI26_PCIE_DEVINFO_NVME) ||
1672 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1673 == MPI26_PCIE_DEVINFO_SCSI))
1674 return 1;
1675 else
1676 return 0;
1677}
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689static u8
1690_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1691 int channel)
1692{
1693 int smid;
1694 struct scsi_cmnd *scmd;
1695
1696 for (smid = 1;
1697 smid <= ioc->shost->can_queue; smid++) {
1698 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1699 if (!scmd)
1700 continue;
1701 if (scmd->device->id == id &&
1702 scmd->device->channel == channel)
1703 return 1;
1704 }
1705 return 0;
1706}
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719static u8
1720_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1721 unsigned int lun, int channel)
1722{
1723 int smid;
1724 struct scsi_cmnd *scmd;
1725
1726 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1727
1728 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1729 if (!scmd)
1730 continue;
1731 if (scmd->device->id == id &&
1732 scmd->device->channel == channel &&
1733 scmd->device->lun == lun)
1734 return 1;
1735 }
1736 return 0;
1737}
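
/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the scsi_cmnd associated with @smid, or NULL when the slot is
 * unused or reserved for internal commands.
 */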
1747struct scsi_cmnd *
1748mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1749{
1750 struct scsi_cmnd *scmd = NULL;
1751 struct scsiio_tracker *st;
1752 Mpi25SCSIIORequest_t *mpi_request;
1753 u16 tag = smid - 1;
1754
1755 if (smid > 0 &&
1756 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1757 u32 unique_tag =
1758 ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1759
1760 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1761
1762
1763
1764
1765
1766
1767
1768 if (!mpi_request->DevHandle)
1769 return scmd;
1770
1771 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1772 if (scmd) {
1773 st = scsi_cmd_priv(scmd);
1774 if (st->cb_idx == 0xFF || st->smid == 0)
1775 scmd = NULL;
1776 }
1777 }
1778 return scmd;
1779}
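
/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * SATA end devices are capped at MPT3SAS_SATA_QUEUE_DEPTH unless
 * enable_sdev_max_qd is set or the controller is a gen3.5 IOC.
 *
 * Return: queue depth applied to the device.
 */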
1788static int
1789scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1790{
1791 struct Scsi_Host *shost = sdev->host;
1792 int max_depth;
1793 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1794 struct MPT3SAS_DEVICE *sas_device_priv_data;
1795 struct MPT3SAS_TARGET *sas_target_priv_data;
1796 struct _sas_device *sas_device;
1797 unsigned long flags;
1798
1799 max_depth = shost->can_queue;
1800
1801
1802
1803
1804
1805 if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
1806 goto not_sata;
1807
1808 sas_device_priv_data = sdev->hostdata;
1809 if (!sas_device_priv_data)
1810 goto not_sata;
1811 sas_target_priv_data = sas_device_priv_data->sas_target;
1812 if (!sas_target_priv_data)
1813 goto not_sata;
1814 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1815 goto not_sata;
1816
1817 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1818 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1819 if (sas_device) {
1820 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1821 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1822
1823 sas_device_put(sas_device);
1824 }
1825 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1826
1827 not_sata:
1828
1829 if (!sdev->tagged_supported)
1830 max_depth = 1;
1831 if (qdepth > max_depth)
1832 qdepth = max_depth;
1833 scsi_change_queue_depth(sdev, qdepth);
1834 sdev_printk(KERN_INFO, sdev,
1835 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1836 sdev->queue_depth, sdev->tagged_supported,
1837 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1838 return sdev->queue_depth;
1839}
1840
1841
1842
1843
1844
1845
1846
1847
1848void
1849mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1850{
1851 struct Scsi_Host *shost = sdev->host;
1852 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1853
1854 if (ioc->enable_sdev_max_qd)
1855 qdepth = shost->can_queue;
1856
1857 scsih_change_queue_depth(sdev, qdepth);
1858}
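
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */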
1867static int
1868scsih_target_alloc(struct scsi_target *starget)
1869{
1870 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1871 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1872 struct MPT3SAS_TARGET *sas_target_priv_data;
1873 struct _sas_device *sas_device;
1874 struct _raid_device *raid_device;
1875 struct _pcie_device *pcie_device;
1876 unsigned long flags;
1877 struct sas_rphy *rphy;
1878
1879 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1880 GFP_KERNEL);
1881 if (!sas_target_priv_data)
1882 return -ENOMEM;
1883
1884 starget->hostdata = sas_target_priv_data;
1885 sas_target_priv_data->starget = starget;
1886 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1887
1888
1889 if (starget->channel == RAID_CHANNEL) {
1890 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1891 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1892 starget->channel);
1893 if (raid_device) {
1894 sas_target_priv_data->handle = raid_device->handle;
1895 sas_target_priv_data->sas_address = raid_device->wwid;
1896 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1897 if (ioc->is_warpdrive)
1898 sas_target_priv_data->raid_device = raid_device;
1899 raid_device->starget = starget;
1900 }
1901 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1902 return 0;
1903 }
1904
1905
1906 if (starget->channel == PCIE_CHANNEL) {
1907 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1908 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1909 starget->channel);
1910 if (pcie_device) {
1911 sas_target_priv_data->handle = pcie_device->handle;
1912 sas_target_priv_data->sas_address = pcie_device->wwid;
1913 sas_target_priv_data->port = NULL;
1914 sas_target_priv_data->pcie_dev = pcie_device;
1915 pcie_device->starget = starget;
1916 pcie_device->id = starget->id;
1917 pcie_device->channel = starget->channel;
1918 sas_target_priv_data->flags |=
1919 MPT_TARGET_FLAGS_PCIE_DEVICE;
1920 if (pcie_device->fast_path)
1921 sas_target_priv_data->flags |=
1922 MPT_TARGET_FASTPATH_IO;
1923 }
1924 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1925 return 0;
1926 }
1927
1928
1929 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1930 rphy = dev_to_rphy(starget->dev.parent);
1931 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1932
1933 if (sas_device) {
1934 sas_target_priv_data->handle = sas_device->handle;
1935 sas_target_priv_data->sas_address = sas_device->sas_address;
1936 sas_target_priv_data->port = sas_device->port;
1937 sas_target_priv_data->sas_dev = sas_device;
1938 sas_device->starget = starget;
1939 sas_device->id = starget->id;
1940 sas_device->channel = starget->channel;
1941 if (test_bit(sas_device->handle, ioc->pd_handles))
1942 sas_target_priv_data->flags |=
1943 MPT_TARGET_FLAGS_RAID_COMPONENT;
1944 if (sas_device->fast_path)
1945 sas_target_priv_data->flags |=
1946 MPT_TARGET_FASTPATH_IO;
1947 }
1948 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1949
1950 return 0;
1951}
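
/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 */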
1957static void
1958scsih_target_destroy(struct scsi_target *starget)
1959{
1960 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1961 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1962 struct MPT3SAS_TARGET *sas_target_priv_data;
1963 struct _sas_device *sas_device;
1964 struct _raid_device *raid_device;
1965 struct _pcie_device *pcie_device;
1966 unsigned long flags;
1967
1968 sas_target_priv_data = starget->hostdata;
1969 if (!sas_target_priv_data)
1970 return;
1971
1972 if (starget->channel == RAID_CHANNEL) {
1973 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1974 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1975 starget->channel);
1976 if (raid_device) {
1977 raid_device->starget = NULL;
1978 raid_device->sdev = NULL;
1979 }
1980 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1981 goto out;
1982 }
1983
1984 if (starget->channel == PCIE_CHANNEL) {
1985 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1986 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1987 sas_target_priv_data);
1988 if (pcie_device && (pcie_device->starget == starget) &&
1989 (pcie_device->id == starget->id) &&
1990 (pcie_device->channel == starget->channel))
1991 pcie_device->starget = NULL;
1992
1993 if (pcie_device) {
1994
1995
1996
1997 sas_target_priv_data->pcie_dev = NULL;
1998 pcie_device_put(pcie_device);
1999 pcie_device_put(pcie_device);
2000 }
2001 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2002 goto out;
2003 }
2004
2005 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2006 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2007 if (sas_device && (sas_device->starget == starget) &&
2008 (sas_device->id == starget->id) &&
2009 (sas_device->channel == starget->channel))
2010 sas_device->starget = NULL;
2011
2012 if (sas_device) {
2013
2014
2015
2016 sas_target_priv_data->sas_dev = NULL;
2017 sas_device_put(sas_device);
2018
2019 sas_device_put(sas_device);
2020 }
2021 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2022
2023 out:
2024 kfree(sas_target_priv_data);
2025 starget->hostdata = NULL;
2026}
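
/**
 * scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */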
2035static int
2036scsih_slave_alloc(struct scsi_device *sdev)
2037{
2038 struct Scsi_Host *shost;
2039 struct MPT3SAS_ADAPTER *ioc;
2040 struct MPT3SAS_TARGET *sas_target_priv_data;
2041 struct MPT3SAS_DEVICE *sas_device_priv_data;
2042 struct scsi_target *starget;
2043 struct _raid_device *raid_device;
2044 struct _sas_device *sas_device;
2045 struct _pcie_device *pcie_device;
2046 unsigned long flags;
2047
2048 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2049 GFP_KERNEL);
2050 if (!sas_device_priv_data)
2051 return -ENOMEM;
2052
2053 sas_device_priv_data->lun = sdev->lun;
2054 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2055
2056 starget = scsi_target(sdev);
2057 sas_target_priv_data = starget->hostdata;
2058 sas_target_priv_data->num_luns++;
2059 sas_device_priv_data->sas_target = sas_target_priv_data;
2060 sdev->hostdata = sas_device_priv_data;
2061 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2062 sdev->no_uld_attach = 1;
2063
2064 shost = dev_to_shost(&starget->dev);
2065 ioc = shost_priv(shost);
2066 if (starget->channel == RAID_CHANNEL) {
2067 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2068 raid_device = _scsih_raid_device_find_by_id(ioc,
2069 starget->id, starget->channel);
2070 if (raid_device)
2071 raid_device->sdev = sdev;
2072 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2073 }
2074 if (starget->channel == PCIE_CHANNEL) {
2075 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2076 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2077 sas_target_priv_data->sas_address);
2078 if (pcie_device && (pcie_device->starget == NULL)) {
2079 sdev_printk(KERN_INFO, sdev,
2080 "%s : pcie_device->starget set to starget @ %d\n",
2081 __func__, __LINE__);
2082 pcie_device->starget = starget;
2083 }
2084
2085 if (pcie_device)
2086 pcie_device_put(pcie_device);
2087 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2088
2089 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2090 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2091 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2092 sas_target_priv_data->sas_address,
2093 sas_target_priv_data->port);
2094 if (sas_device && (sas_device->starget == NULL)) {
2095 sdev_printk(KERN_INFO, sdev,
2096 "%s : sas_device->starget set to starget @ %d\n",
2097 __func__, __LINE__);
2098 sas_device->starget = starget;
2099 }
2100
2101 if (sas_device)
2102 sas_device_put(sas_device);
2103
2104 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2105 }
2106
2107 return 0;
2108}
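
/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 */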
2114static void
2115scsih_slave_destroy(struct scsi_device *sdev)
2116{
2117 struct MPT3SAS_TARGET *sas_target_priv_data;
2118 struct scsi_target *starget;
2119 struct Scsi_Host *shost;
2120 struct MPT3SAS_ADAPTER *ioc;
2121 struct _sas_device *sas_device;
2122 struct _pcie_device *pcie_device;
2123 unsigned long flags;
2124
2125 if (!sdev->hostdata)
2126 return;
2127
2128 starget = scsi_target(sdev);
2129 sas_target_priv_data = starget->hostdata;
2130 sas_target_priv_data->num_luns--;
2131
2132 shost = dev_to_shost(&starget->dev);
2133 ioc = shost_priv(shost);
2134
2135 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2136 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2137 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2138 sas_target_priv_data);
2139 if (pcie_device && !sas_target_priv_data->num_luns)
2140 pcie_device->starget = NULL;
2141
2142 if (pcie_device)
2143 pcie_device_put(pcie_device);
2144
2145 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2146
2147 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2148 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2149 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2150 sas_target_priv_data);
2151 if (sas_device && !sas_target_priv_data->num_luns)
2152 sas_device->starget = NULL;
2153
2154 if (sas_device)
2155 sas_device_put(sas_device);
2156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2157 }
2158
2159 kfree(sdev->hostdata);
2160 sdev->hostdata = NULL;
2161}
2162
2163
2164
2165
2166
2167
2168
2169static void
2170_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2171 u16 handle, struct scsi_device *sdev)
2172{
2173 Mpi2ConfigReply_t mpi_reply;
2174 Mpi2SasDevicePage0_t sas_device_pg0;
2175 u32 ioc_status;
2176 u16 flags;
2177 u32 device_info;
2178
2179 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2180 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2181 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2182 __FILE__, __LINE__, __func__);
2183 return;
2184 }
2185
2186 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2187 MPI2_IOCSTATUS_MASK;
2188 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2189 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2190 __FILE__, __LINE__, __func__);
2191 return;
2192 }
2193
2194 flags = le16_to_cpu(sas_device_pg0.Flags);
2195 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2196
2197 sdev_printk(KERN_INFO, sdev,
2198 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2199 "sw_preserve(%s)\n",
2200 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2203 "n",
2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2207}
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220static int
2221scsih_is_raid(struct device *dev)
2222{
2223 struct scsi_device *sdev = to_scsi_device(dev);
2224 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2225
2226 if (ioc->is_warpdrive)
2227 return 0;
2228 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2229}
2230
2231static int
2232scsih_is_nvme(struct device *dev)
2233{
2234 struct scsi_device *sdev = to_scsi_device(dev);
2235
2236 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2237}
2238
2239
2240
2241
2242
2243static void
2244scsih_get_resync(struct device *dev)
2245{
2246 struct scsi_device *sdev = to_scsi_device(dev);
2247 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2248 static struct _raid_device *raid_device;
2249 unsigned long flags;
2250 Mpi2RaidVolPage0_t vol_pg0;
2251 Mpi2ConfigReply_t mpi_reply;
2252 u32 volume_status_flags;
2253 u8 percent_complete;
2254 u16 handle;
2255
2256 percent_complete = 0;
2257 handle = 0;
2258 if (ioc->is_warpdrive)
2259 goto out;
2260
2261 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2262 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2263 sdev->channel);
2264 if (raid_device) {
2265 handle = raid_device->handle;
2266 percent_complete = raid_device->percent_complete;
2267 }
2268 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2269
2270 if (!handle)
2271 goto out;
2272
2273 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2274 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2275 sizeof(Mpi2RaidVolPage0_t))) {
2276 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2277 __FILE__, __LINE__, __func__);
2278 percent_complete = 0;
2279 goto out;
2280 }
2281
2282 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2283 if (!(volume_status_flags &
2284 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2285 percent_complete = 0;
2286
2287 out:
2288
2289 switch (ioc->hba_mpi_version_belonged) {
2290 case MPI2_VERSION:
2291 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2292 break;
2293 case MPI25_VERSION:
2294 case MPI26_VERSION:
2295 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2296 break;
2297 }
2298}
2299
2300
2301
2302
2303
2304static void
2305scsih_get_state(struct device *dev)
2306{
2307 struct scsi_device *sdev = to_scsi_device(dev);
2308 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2309 static struct _raid_device *raid_device;
2310 unsigned long flags;
2311 Mpi2RaidVolPage0_t vol_pg0;
2312 Mpi2ConfigReply_t mpi_reply;
2313 u32 volstate;
2314 enum raid_state state = RAID_STATE_UNKNOWN;
2315 u16 handle = 0;
2316
2317 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2318 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2319 sdev->channel);
2320 if (raid_device)
2321 handle = raid_device->handle;
2322 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2323
2324 if (!raid_device)
2325 goto out;
2326
2327 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2328 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2329 sizeof(Mpi2RaidVolPage0_t))) {
2330 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2331 __FILE__, __LINE__, __func__);
2332 goto out;
2333 }
2334
2335 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2336 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2337 state = RAID_STATE_RESYNCING;
2338 goto out;
2339 }
2340
2341 switch (vol_pg0.VolumeState) {
2342 case MPI2_RAID_VOL_STATE_OPTIMAL:
2343 case MPI2_RAID_VOL_STATE_ONLINE:
2344 state = RAID_STATE_ACTIVE;
2345 break;
2346 case MPI2_RAID_VOL_STATE_DEGRADED:
2347 state = RAID_STATE_DEGRADED;
2348 break;
2349 case MPI2_RAID_VOL_STATE_FAILED:
2350 case MPI2_RAID_VOL_STATE_MISSING:
2351 state = RAID_STATE_OFFLINE;
2352 break;
2353 }
2354 out:
2355 switch (ioc->hba_mpi_version_belonged) {
2356 case MPI2_VERSION:
2357 raid_set_state(mpt2sas_raid_template, dev, state);
2358 break;
2359 case MPI25_VERSION:
2360 case MPI26_VERSION:
2361 raid_set_state(mpt3sas_raid_template, dev, state);
2362 break;
2363 }
2364}
2365
2366
2367
2368
2369
2370
2371
2372static void
2373_scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2374 struct scsi_device *sdev, u8 volume_type)
2375{
2376 enum raid_level level = RAID_LEVEL_UNKNOWN;
2377
2378 switch (volume_type) {
2379 case MPI2_RAID_VOL_TYPE_RAID0:
2380 level = RAID_LEVEL_0;
2381 break;
2382 case MPI2_RAID_VOL_TYPE_RAID10:
2383 level = RAID_LEVEL_10;
2384 break;
2385 case MPI2_RAID_VOL_TYPE_RAID1E:
2386 level = RAID_LEVEL_1E;
2387 break;
2388 case MPI2_RAID_VOL_TYPE_RAID1:
2389 level = RAID_LEVEL_1;
2390 break;
2391 }
2392
2393 switch (ioc->hba_mpi_version_belonged) {
2394 case MPI2_VERSION:
2395 raid_set_level(mpt2sas_raid_template,
2396 &sdev->sdev_gendev, level);
2397 break;
2398 case MPI25_VERSION:
2399 case MPI26_VERSION:
2400 raid_set_level(mpt3sas_raid_template,
2401 &sdev->sdev_gendev, level);
2402 break;
2403 }
2404}
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414static int
2415_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2416 struct _raid_device *raid_device)
2417{
2418 Mpi2RaidVolPage0_t *vol_pg0;
2419 Mpi2RaidPhysDiskPage0_t pd_pg0;
2420 Mpi2SasDevicePage0_t sas_device_pg0;
2421 Mpi2ConfigReply_t mpi_reply;
2422 u16 sz;
2423 u8 num_pds;
2424
2425 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2426 &num_pds)) || !num_pds) {
2427 dfailprintk(ioc,
2428 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2429 __FILE__, __LINE__, __func__));
2430 return 1;
2431 }
2432
2433 raid_device->num_pds = num_pds;
2434 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2435 sizeof(Mpi2RaidVol0PhysDisk_t));
2436 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2437 if (!vol_pg0) {
2438 dfailprintk(ioc,
2439 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2440 __FILE__, __LINE__, __func__));
2441 return 1;
2442 }
2443
2444 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2445 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2446 dfailprintk(ioc,
2447 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2448 __FILE__, __LINE__, __func__));
2449 kfree(vol_pg0);
2450 return 1;
2451 }
2452
2453 raid_device->volume_type = vol_pg0->VolumeType;
2454
2455
2456
2457
2458 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2459 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2460 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2461 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2462 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2463 le16_to_cpu(pd_pg0.DevHandle)))) {
2464 raid_device->device_info =
2465 le32_to_cpu(sas_device_pg0.DeviceInfo);
2466 }
2467 }
2468
2469 kfree(vol_pg0);
2470 return 0;
2471}
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
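/**
 * _scsih_enable_tlr - enable Transport Layer Retries
 * @ioc: per adapter object
 * @sdev: scsi device struct
 *
 * Only enabled for tape devices, and only when the IOC reports the TLR
 * capability in IOC facts.
 */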
2482static void
2483_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2484{
2485
2486
2487 if (sdev->type != TYPE_TAPE)
2488 return;
2489
2490 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2491 return;
2492
2493 sas_enable_tlr(sdev);
2494 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2495 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2496 return;
2497
2498}
2499
2500
2501
2502
2503
2504
2505
2506
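/**
 * scsih_slave_configure - device configure routine
 * @sdev: scsi device struct
 *
 * Sets the queue depth and displays device information for RAID volumes,
 * NVMe and SAS/SATA end devices; also applies per device-type settings such
 * as max_hw_sectors and, for NVMe, the no-merges flag and virt boundary.
 *
 * Return: 0 if ok, non-0 to reject the device.
 */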
2507static int
2508scsih_slave_configure(struct scsi_device *sdev)
2509{
2510 struct Scsi_Host *shost = sdev->host;
2511 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2512 struct MPT3SAS_DEVICE *sas_device_priv_data;
2513 struct MPT3SAS_TARGET *sas_target_priv_data;
2514 struct _sas_device *sas_device;
2515 struct _pcie_device *pcie_device;
2516 struct _raid_device *raid_device;
2517 unsigned long flags;
2518 int qdepth;
2519 u8 ssp_target = 0;
2520 char *ds = "";
2521 char *r_level = "";
2522 u16 handle, volume_handle = 0;
2523 u64 volume_wwid = 0;
2524
2525 qdepth = 1;
2526 sas_device_priv_data = sdev->hostdata;
2527 sas_device_priv_data->configured_lun = 1;
2528 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2529 sas_target_priv_data = sas_device_priv_data->sas_target;
2530 handle = sas_target_priv_data->handle;
2531
2532
2533 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2534
2535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2536 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2537 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2538 if (!raid_device) {
2539 dfailprintk(ioc,
2540 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2541 __FILE__, __LINE__, __func__));
2542 return 1;
2543 }
2544
2545 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2546 dfailprintk(ioc,
2547 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2548 __FILE__, __LINE__, __func__));
2549 return 1;
2550 }
2551
2552
2553
2554
2555 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2556
2557
2558
2559
2560
2561
2562 if (raid_device->device_info &
2563 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2564 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2565 ds = "SSP";
2566 } else {
2567 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2568 if (raid_device->device_info &
2569 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2570 ds = "SATA";
2571 else
2572 ds = "STP";
2573 }
2574
2575 switch (raid_device->volume_type) {
2576 case MPI2_RAID_VOL_TYPE_RAID0:
2577 r_level = "RAID0";
2578 break;
2579 case MPI2_RAID_VOL_TYPE_RAID1E:
2580 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2581 if (ioc->manu_pg10.OEMIdentifier &&
2582 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2583 MFG10_GF0_R10_DISPLAY) &&
2584 !(raid_device->num_pds % 2))
2585 r_level = "RAID10";
2586 else
2587 r_level = "RAID1E";
2588 break;
2589 case MPI2_RAID_VOL_TYPE_RAID1:
2590 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2591 r_level = "RAID1";
2592 break;
2593 case MPI2_RAID_VOL_TYPE_RAID10:
2594 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2595 r_level = "RAID10";
2596 break;
2597 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2598 default:
2599 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2600 r_level = "RAIDX";
2601 break;
2602 }
2603
2604 if (!ioc->hide_ir_msg)
2605 sdev_printk(KERN_INFO, sdev,
2606 "%s: handle(0x%04x), wwid(0x%016llx),"
2607 " pd_count(%d), type(%s)\n",
2608 r_level, raid_device->handle,
2609 (unsigned long long)raid_device->wwid,
2610 raid_device->num_pds, ds);
2611
2612 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2613 blk_queue_max_hw_sectors(sdev->request_queue,
2614 MPT3SAS_RAID_MAX_SECTORS);
2615 sdev_printk(KERN_INFO, sdev,
2616 "Set queue's max_sector to: %u\n",
2617 MPT3SAS_RAID_MAX_SECTORS);
2618 }
2619
2620 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2621
2622
2623 if (!ioc->is_warpdrive)
2624 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2625 return 0;
2626 }
2627
2628
2629 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2630 if (mpt3sas_config_get_volume_handle(ioc, handle,
2631 &volume_handle)) {
2632 dfailprintk(ioc,
2633 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2634 __FILE__, __LINE__, __func__));
2635 return 1;
2636 }
2637 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2638 volume_handle, &volume_wwid)) {
2639 dfailprintk(ioc,
2640 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2641 __FILE__, __LINE__, __func__));
2642 return 1;
2643 }
2644 }
2645
2646
2647 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2648 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2649 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2650 sas_device_priv_data->sas_target->sas_address);
2651 if (!pcie_device) {
2652 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2653 dfailprintk(ioc,
2654 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2655 __FILE__, __LINE__, __func__));
2656 return 1;
2657 }
2658
2659 qdepth = ioc->max_nvme_qd;
2660 ds = "NVMe";
2661 sdev_printk(KERN_INFO, sdev,
2662 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2663 ds, handle, (unsigned long long)pcie_device->wwid,
2664 pcie_device->port_num);
2665 if (pcie_device->enclosure_handle != 0)
2666 sdev_printk(KERN_INFO, sdev,
2667 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2668 ds,
2669 (unsigned long long)pcie_device->enclosure_logical_id,
2670 pcie_device->slot);
2671 if (pcie_device->connector_name[0] != '\0')
2672 sdev_printk(KERN_INFO, sdev,
2673 "%s: enclosure level(0x%04x),"
2674 "connector name( %s)\n", ds,
2675 pcie_device->enclosure_level,
2676 pcie_device->connector_name);
2677
2678 if (pcie_device->nvme_mdts)
2679 blk_queue_max_hw_sectors(sdev->request_queue,
2680 pcie_device->nvme_mdts/512);
2681
2682 pcie_device_put(pcie_device);
2683 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2684 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2685
2686
2687
2688
2689 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2690 sdev->request_queue);
2691 blk_queue_virt_boundary(sdev->request_queue,
2692 ioc->page_size - 1);
2693 return 0;
2694 }
2695
2696 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2697 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2698 sas_device_priv_data->sas_target->sas_address,
2699 sas_device_priv_data->sas_target->port);
2700 if (!sas_device) {
2701 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2702 dfailprintk(ioc,
2703 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2704 __FILE__, __LINE__, __func__));
2705 return 1;
2706 }
2707
2708 sas_device->volume_handle = volume_handle;
2709 sas_device->volume_wwid = volume_wwid;
2710 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2711 qdepth = (sas_device->port_type > 1) ?
2712 ioc->max_wideport_qd : ioc->max_narrowport_qd;
2713 ssp_target = 1;
2714 if (sas_device->device_info &
2715 MPI2_SAS_DEVICE_INFO_SEP) {
2716 sdev_printk(KERN_WARNING, sdev,
2717 "set ignore_delay_remove for handle(0x%04x)\n",
2718 sas_device_priv_data->sas_target->handle);
2719 sas_device_priv_data->ignore_delay_remove = 1;
2720 ds = "SES";
2721 } else
2722 ds = "SSP";
2723 } else {
2724 qdepth = ioc->max_sata_qd;
2725 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2726 ds = "STP";
2727 else if (sas_device->device_info &
2728 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2729 ds = "SATA";
2730 }
2731
	sdev_printk(KERN_INFO, sdev,
	    "%s: handle(0x%04x), sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
	    ds, handle, (unsigned long long)sas_device->sas_address,
	    sas_device->phy, (unsigned long long)sas_device->device_name);
2736
2737 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2738
2739 sas_device_put(sas_device);
2740 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2741
2742 if (!ssp_target)
2743 _scsih_display_sata_capabilities(ioc, handle, sdev);
2744
2745
2746 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2747
2748 if (ssp_target) {
2749 sas_read_port_mode_page(sdev);
2750 _scsih_enable_tlr(ioc, sdev);
2751 }
2752
2753 return 0;
2754}
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
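/**
 * scsih_bios_param - fictional head/sector/cylinder geometry for the BIOS
 * @sdev: scsi device struct
 * @bdev: pointer to block device context
 * @capacity: device size (in 512 byte sectors)
 * @params: three element array to place output:
 *	params[0] number of heads (max 255)
 *	params[1] number of sectors (max 63)
 *	params[2] number of cylinders
 */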
2766static int
2767scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2768 sector_t capacity, int params[])
2769{
2770 int heads;
2771 int sectors;
2772 sector_t cylinders;
2773 ulong dummy;
2774
2775 heads = 64;
2776 sectors = 32;
2777
2778 dummy = heads * sectors;
2779 cylinders = capacity;
2780 sector_div(cylinders, dummy);
2781
2782
2783
2784
2785
2786 if ((ulong)capacity >= 0x200000) {
2787 heads = 255;
2788 sectors = 63;
2789 dummy = heads * sectors;
2790 cylinders = capacity;
2791 sector_div(cylinders, dummy);
2792 }
2793
2794
2795 params[0] = heads;
2796 params[1] = sectors;
2797 params[2] = cylinders;
2798
2799 return 0;
2800}
2801
2802
2803
2804
2805
2806
2807static void
2808_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2809{
2810 char *desc;
2811
2812 switch (response_code) {
2813 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2814 desc = "task management request completed";
2815 break;
2816 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2817 desc = "invalid frame";
2818 break;
2819 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2820 desc = "task management request not supported";
2821 break;
2822 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2823 desc = "task management request failed";
2824 break;
2825 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2826 desc = "task management request succeeded";
2827 break;
2828 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2829 desc = "invalid lun";
2830 break;
2831 case 0xA:
2832 desc = "overlapped tag attempted";
2833 break;
2834 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2835 desc = "task queued, however not sent to target";
2836 break;
2837 default:
2838 desc = "unknown";
2839 break;
2840 }
2841 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2842}
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
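/**
 * _scsih_tm_done - task management completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame (lower 32bit addr)
 *
 * Copies the reply into ioc->tm_cmds.reply and completes ioc->tm_cmds.done.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *         0 means the mf is freed from this function.
 */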
2857static u8
2858_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2859{
2860 MPI2DefaultReply_t *mpi_reply;
2861
2862 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2863 return 1;
2864 if (ioc->tm_cmds.smid != smid)
2865 return 1;
2866 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2867 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2868 if (mpi_reply) {
2869 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2870 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2871 }
2872 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2873 complete(&ioc->tm_cmds.done);
2874 return 1;
2875}
2876
2877
2878
2879
2880
2881
2882
2883
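/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * While a task management request is outstanding, freeze the device queue.
 */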
2884void
2885mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2886{
2887 struct MPT3SAS_DEVICE *sas_device_priv_data;
2888 struct scsi_device *sdev;
2889 u8 skip = 0;
2890
2891 shost_for_each_device(sdev, ioc->shost) {
2892 if (skip)
2893 continue;
2894 sas_device_priv_data = sdev->hostdata;
2895 if (!sas_device_priv_data)
2896 continue;
2897 if (sas_device_priv_data->sas_target->handle == handle) {
2898 sas_device_priv_data->sas_target->tm_busy = 1;
2899 skip = 1;
2900 ioc->ignore_loginfos = 1;
2901 }
2902 }
2903}
2904
2905
2906
2907
2908
2909
2910
2911
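/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Re-open the device queue once the task management request completes.
 */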
2912void
2913mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2914{
2915 struct MPT3SAS_DEVICE *sas_device_priv_data;
2916 struct scsi_device *sdev;
2917 u8 skip = 0;
2918
2919 shost_for_each_device(sdev, ioc->shost) {
2920 if (skip)
2921 continue;
2922 sas_device_priv_data = sdev->hostdata;
2923 if (!sas_device_priv_data)
2924 continue;
2925 if (sas_device_priv_data->sas_target->handle == handle) {
2926 sas_device_priv_data->sas_target->tm_busy = 0;
2927 skip = 1;
2928 ioc->ignore_loginfos = 0;
2929 }
2930 }
2931}
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
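/**
 * scsih_tm_cmd_map_status - check whether the affected command(s) completed
 * @ioc: per adapter object
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid assigned to the task
 *
 * Looks up whether the command(s) targeted by a task management request are
 * still outstanding in the driver.
 *
 * Return: SUCCESS if they are no longer outstanding, otherwise FAILED.
 */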
2945static int
2946scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2947 uint id, uint lun, u8 type, u16 smid_task)
2948{
2949
2950 if (smid_task <= ioc->shost->can_queue) {
2951 switch (type) {
2952 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2953 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2954 id, channel)))
2955 return SUCCESS;
2956 break;
2957 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2958 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2959 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2960 lun, channel)))
2961 return SUCCESS;
2962 break;
2963 default:
2964 return SUCCESS;
2965 }
2966 } else if (smid_task == ioc->scsih_cmds.smid) {
2967 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2968 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2969 return SUCCESS;
2970 } else if (smid_task == ioc->ctl_cmds.smid) {
2971 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2972 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2973 return SUCCESS;
2974 }
2975
2976 return FAILED;
2977}
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
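/**
 * scsih_tm_post_processing - post processing of target & LUN reset TMs
 * @ioc: per adapter object
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid assigned to the task
 *
 * Re-checks whether the affected command(s) completed; if not, polls the
 * reply descriptor queues once and checks again.
 *
 * Return: SUCCESS if the command(s) completed, otherwise FAILED.
 */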
2996static int
2997scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2998 uint channel, uint id, uint lun, u8 type, u16 smid_task)
2999{
3000 int rc;
3001
3002 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3003 if (rc == SUCCESS)
3004 return rc;
3005
3006 ioc_info(ioc,
3007 "Poll ReplyDescriptor queues for completion of"
3008 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3009 smid_task, type, handle);
3010
3011
3012
3013
3014
3015
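	/*
	 * The completions for the affected command(s) may still be sitting
	 * in the reply descriptor queues; mask interrupts and poll the
	 * queues once so any pending replies are processed before the
	 * lookup is repeated.
	 */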
3016 mpt3sas_base_mask_interrupts(ioc);
3017 mpt3sas_base_sync_reply_irqs(ioc, 1);
3018 mpt3sas_base_unmask_interrupts(ioc);
3019
3020 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3021}
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
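/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter object
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the caller
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user; the caller must hold ioc->tm_cmds.mutex.
 *
 * A generic API for sending task management requests to firmware; the
 * caller is responsible for verifying that the affected commands have
 * actually completed.
 *
 * Return: SUCCESS or FAILED.
 */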
3044int
3045mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3046 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3047 u8 timeout, u8 tr_method)
3048{
3049 Mpi2SCSITaskManagementRequest_t *mpi_request;
3050 Mpi2SCSITaskManagementReply_t *mpi_reply;
3051 Mpi25SCSIIORequest_t *request;
3052 u16 smid = 0;
3053 u32 ioc_state;
3054 int rc;
3055 u8 issue_reset = 0;
3056
3057 lockdep_assert_held(&ioc->tm_cmds.mutex);
3058
3059 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3060 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3061 return FAILED;
3062 }
3063
3064 if (ioc->shost_recovery || ioc->remove_host ||
3065 ioc->pci_error_recovery) {
3066 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
3067 return FAILED;
3068 }
3069
3070 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3071 if (ioc_state & MPI2_DOORBELL_USED) {
3072 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3073 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3074 return (!rc) ? SUCCESS : FAILED;
3075 }
3076
3077 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3078 mpt3sas_print_fault_code(ioc, ioc_state &
3079 MPI2_DOORBELL_DATA_MASK);
3080 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3081 return (!rc) ? SUCCESS : FAILED;
3082 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3083 MPI2_IOC_STATE_COREDUMP) {
3084 mpt3sas_print_coredump_info(ioc, ioc_state &
3085 MPI2_DOORBELL_DATA_MASK);
3086 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3087 return (!rc) ? SUCCESS : FAILED;
3088 }
3089
3090 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3091 if (!smid) {
3092 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3093 return FAILED;
3094 }
3095
3096 dtmprintk(ioc,
3097 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3098 handle, type, smid_task, timeout, tr_method));
3099 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3100 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3101 ioc->tm_cmds.smid = smid;
3102 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3103 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3104 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3105 mpi_request->DevHandle = cpu_to_le16(handle);
3106 mpi_request->TaskType = type;
3107 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3108 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3109 mpi_request->MsgFlags = tr_method;
3110 mpi_request->TaskMID = cpu_to_le16(smid_task);
3111 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
3112 mpt3sas_scsih_set_tm_flag(ioc, handle);
3113 init_completion(&ioc->tm_cmds.done);
3114 ioc->put_smid_hi_priority(ioc, smid, msix_task);
3115 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
3116 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3117 mpt3sas_check_cmd_timeout(ioc,
3118 ioc->tm_cmds.status, mpi_request,
3119 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3120 if (issue_reset) {
3121 rc = mpt3sas_base_hard_reset_handler(ioc,
3122 FORCE_BIG_HAMMER);
3123 rc = (!rc) ? SUCCESS : FAILED;
3124 goto out;
3125 }
3126 }
3127
3128
3129 mpt3sas_base_sync_reply_irqs(ioc, 0);
3130
3131 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3132 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3133 mpi_reply = ioc->tm_cmds.reply;
3134 dtmprintk(ioc,
3135 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3136 le16_to_cpu(mpi_reply->IOCStatus),
3137 le32_to_cpu(mpi_reply->IOCLogInfo),
3138 le32_to_cpu(mpi_reply->TerminationCount)));
3139 if (ioc->logging_level & MPT_DEBUG_TM) {
3140 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3141 if (mpi_reply->IOCStatus)
3142 _debug_dump_mf(mpi_request,
3143 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3144 }
3145 }
3146
3147 switch (type) {
3148 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3149 rc = SUCCESS;
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
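		/*
		 * If the message frame for smid_task no longer carries this
		 * device handle, the aborted command completed and the frame
		 * has been reused, so the abort can be treated as a success;
		 * otherwise the command is still outstanding and the abort
		 * has failed.
		 */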
3161 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3162 if (le16_to_cpu(request->DevHandle) != handle)
3163 break;
3164
		ioc_info(ioc,
		    "Task abort tm failed: handle(0x%04x), timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
3168 rc = FAILED;
3169 break;
3170
3171 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3172 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3173 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3174 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3175 type, smid_task);
3176 break;
3177 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3178 rc = SUCCESS;
3179 break;
3180 default:
3181 rc = FAILED;
3182 break;
3183 }
3184
3185out:
3186 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3187 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3188 return rc;
3189}
3190
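/**
 * mpt3sas_scsih_issue_locked_tm - wrapper around mpt3sas_scsih_issue_tm()
 *	that acquires and releases ioc->tm_cmds.mutex
 *
 * Takes the same arguments and returns the same values as
 * mpt3sas_scsih_issue_tm().
 */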
3191int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3192 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3193 u16 msix_task, u8 timeout, u8 tr_method)
3194{
3195 int ret;
3196
3197 mutex_lock(&ioc->tm_cmds.mutex);
3198 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3199 smid_task, msix_task, timeout, tr_method);
3200 mutex_unlock(&ioc->tm_cmds.mutex);
3201
3202 return ret;
3203}
3204
3205
3206
3207
3208
3209
3210
3211
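/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by the task management callback handlers to log the command and
 * the volume, NVMe or SAS device it was addressed to.
 */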
3212static void
3213_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3214{
3215 struct scsi_target *starget = scmd->device->sdev_target;
3216 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3217 struct _sas_device *sas_device = NULL;
3218 struct _pcie_device *pcie_device = NULL;
3219 unsigned long flags;
3220 char *device_str = NULL;
3221
3222 if (!priv_target)
3223 return;
3224 if (ioc->hide_ir_msg)
3225 device_str = "WarpDrive";
3226 else
3227 device_str = "volume";
3228
3229 scsi_print_command(scmd);
3230 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3231 starget_printk(KERN_INFO, starget,
3232 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3233 device_str, priv_target->handle,
3234 device_str, (unsigned long long)priv_target->sas_address);
3235
3236 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3237 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3238 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3239 if (pcie_device) {
3240 starget_printk(KERN_INFO, starget,
3241 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3242 pcie_device->handle,
3243 (unsigned long long)pcie_device->wwid,
3244 pcie_device->port_num);
3245 if (pcie_device->enclosure_handle != 0)
3246 starget_printk(KERN_INFO, starget,
3247 "enclosure logical id(0x%016llx), slot(%d)\n",
3248 (unsigned long long)
3249 pcie_device->enclosure_logical_id,
3250 pcie_device->slot);
3251 if (pcie_device->connector_name[0] != '\0')
3252 starget_printk(KERN_INFO, starget,
3253 "enclosure level(0x%04x), connector name( %s)\n",
3254 pcie_device->enclosure_level,
3255 pcie_device->connector_name);
3256 pcie_device_put(pcie_device);
3257 }
3258 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3259
3260 } else {
3261 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3262 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3263 if (sas_device) {
3264 if (priv_target->flags &
3265 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3266 starget_printk(KERN_INFO, starget,
3267 "volume handle(0x%04x), "
3268 "volume wwid(0x%016llx)\n",
3269 sas_device->volume_handle,
3270 (unsigned long long)sas_device->volume_wwid);
3271 }
3272 starget_printk(KERN_INFO, starget,
3273 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3274 sas_device->handle,
3275 (unsigned long long)sas_device->sas_address,
3276 sas_device->phy);
3277
3278 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3279 NULL, starget);
3280
3281 sas_device_put(sas_device);
3282 }
3283 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3284 }
3285}
3286
3287
3288
3289
3290
3291
3292
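/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the command was aborted, otherwise FAILED.
 */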
3293static int
3294scsih_abort(struct scsi_cmnd *scmd)
3295{
3296 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3297 struct MPT3SAS_DEVICE *sas_device_priv_data;
3298 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3299 u16 handle;
3300 int r;
3301
3302 u8 timeout = 30;
3303 struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device,
	    "attempting task abort! scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scmd->request->timeout / HZ) * 1000);
3308 _scsih_tm_display_info(ioc, scmd);
3309
3310 sas_device_priv_data = scmd->device->hostdata;
3311 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3312 ioc->remove_host) {
3313 sdev_printk(KERN_INFO, scmd->device,
3314 "device been deleted! scmd(0x%p)\n", scmd);
3315 scmd->result = DID_NO_CONNECT << 16;
3316 scmd->scsi_done(scmd);
3317 r = SUCCESS;
3318 goto out;
3319 }
3320
3321
3322 if (st == NULL || st->cb_idx == 0xFF) {
3323 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3324 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3325 scmd->result = DID_RESET << 16;
3326 r = SUCCESS;
3327 goto out;
3328 }
3329
3330
3331 if (sas_device_priv_data->sas_target->flags &
3332 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3333 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3334 scmd->result = DID_RESET << 16;
3335 r = FAILED;
3336 goto out;
3337 }
3338
3339 mpt3sas_halt_firmware(ioc);
3340
3341 handle = sas_device_priv_data->sas_target->handle;
3342 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3343 if (pcie_device && (!ioc->tm_custom_handling) &&
3344 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3345 timeout = ioc->nvme_abort_timeout;
3346 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3347 scmd->device->id, scmd->device->lun,
3348 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3349 st->smid, st->msix_io, timeout, 0);
3350
3351 if (r == SUCCESS && st->cb_idx != 0xFF)
3352 r = FAILED;
3353 out:
3354 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3355 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3356 if (pcie_device)
3357 pcie_device_put(pcie_device);
3358 return r;
3359}
3360
3361
3362
3363
3364
3365
3366
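/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the logical unit reset succeeded, otherwise FAILED.
 */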
3367static int
3368scsih_dev_reset(struct scsi_cmnd *scmd)
3369{
3370 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3371 struct MPT3SAS_DEVICE *sas_device_priv_data;
3372 struct _sas_device *sas_device = NULL;
3373 struct _pcie_device *pcie_device = NULL;
3374 u16 handle;
3375 u8 tr_method = 0;
3376 u8 tr_timeout = 30;
3377 int r;
3378
3379 struct scsi_target *starget = scmd->device->sdev_target;
3380 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3381
3382 sdev_printk(KERN_INFO, scmd->device,
3383 "attempting device reset! scmd(0x%p)\n", scmd);
3384 _scsih_tm_display_info(ioc, scmd);
3385
3386 sas_device_priv_data = scmd->device->hostdata;
3387 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3388 ioc->remove_host) {
3389 sdev_printk(KERN_INFO, scmd->device,
3390 "device been deleted! scmd(0x%p)\n", scmd);
3391 scmd->result = DID_NO_CONNECT << 16;
3392 scmd->scsi_done(scmd);
3393 r = SUCCESS;
3394 goto out;
3395 }
3396
3397
3398 handle = 0;
3399 if (sas_device_priv_data->sas_target->flags &
3400 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3401 sas_device = mpt3sas_get_sdev_from_target(ioc,
3402 target_priv_data);
3403 if (sas_device)
3404 handle = sas_device->volume_handle;
3405 } else
3406 handle = sas_device_priv_data->sas_target->handle;
3407
3408 if (!handle) {
3409 scmd->result = DID_RESET << 16;
3410 r = FAILED;
3411 goto out;
3412 }
3413
3414 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3415
3416 if (pcie_device && (!ioc->tm_custom_handling) &&
3417 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3418 tr_timeout = pcie_device->reset_timeout;
3419 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3420 } else
3421 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3422
3423 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3424 scmd->device->id, scmd->device->lun,
3425 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3426 tr_timeout, tr_method);
3427
3428 if (r == SUCCESS && scsi_device_busy(scmd->device))
3429 r = FAILED;
3430 out:
3431 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3432 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3433
3434 if (sas_device)
3435 sas_device_put(sas_device);
3436 if (pcie_device)
3437 pcie_device_put(pcie_device);
3438
3439 return r;
3440}
3441
3442
3443
3444
3445
3446
3447
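/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the target reset succeeded, otherwise FAILED.
 */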
3448static int
3449scsih_target_reset(struct scsi_cmnd *scmd)
3450{
3451 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3452 struct MPT3SAS_DEVICE *sas_device_priv_data;
3453 struct _sas_device *sas_device = NULL;
3454 struct _pcie_device *pcie_device = NULL;
3455 u16 handle;
3456 u8 tr_method = 0;
3457 u8 tr_timeout = 30;
3458 int r;
3459 struct scsi_target *starget = scmd->device->sdev_target;
3460 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3461
3462 starget_printk(KERN_INFO, starget,
3463 "attempting target reset! scmd(0x%p)\n", scmd);
3464 _scsih_tm_display_info(ioc, scmd);
3465
3466 sas_device_priv_data = scmd->device->hostdata;
3467 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3468 ioc->remove_host) {
3469 starget_printk(KERN_INFO, starget,
3470 "target been deleted! scmd(0x%p)\n", scmd);
3471 scmd->result = DID_NO_CONNECT << 16;
3472 scmd->scsi_done(scmd);
3473 r = SUCCESS;
3474 goto out;
3475 }
3476
3477
3478 handle = 0;
3479 if (sas_device_priv_data->sas_target->flags &
3480 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3481 sas_device = mpt3sas_get_sdev_from_target(ioc,
3482 target_priv_data);
3483 if (sas_device)
3484 handle = sas_device->volume_handle;
3485 } else
3486 handle = sas_device_priv_data->sas_target->handle;
3487
3488 if (!handle) {
3489 scmd->result = DID_RESET << 16;
3490 r = FAILED;
3491 goto out;
3492 }
3493
3494 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3495
3496 if (pcie_device && (!ioc->tm_custom_handling) &&
3497 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3498 tr_timeout = pcie_device->reset_timeout;
3499 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3500 } else
3501 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3502 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3503 scmd->device->id, 0,
3504 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3505 tr_timeout, tr_method);
3506
3507 if (r == SUCCESS && atomic_read(&starget->target_busy))
3508 r = FAILED;
3509 out:
3510 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3511 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3512
3513 if (sas_device)
3514 sas_device_put(sas_device);
3515 if (pcie_device)
3516 pcie_device_put(pcie_device);
3517 return r;
3518}
3519
3520
3521
3522
3523
3524
3525
3526
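/**
 * scsih_host_reset - eh threads main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the adapter was reset, otherwise FAILED.
 */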
3527static int
3528scsih_host_reset(struct scsi_cmnd *scmd)
3529{
3530 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3531 int r, retval;
3532
3533 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3534 scsi_print_command(scmd);
3535
3536 if (ioc->is_driver_loading || ioc->remove_host) {
3537 ioc_info(ioc, "Blocking the host reset\n");
3538 r = FAILED;
3539 goto out;
3540 }
3541
3542 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3543 r = (retval < 0) ? FAILED : SUCCESS;
3544out:
3545 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3546 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3547
3548 return r;
3549}
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
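/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: this function will acquire ioc->fw_event_lock.
 *
 * Adds the firmware event object to ioc->fw_event_list and queues it on the
 * firmware_event_thread workqueue to be processed from user context.
 */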
3560static void
3561_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3562{
3563 unsigned long flags;
3564
3565 if (ioc->firmware_event_thread == NULL)
3566 return;
3567
3568 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3569 fw_event_work_get(fw_event);
3570 INIT_LIST_HEAD(&fw_event->list);
3571 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3572 INIT_WORK(&fw_event->work, _firmware_event_work);
3573 fw_event_work_get(fw_event);
3574 queue_work(ioc->firmware_event_thread, &fw_event->work);
3575 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3576}
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586static void
3587_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3588 *fw_event)
3589{
3590 unsigned long flags;
3591
3592 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3593 if (!list_empty(&fw_event->list)) {
3594 list_del_init(&fw_event->list);
3595 fw_event_work_put(fw_event);
3596 }
3597 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3598}
3599
3600
3601
3602
3603
3604
3605
3606void
3607mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3608 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3609{
3610 struct fw_event_work *fw_event;
3611 u16 sz;
3612
3613 if (ioc->is_driver_loading)
3614 return;
3615 sz = sizeof(*event_data);
3616 fw_event = alloc_fw_event_work(sz);
3617 if (!fw_event)
3618 return;
3619 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3620 fw_event->ioc = ioc;
3621 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3622 _scsih_fw_event_add(ioc, fw_event);
3623 fw_event_work_put(fw_event);
3624}
3625
3626
3627
3628
3629
3630static void
3631_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3632{
3633 struct fw_event_work *fw_event;
3634
3635 fw_event = alloc_fw_event_work(0);
3636 if (!fw_event)
3637 return;
3638 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3639 fw_event->ioc = ioc;
3640 _scsih_fw_event_add(ioc, fw_event);
3641 fw_event_work_put(fw_event);
3642}
3643
3644
3645
3646
3647
3648void
3649mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3650{
3651 struct fw_event_work *fw_event;
3652
3653 fw_event = alloc_fw_event_work(0);
3654 if (!fw_event)
3655 return;
3656 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3657 fw_event->ioc = ioc;
3658 _scsih_fw_event_add(ioc, fw_event);
3659 fw_event_work_put(fw_event);
3660}
3661
3662static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3663{
3664 unsigned long flags;
3665 struct fw_event_work *fw_event = NULL;
3666
3667 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3668 if (!list_empty(&ioc->fw_event_list)) {
3669 fw_event = list_first_entry(&ioc->fw_event_list,
3670 struct fw_event_work, list);
3671 list_del_init(&fw_event->list);
3672 }
3673 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3674
3675 return fw_event;
3676}
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
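/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 * Context: task, can sleep (uses cancel_work_sync()).
 *
 * Walks the firmware event queue, dropping queued events and waiting for
 * any event that is currently executing to complete.
 */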
3687static void
3688_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3689{
3690 struct fw_event_work *fw_event;
3691
3692 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3693 !ioc->firmware_event_thread)
3694 return;
3695
3696
3697
3698
3699
3700
3701 if (ioc->shost_recovery && ioc->current_event)
3702 ioc->current_event->ignore = 1;
3703
3704 ioc->fw_events_cleanup = 1;
3705 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3706 (fw_event = ioc->current_event)) {
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722 if (fw_event == ioc->current_event &&
3723 ioc->current_event->event !=
3724 MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3725 ioc->current_event = NULL;
3726 continue;
3727 }
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737 if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3738 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3739 ioc->start_scan = 0;
3740 }
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750 if (cancel_work_sync(&fw_event->work))
3751 fw_event_work_put(fw_event);
3752
3753 fw_event_work_put(fw_event);
3754 }
3755 ioc->fw_events_cleanup = 0;
3756}
3757
3758
3759
3760
3761
3762
3763
3764
3765
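/**
 * _scsih_internal_device_block - set the device state to SDEV_BLOCK
 * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Blocks further I/O to the device; on failure only a warning is logged.
 */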
3766static void
3767_scsih_internal_device_block(struct scsi_device *sdev,
3768 struct MPT3SAS_DEVICE *sas_device_priv_data)
3769{
3770 int r = 0;
3771
3772 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3773 sas_device_priv_data->sas_target->handle);
3774 sas_device_priv_data->block = 1;
3775
3776 r = scsi_internal_device_block_nowait(sdev);
3777 if (r == -EINVAL)
3778 sdev_printk(KERN_WARNING, sdev,
3779 "device_block failed with return(%d) for handle(0x%04x)\n",
3780 r, sas_device_priv_data->sas_target->handle);
3781}
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791static void
3792_scsih_internal_device_unblock(struct scsi_device *sdev,
3793 struct MPT3SAS_DEVICE *sas_device_priv_data)
3794{
3795 int r = 0;
3796
3797 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3798 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3799 sas_device_priv_data->block = 0;
3800 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3801 if (r == -EINVAL) {
3802
3803
3804
3805
3806
3807 sdev_printk(KERN_WARNING, sdev,
3808 "device_unblock failed with return(%d) for handle(0x%04x) "
3809 "performing a block followed by an unblock\n",
3810 r, sas_device_priv_data->sas_target->handle);
3811 sas_device_priv_data->block = 1;
3812 r = scsi_internal_device_block_nowait(sdev);
3813 if (r)
3814 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3815 "failed with return(%d) for handle(0x%04x)\n",
3816 r, sas_device_priv_data->sas_target->handle);
3817
3818 sas_device_priv_data->block = 0;
3819 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3820 if (r)
3821 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3822 " failed with return(%d) for handle(0x%04x)\n",
3823 r, sas_device_priv_data->sas_target->handle);
3824 }
3825}
3826
3827
3828
3829
3830
3831
3832
3833static void
3834_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3835{
3836 struct MPT3SAS_DEVICE *sas_device_priv_data;
3837 struct scsi_device *sdev;
3838
3839 shost_for_each_device(sdev, ioc->shost) {
3840 sas_device_priv_data = sdev->hostdata;
3841 if (!sas_device_priv_data)
3842 continue;
3843 if (!sas_device_priv_data->block)
3844 continue;
3845
3846 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3847 "device_running, handle(0x%04x)\n",
3848 sas_device_priv_data->sas_target->handle));
3849 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3850 }
3851}
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862static void
3863_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3864 u64 sas_address, struct hba_port *port)
3865{
3866 struct MPT3SAS_DEVICE *sas_device_priv_data;
3867 struct scsi_device *sdev;
3868
3869 shost_for_each_device(sdev, ioc->shost) {
3870 sas_device_priv_data = sdev->hostdata;
3871 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3872 continue;
3873 if (sas_device_priv_data->sas_target->sas_address
3874 != sas_address)
3875 continue;
3876 if (sas_device_priv_data->sas_target->port != port)
3877 continue;
3878 if (sas_device_priv_data->block)
3879 _scsih_internal_device_unblock(sdev,
3880 sas_device_priv_data);
3881 }
3882}
3883
3884
3885
3886
3887
3888
3889
3890static void
3891_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3892{
3893 struct MPT3SAS_DEVICE *sas_device_priv_data;
3894 struct scsi_device *sdev;
3895
3896 shost_for_each_device(sdev, ioc->shost) {
3897 sas_device_priv_data = sdev->hostdata;
3898 if (!sas_device_priv_data)
3899 continue;
3900 if (sas_device_priv_data->block)
3901 continue;
3902 if (sas_device_priv_data->ignore_delay_remove) {
3903 sdev_printk(KERN_INFO, sdev,
3904 "%s skip device_block for SES handle(0x%04x)\n",
3905 __func__, sas_device_priv_data->sas_target->handle);
3906 continue;
3907 }
3908 _scsih_internal_device_block(sdev, sas_device_priv_data);
3909 }
3910}
3911
3912
3913
3914
3915
3916
3917
3918
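/**
 * _scsih_block_io_device - block I/O to the device with this handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Used while a device is being pulled or reports delay-not-responding, so
 * that no new I/O reaches it until it is re-added or removed.
 */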
3919static void
3920_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3921{
3922 struct MPT3SAS_DEVICE *sas_device_priv_data;
3923 struct scsi_device *sdev;
3924 struct _sas_device *sas_device;
3925
3926 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3927
3928 shost_for_each_device(sdev, ioc->shost) {
3929 sas_device_priv_data = sdev->hostdata;
3930 if (!sas_device_priv_data)
3931 continue;
3932 if (sas_device_priv_data->sas_target->handle != handle)
3933 continue;
3934 if (sas_device_priv_data->block)
3935 continue;
3936 if (sas_device && sas_device->pend_sas_rphy_add)
3937 continue;
3938 if (sas_device_priv_data->ignore_delay_remove) {
3939 sdev_printk(KERN_INFO, sdev,
3940 "%s skip device_block for SES handle(0x%04x)\n",
3941 __func__, sas_device_priv_data->sas_target->handle);
3942 continue;
3943 }
3944 _scsih_internal_device_block(sdev, sas_device_priv_data);
3945 }
3946
3947 if (sas_device)
3948 sas_device_put(sas_device);
3949}
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960static void
3961_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3962 struct _sas_node *sas_expander)
3963{
3964 struct _sas_port *mpt3sas_port;
3965 struct _sas_device *sas_device;
3966 struct _sas_node *expander_sibling;
3967 unsigned long flags;
3968
3969 if (!sas_expander)
3970 return;
3971
3972 list_for_each_entry(mpt3sas_port,
3973 &sas_expander->sas_port_list, port_list) {
3974 if (mpt3sas_port->remote_identify.device_type ==
3975 SAS_END_DEVICE) {
3976 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3977 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3978 mpt3sas_port->remote_identify.sas_address,
3979 mpt3sas_port->hba_port);
3980 if (sas_device) {
3981 set_bit(sas_device->handle,
3982 ioc->blocking_handles);
3983 sas_device_put(sas_device);
3984 }
3985 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3986 }
3987 }
3988
3989 list_for_each_entry(mpt3sas_port,
3990 &sas_expander->sas_port_list, port_list) {
3991
3992 if (mpt3sas_port->remote_identify.device_type ==
3993 SAS_EDGE_EXPANDER_DEVICE ||
3994 mpt3sas_port->remote_identify.device_type ==
3995 SAS_FANOUT_EXPANDER_DEVICE) {
3996 expander_sibling =
3997 mpt3sas_scsih_expander_find_by_sas_address(
3998 ioc, mpt3sas_port->remote_identify.sas_address,
3999 mpt3sas_port->hba_port);
4000 _scsih_block_io_to_children_attached_to_ex(ioc,
4001 expander_sibling);
4002 }
4003 }
4004}
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014static void
4015_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4016 Mpi2EventDataSasTopologyChangeList_t *event_data)
4017{
4018 int i;
4019 u16 handle;
4020 u16 reason_code;
4021
4022 for (i = 0; i < event_data->NumEntries; i++) {
4023 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4024 if (!handle)
4025 continue;
4026 reason_code = event_data->PHY[i].PhyStatus &
4027 MPI2_EVENT_SAS_TOPO_RC_MASK;
4028 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4029 _scsih_block_io_device(ioc, handle);
4030 }
4031}
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041static void
4042_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4043 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4044{
4045 int i;
4046 u16 handle;
4047 u16 reason_code;
4048
4049 for (i = 0; i < event_data->NumEntries; i++) {
4050 handle =
4051 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4052 if (!handle)
4053 continue;
4054 reason_code = event_data->PortEntry[i].PortStatus;
4055 if (reason_code ==
4056 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4057 _scsih_block_io_device(ioc, handle);
4058 }
4059}
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
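/**
 * _scsih_tm_tr_send - send target reset request as part of device removal
 * @ioc: per adapter object
 * @handle: device handle
 * Context: must not sleep (uses GFP_ATOMIC).
 *
 * Initiates the device removal handshake with controller firmware: marks
 * the target deleted, issues a target reset on the high priority queue (or
 * queues it on delayed_tr_list when no request frame is free); the sas
 * iounit control (REMOVE_DEVICE) is sent from the completion handler.
 */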
4075static void
4076_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4077{
4078 Mpi2SCSITaskManagementRequest_t *mpi_request;
4079 u16 smid;
4080 struct _sas_device *sas_device = NULL;
4081 struct _pcie_device *pcie_device = NULL;
4082 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4083 u64 sas_address = 0;
4084 unsigned long flags;
4085 struct _tr_list *delayed_tr;
4086 u32 ioc_state;
4087 u8 tr_method = 0;
4088 struct hba_port *port = NULL;
4089
4090 if (ioc->pci_error_recovery) {
4091 dewtprintk(ioc,
4092 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4093 __func__, handle));
4094 return;
4095 }
4096 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4097 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4098 dewtprintk(ioc,
4099 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4100 __func__, handle));
4101 return;
4102 }
4103
4104
4105 if (test_bit(handle, ioc->pd_handles))
4106 return;
4107
4108 clear_bit(handle, ioc->pend_os_device_add);
4109
4110 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4111 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4112 if (sas_device && sas_device->starget &&
4113 sas_device->starget->hostdata) {
4114 sas_target_priv_data = sas_device->starget->hostdata;
4115 sas_target_priv_data->deleted = 1;
4116 sas_address = sas_device->sas_address;
4117 port = sas_device->port;
4118 }
4119 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4120 if (!sas_device) {
4121 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4122 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4123 if (pcie_device && pcie_device->starget &&
4124 pcie_device->starget->hostdata) {
4125 sas_target_priv_data = pcie_device->starget->hostdata;
4126 sas_target_priv_data->deleted = 1;
4127 sas_address = pcie_device->wwid;
4128 }
4129 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4130 if (pcie_device && (!ioc->tm_custom_handling) &&
4131 (!(mpt3sas_scsih_is_pcie_scsi_device(
4132 pcie_device->device_info))))
4133 tr_method =
4134 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4135 else
4136 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4137 }
4138 if (sas_target_priv_data) {
4139 dewtprintk(ioc,
4140 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4141 handle, (u64)sas_address));
4142 if (sas_device) {
4143 if (sas_device->enclosure_handle != 0)
4144 dewtprintk(ioc,
4145 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4146 (u64)sas_device->enclosure_logical_id,
4147 sas_device->slot));
4148 if (sas_device->connector_name[0] != '\0')
4149 dewtprintk(ioc,
4150 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4151 sas_device->enclosure_level,
4152 sas_device->connector_name));
4153 } else if (pcie_device) {
4154 if (pcie_device->enclosure_handle != 0)
4155 dewtprintk(ioc,
4156 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4157 (u64)pcie_device->enclosure_logical_id,
4158 pcie_device->slot));
4159 if (pcie_device->connector_name[0] != '\0')
4160 dewtprintk(ioc,
				ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4162 pcie_device->enclosure_level,
4163 pcie_device->connector_name));
4164 }
4165 _scsih_ublock_io_device(ioc, sas_address, port);
4166 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4167 }
4168
4169 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4170 if (!smid) {
4171 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4172 if (!delayed_tr)
4173 goto out;
4174 INIT_LIST_HEAD(&delayed_tr->list);
4175 delayed_tr->handle = handle;
4176 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4177 dewtprintk(ioc,
4178 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4179 handle));
4180 goto out;
4181 }
4182
4183 dewtprintk(ioc,
4184 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4185 handle, smid, ioc->tm_tr_cb_idx));
4186 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4187 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4188 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4189 mpi_request->DevHandle = cpu_to_le16(handle);
4190 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4191 mpi_request->MsgFlags = tr_method;
4192 set_bit(handle, ioc->device_remove_in_progress);
4193 ioc->put_smid_hi_priority(ioc, smid, 0);
4194 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4195
4196out:
4197 if (sas_device)
4198 sas_device_put(sas_device);
4199 if (pcie_device)
4200 pcie_device_put(pcie_device);
4201}
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
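/**
 * _scsih_tm_tr_complete - target reset completion (device removal flow)
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame (lower 32bit addr)
 * Context: interrupt time.
 *
 * Continues the device removal handshake by sending a sas iounit control
 * request (MPI2_SAS_OP_REMOVE_DEVICE) to firmware, or queuing it on
 * delayed_sc_list when no request frame is available.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *         0 means the mf is freed from this function.
 */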
4219static u8
4220_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4221 u32 reply)
4222{
4223 u16 handle;
4224 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4225 Mpi2SCSITaskManagementReply_t *mpi_reply =
4226 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4227 Mpi2SasIoUnitControlRequest_t *mpi_request;
4228 u16 smid_sas_ctrl;
4229 u32 ioc_state;
4230 struct _sc_list *delayed_sc;
4231
4232 if (ioc->pci_error_recovery) {
4233 dewtprintk(ioc,
4234 ioc_info(ioc, "%s: host in pci error recovery\n",
4235 __func__));
4236 return 1;
4237 }
4238 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4239 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4240 dewtprintk(ioc,
4241 ioc_info(ioc, "%s: host is not operational\n",
4242 __func__));
4243 return 1;
4244 }
4245 if (unlikely(!mpi_reply)) {
4246 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4247 __FILE__, __LINE__, __func__);
4248 return 1;
4249 }
4250 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4251 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4252 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4253 dewtprintk(ioc,
4254 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4255 handle,
4256 le16_to_cpu(mpi_reply->DevHandle), smid));
4257 return 0;
4258 }
4259
4260 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4261 dewtprintk(ioc,
4262 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4263 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4264 le32_to_cpu(mpi_reply->IOCLogInfo),
4265 le32_to_cpu(mpi_reply->TerminationCount)));
4266
4267 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4268 if (!smid_sas_ctrl) {
4269 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4270 if (!delayed_sc)
4271 return _scsih_check_for_pending_tm(ioc, smid);
4272 INIT_LIST_HEAD(&delayed_sc->list);
4273 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4274 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4275 dewtprintk(ioc,
4276 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4277 handle));
4278 return _scsih_check_for_pending_tm(ioc, smid);
4279 }
4280
4281 dewtprintk(ioc,
4282 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4283 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4284 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4285 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4286 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4287 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4288 mpi_request->DevHandle = mpi_request_tm->DevHandle;
4289 ioc->put_smid_default(ioc, smid_sas_ctrl);
4290
4291 return _scsih_check_for_pending_tm(ioc, smid);
4292}
4293
4294
4295
4296
4297
4298
4299
4300
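/**
 * _scsih_allow_scmd_to_device - decide whether a command may be issued
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 *
 * Nothing is allowed through during pci error recovery.  While the host is
 * being removed, SAS 2.0 controllers block everything, whereas newer
 * controllers still allow SYNCHRONIZE_CACHE and START_STOP so drives can be
 * flushed and spun down.
 *
 * Return: true if the command may be sent to the IOC, otherwise false.
 */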
static inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
4303{
4304
4305 if (ioc->pci_error_recovery)
4306 return false;
4307
4308 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4309 if (ioc->remove_host)
4310 return false;
4311
4312 return true;
4313 }
4314
4315 if (ioc->remove_host) {
4316
4317 switch (scmd->cmnd[0]) {
4318 case SYNCHRONIZE_CACHE:
4319 case START_STOP:
4320 return true;
4321 default:
4322 return false;
4323 }
4324 }
4325
4326 return true;
4327}
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
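/**
 * _scsih_sas_control_complete - completion routine for sas iounit control
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame (lower 32bit addr)
 * Context: interrupt time.
 *
 * Final step of the device removal handshake; on success the handle is
 * cleared from device_remove_in_progress, then any delayed internal
 * commands are kicked off.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *         0 means the mf is freed from this function.
 */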
4344static u8
4345_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4346 u8 msix_index, u32 reply)
4347{
4348 Mpi2SasIoUnitControlReply_t *mpi_reply =
4349 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4350
4351 if (likely(mpi_reply)) {
4352 dewtprintk(ioc,
4353 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4354 le16_to_cpu(mpi_reply->DevHandle), smid,
4355 le16_to_cpu(mpi_reply->IOCStatus),
4356 le32_to_cpu(mpi_reply->IOCLogInfo)));
4357 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4358 MPI2_IOCSTATUS_SUCCESS) {
4359 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4360 ioc->device_remove_in_progress);
4361 }
4362 } else {
4363 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4364 __FILE__, __LINE__, __func__);
4365 }
4366 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4367}
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379static void
4380_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4381{
4382 Mpi2SCSITaskManagementRequest_t *mpi_request;
4383 u16 smid;
4384 struct _tr_list *delayed_tr;
4385
4386 if (ioc->pci_error_recovery) {
4387 dewtprintk(ioc,
4388 ioc_info(ioc, "%s: host reset in progress!\n",
4389 __func__));
4390 return;
4391 }
4392
4393 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4394 if (!smid) {
4395 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4396 if (!delayed_tr)
4397 return;
4398 INIT_LIST_HEAD(&delayed_tr->list);
4399 delayed_tr->handle = handle;
4400 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4401 dewtprintk(ioc,
4402 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4403 handle));
4404 return;
4405 }
4406
4407 dewtprintk(ioc,
4408 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4409 handle, smid, ioc->tm_tr_volume_cb_idx));
4410 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4411 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4412 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4413 mpi_request->DevHandle = cpu_to_le16(handle);
4414 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4415 ioc->put_smid_hi_priority(ioc, smid, 0);
4416}
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429static u8
4430_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4431 u8 msix_index, u32 reply)
4432{
4433 u16 handle;
4434 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4435 Mpi2SCSITaskManagementReply_t *mpi_reply =
4436 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4437
4438 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4439 dewtprintk(ioc,
4440 ioc_info(ioc, "%s: host reset in progress!\n",
4441 __func__));
4442 return 1;
4443 }
4444 if (unlikely(!mpi_reply)) {
4445 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4446 __FILE__, __LINE__, __func__);
4447 return 1;
4448 }
4449
4450 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4451 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4452 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4453 dewtprintk(ioc,
4454 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4455 handle, le16_to_cpu(mpi_reply->DevHandle),
4456 smid));
4457 return 0;
4458 }
4459
4460 dewtprintk(ioc,
4461 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4462 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4463 le32_to_cpu(mpi_reply->IOCLogInfo),
4464 le32_to_cpu(mpi_reply->TerminationCount)));
4465
4466 return _scsih_check_for_pending_tm(ioc, smid);
4467}
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478static void
4479_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4480 U32 event_context)
4481{
4482 Mpi2EventAckRequest_t *ack_request;
4483 int i = smid - ioc->internal_smid;
4484 unsigned long flags;
4485
4486
4487
4488
4489
4490 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4491 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4492 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4493
4494 dewtprintk(ioc,
4495 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4496 le16_to_cpu(event), smid, ioc->base_cb_idx));
4497 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4498 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4499 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4500 ack_request->Event = event;
4501 ack_request->EventContext = event_context;
4502 ack_request->VF_ID = 0;
4503 ack_request->VP_ID = 0;
4504 ioc->put_smid_default(ioc, smid);
4505}
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516static void
4517_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4518 u16 smid, u16 handle)
4519{
4520 Mpi2SasIoUnitControlRequest_t *mpi_request;
4521 u32 ioc_state;
4522 int i = smid - ioc->internal_smid;
4523 unsigned long flags;
4524
4525 if (ioc->remove_host) {
4526 dewtprintk(ioc,
4527 ioc_info(ioc, "%s: host has been removed\n",
4528 __func__));
4529 return;
4530 } else if (ioc->pci_error_recovery) {
4531 dewtprintk(ioc,
4532 ioc_info(ioc, "%s: host in pci error recovery\n",
4533 __func__));
4534 return;
4535 }
4536 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4537 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4538 dewtprintk(ioc,
4539 ioc_info(ioc, "%s: host is not operational\n",
4540 __func__));
4541 return;
4542 }
4543
4544
4545
4546
4547
4548 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4549 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4550 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4551
4552 dewtprintk(ioc,
4553 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4554 handle, smid, ioc->tm_sas_control_cb_idx));
4555 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4556 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4557 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4558 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4559 mpi_request->DevHandle = cpu_to_le16(handle);
4560 ioc->put_smid_default(ioc, smid);
4561}
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576u8
4577mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4578{
4579 struct _sc_list *delayed_sc;
4580 struct _event_ack_list *delayed_event_ack;
4581
4582 if (!list_empty(&ioc->delayed_event_ack_list)) {
4583 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4584 struct _event_ack_list, list);
4585 _scsih_issue_delayed_event_ack(ioc, smid,
4586 delayed_event_ack->Event, delayed_event_ack->EventContext);
4587 list_del(&delayed_event_ack->list);
4588 kfree(delayed_event_ack);
4589 return 0;
4590 }
4591
4592 if (!list_empty(&ioc->delayed_sc_list)) {
4593 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4594 struct _sc_list, list);
4595 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4596 delayed_sc->handle);
4597 list_del(&delayed_sc->list);
4598 kfree(delayed_sc);
4599 return 0;
4600 }
4601 return 1;
4602}
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
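/**
 * _scsih_check_for_pending_tm - check for pending target resets
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Checks the delayed volume and device target reset lists and issues the
 * next request, freeing this smid.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *	0 means the mf is freed from this function.
 */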
4615static u8
4616_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4617{
4618 struct _tr_list *delayed_tr;
4619
4620 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4621 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4622 struct _tr_list, list);
4623 mpt3sas_base_free_smid(ioc, smid);
4624 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4625 list_del(&delayed_tr->list);
4626 kfree(delayed_tr);
4627 return 0;
4628 }
4629
4630 if (!list_empty(&ioc->delayed_tr_list)) {
4631 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4632 struct _tr_list, list);
4633 mpt3sas_base_free_smid(ioc, smid);
4634 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4635 list_del(&delayed_tr->list);
4636 kfree(delayed_tr);
4637 return 0;
4638 }
4639
4640 return 1;
4641}
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
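/**
 * _scsih_check_topo_delete_events - sanity check on SAS topology events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * Sends target resets for not-responding devices, blocks I/O to children
 * of a delayed-not-responding expander, and marks queued add events for a
 * removed expander as ignored.
 */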
4654static void
4655_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4656 Mpi2EventDataSasTopologyChangeList_t *event_data)
4657{
4658 struct fw_event_work *fw_event;
4659 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4660 u16 expander_handle;
4661 struct _sas_node *sas_expander;
4662 unsigned long flags;
4663 int i, reason_code;
4664 u16 handle;
4665
4666 for (i = 0 ; i < event_data->NumEntries; i++) {
4667 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4668 if (!handle)
4669 continue;
4670 reason_code = event_data->PHY[i].PhyStatus &
4671 MPI2_EVENT_SAS_TOPO_RC_MASK;
4672 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4673 _scsih_tm_tr_send(ioc, handle);
4674 }
4675
4676 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4677 if (expander_handle < ioc->sas_hba.num_phys) {
4678 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4679 return;
4680 }
4681 if (event_data->ExpStatus ==
4682 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4683
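		/* put expander attached devices into blocking state */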
4684 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4685 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4686 expander_handle);
4687 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4688 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4689 do {
4690 handle = find_first_bit(ioc->blocking_handles,
4691 ioc->facts.MaxDevHandle);
4692 if (handle < ioc->facts.MaxDevHandle)
4693 _scsih_block_io_device(ioc, handle);
4694 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4695 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4696 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4697
4698 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4699 return;
4700
4701
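	/* mark the ignore flag for pending events of this expander
	 * still awaiting processing in the fw_event_work queue
	 */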
4702 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4703 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4704 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4705 fw_event->ignore)
4706 continue;
4707 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4708 fw_event->event_data;
4709 if (local_event_data->ExpStatus ==
4710 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4711 local_event_data->ExpStatus ==
4712 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4713 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4714 expander_handle) {
4715 dewtprintk(ioc,
4716 ioc_info(ioc, "setting ignoring flag\n"));
4717 fw_event->ignore = 1;
4718 }
4719 }
4720 }
4721 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4722}
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734
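/**
 * _scsih_check_pcie_topo_remove_events - sanity check on PCIe topology
 *	events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * Sends target resets for not-responding PCIe devices, blocks I/O to
 * children of a removed or delayed switch, and marks queued add events
 * for that switch as ignored.
 */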
4735static void
4736_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4737 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4738{
4739 struct fw_event_work *fw_event;
4740 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4741 unsigned long flags;
4742 int i, reason_code;
4743 u16 handle, switch_handle;
4744
4745 for (i = 0; i < event_data->NumEntries; i++) {
4746 handle =
4747 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4748 if (!handle)
4749 continue;
4750 reason_code = event_data->PortEntry[i].PortStatus;
4751 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4752 _scsih_tm_tr_send(ioc, handle);
4753 }
4754
4755 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4756 if (!switch_handle) {
4757 _scsih_block_io_to_pcie_children_attached_directly(
4758 ioc, event_data);
4759 return;
4760 }
4761
4762 if ((event_data->SwitchStatus
4763 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4764 (event_data->SwitchStatus ==
4765 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4766 _scsih_block_io_to_pcie_children_attached_directly(
4767 ioc, event_data);
4768
4769 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4770 return;
4771
4772
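	/* mark the ignore flag for pending events of this switch
	 * still awaiting processing in the fw_event_work queue
	 */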
4773 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4774 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4775 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4776 fw_event->ignore)
4777 continue;
4778 local_event_data =
4779 (Mpi26EventDataPCIeTopologyChangeList_t *)
4780 fw_event->event_data;
4781 if (local_event_data->SwitchStatus ==
4782 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4783 local_event_data->SwitchStatus ==
4784 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4785 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4786 switch_handle) {
4787 dewtprintk(ioc,
4788 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4789 fw_event->ignore = 1;
4790 }
4791 }
4792 }
4793 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4794}
4795
4796
4797
4798
4799
4800
4801
4802
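/**
 * _scsih_set_volume_delete_flag - mark a RAID volume's target as deleted
 * @ioc: per adapter object
 * @handle: device handle of the volume
 */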
4803static void
4804_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4805{
4806 struct _raid_device *raid_device;
4807 struct MPT3SAS_TARGET *sas_target_priv_data;
4808 unsigned long flags;
4809
4810 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4811 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4812 if (raid_device && raid_device->starget &&
4813 raid_device->starget->hostdata) {
4814 sas_target_priv_data =
4815 raid_device->starget->hostdata;
4816 sas_target_priv_data->deleted = 1;
4817 dewtprintk(ioc,
4818 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4819 handle, (u64)raid_device->wwid));
4820 }
4821 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4822}
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
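/**
 * _scsih_set_volume_handle_for_tr - remember volume handles for target reset
 * @handle: volume device handle
 * @a: returns the first volume handle seen
 * @b: returns the second volume handle seen
 *
 * At most two distinct volume handles are tracked.
 */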
4834static void
4835_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4836{
4837 if (!handle || handle == *a || handle == *b)
4838 return;
4839 if (!*a)
4840 *a = handle;
4841 else if (!*b)
4842 *b = handle;
4843}
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
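/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * Sets the delete flag and issues target resets for deleted/removed
 * volumes, then resets unhidden physical disks, delaying a disk reset
 * when its volume reset is still outstanding.
 */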
4857static void
4858_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4859 Mpi2EventDataIrConfigChangeList_t *event_data)
4860{
4861 Mpi2EventIrConfigElement_t *element;
4862 int i;
4863 u16 handle, volume_handle, a, b;
4864 struct _tr_list *delayed_tr;
4865
4866 a = 0;
4867 b = 0;
4868
4869 if (ioc->is_warpdrive)
4870 return;
4871
4872
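	/* volume resets for deleted or removed volumes */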
4873 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4874 for (i = 0; i < event_data->NumElements; i++, element++) {
4875 if (le32_to_cpu(event_data->Flags) &
4876 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4877 continue;
4878 if (element->ReasonCode ==
4879 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4880 element->ReasonCode ==
4881 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4882 volume_handle = le16_to_cpu(element->VolDevHandle);
4883 _scsih_set_volume_delete_flag(ioc, volume_handle);
4884 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4885 }
4886 }
4887
4888
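	/* volume resets for UNHIDE events */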
4889 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4890 for (i = 0; i < event_data->NumElements; i++, element++) {
4891 if (le32_to_cpu(event_data->Flags) &
4892 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4893 continue;
4894 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4895 volume_handle = le16_to_cpu(element->VolDevHandle);
4896 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4897 }
4898 }
4899
4900 if (a)
4901 _scsih_tm_tr_volume_send(ioc, a);
4902 if (b)
4903 _scsih_tm_tr_volume_send(ioc, b);
4904
4905
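	/* physical disk target resets */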
4906 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4907 for (i = 0; i < event_data->NumElements; i++, element++) {
4908 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4909 continue;
4910 handle = le16_to_cpu(element->PhysDiskDevHandle);
4911 volume_handle = le16_to_cpu(element->VolDevHandle);
4912 clear_bit(handle, ioc->pd_handles);
4913 if (!volume_handle)
4914 _scsih_tm_tr_send(ioc, handle);
4915 else if (volume_handle == a || volume_handle == b) {
4916 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4917 BUG_ON(!delayed_tr);
4918 INIT_LIST_HEAD(&delayed_tr->list);
4919 delayed_tr->handle = handle;
4920 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4921 dewtprintk(ioc,
4922 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4923 handle));
4924 } else
4925 _scsih_tm_tr_send(ioc, handle);
4926 }
4927}
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939
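/**
 * _scsih_check_volume_delete_events - set delete flag for missing volumes
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * Marks the volume deleted when its state changes to MISSING or FAILED.
 */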
4940static void
4941_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4942 Mpi2EventDataIrVolume_t *event_data)
4943{
4944 u32 state;
4945
4946 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4947 return;
4948 state = le32_to_cpu(event_data->NewValue);
4949 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4950 MPI2_RAID_VOL_STATE_FAILED)
4951 _scsih_set_volume_delete_flag(ioc,
4952 le16_to_cpu(event_data->VolDevHandle));
4953}
4954
4955
4956
4957
4958
4959
4960
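/**
 * _scsih_temp_threshold_events - display temperature threshold exceeded events
 * @ioc: per adapter object
 * @event_data: the temperature event data payload
 */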
4961static void
4962_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4963 Mpi2EventDataTemperature_t *event_data)
4964{
4965 u32 doorbell;
4966 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4967 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4968 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4969 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4970 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4971 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4972 event_data->SensorNum);
4973 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4974 event_data->CurrentTemperature);
4975 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4976 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4977 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4978 MPI2_IOC_STATE_FAULT) {
4979 mpt3sas_print_fault_code(ioc,
4980 doorbell & MPI2_DOORBELL_DATA_MASK);
4981 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4982 MPI2_IOC_STATE_COREDUMP) {
4983 mpt3sas_print_coredump_info(ioc,
4984 doorbell & MPI2_DOORBELL_DATA_MASK);
4985 }
4986 }
4987 }
4988}
4989
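/**
 * _scsih_set_satl_pending - set or clear the ATA passthrough pending flag
 * @scmd: SCSI command
 * @pending: true when marking an ATA_12/ATA_16 command outstanding
 *
 * Return: non-zero if another ATA passthrough command is already pending
 * on this device, in which case the caller should retry later.
 */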
4990static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4991{
4992 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4993
4994 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4995 return 0;
4996
4997 if (pending)
4998 return test_and_set_bit(0, &priv->ata_command_pending);
4999
5000 clear_bit(0, &priv->ata_command_pending);
5001 return 0;
5002}
5003
5004
5005
5006
5007
5008
5009
5010
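/**
 * _scsih_flush_running_cmds - complete all outstanding SCSI commands
 * @ioc: per adapter object
 *
 * Flushes all pending commands following a host reset; commands are
 * completed with DID_RESET, or DID_NO_CONNECT when the host is being
 * removed or is in PCI error recovery.
 */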
5011static void
5012_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
5013{
5014 struct scsi_cmnd *scmd;
5015 struct scsiio_tracker *st;
5016 u16 smid;
5017 int count = 0;
5018
5019 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
5020 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5021 if (!scmd)
5022 continue;
5023 count++;
5024 _scsih_set_satl_pending(scmd, false);
5025 st = scsi_cmd_priv(scmd);
5026 mpt3sas_base_clear_st(ioc, st);
5027 scsi_dma_unmap(scmd);
5028 if (ioc->pci_error_recovery || ioc->remove_host)
5029 scmd->result = DID_NO_CONNECT << 16;
5030 else
5031 scmd->result = DID_RESET << 16;
5032 scmd->scsi_done(scmd);
5033 }
5034 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
5035}
5036
5037
5038
5039
5040
5041
5042
5043
5044
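/**
 * _scsih_setup_eedp - setup the MPI request for an EEDP (DIF) transfer
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Supports protection types 1, 2 and 3.
 */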
5045static void
5046_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5047 Mpi25SCSIIORequest_t *mpi_request)
5048{
5049 u16 eedp_flags;
5050 unsigned char prot_op = scsi_get_prot_op(scmd);
5051 unsigned char prot_type = scsi_get_prot_type(scmd);
5052 Mpi25SCSIIORequest_t *mpi_request_3v =
5053 (Mpi25SCSIIORequest_t *)mpi_request;
5054
5055 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
5056 return;
5057
5058 if (prot_op == SCSI_PROT_READ_STRIP)
5059 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5060 else if (prot_op == SCSI_PROT_WRITE_INSERT)
5061 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5062 else
5063 return;
5064
5065 switch (prot_type) {
5066 case SCSI_PROT_DIF_TYPE1:
5067 case SCSI_PROT_DIF_TYPE2:
5068
5069
5070
5071
5072
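		/* enable reference/guard tag checking and auto-increment
		 * the primary reference tag
		 */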
5073 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
5074 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
5075 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5076 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5077 cpu_to_be32(t10_pi_ref_tag(scmd->request));
5078 break;
5079
5080 case SCSI_PROT_DIF_TYPE3:
5081
5082
5083
5084
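		/* enable guard tag checking only */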
5085 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5086
5087 break;
5088 }
5089
5090 mpi_request_3v->EEDPBlockSize =
5091 cpu_to_le16(scmd->device->sector_size);
5092
5093 if (ioc->is_gen35_ioc)
5094 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5095 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5096}
5097
5098
5099
5100
5101
5102
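/**
 * _scsih_eedp_error_handling - return sense code for EEDP errors
 * @scmd: pointer to scsi command object
 * @ioc_status: ioc status
 *
 * Builds an ILLEGAL REQUEST (asc 0x10) sense with the ascq selected from
 * the guard/app tag/ref tag error reported by the IOC.
 */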
5103static void
5104_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5105{
5106 u8 ascq;
5107
5108 switch (ioc_status) {
5109 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5110 ascq = 0x01;
5111 break;
5112 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5113 ascq = 0x02;
5114 break;
5115 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5116 ascq = 0x03;
5117 break;
5118 default:
5119 ascq = 0x00;
5120 break;
5121 }
5122 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
5123 ascq);
5124 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
5125 SAM_STAT_CHECK_CONDITION;
5126}
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
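/**
 * scsih_qcmd - main scsi request entry point
 * @shost: SCSI host pointer
 * @scmd: pointer to scsi command object
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Return: 0 on success.  If there's a failure, return either
 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full.
 */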
5139static int
5140scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5141{
5142 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5143 struct MPT3SAS_DEVICE *sas_device_priv_data;
5144 struct MPT3SAS_TARGET *sas_target_priv_data;
5145 struct _raid_device *raid_device;
5146 struct request *rq = scmd->request;
5147 int class;
5148 Mpi25SCSIIORequest_t *mpi_request;
5149 struct _pcie_device *pcie_device = NULL;
5150 u32 mpi_control;
5151 u16 smid;
5152 u16 handle;
5153
5154 if (ioc->logging_level & MPT_DEBUG_SCSI)
5155 scsi_print_command(scmd);
5156
5157 sas_device_priv_data = scmd->device->hostdata;
5158 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5159 scmd->result = DID_NO_CONNECT << 16;
5160 scmd->scsi_done(scmd);
5161 return 0;
5162 }
5163
5164 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5165 scmd->result = DID_NO_CONNECT << 16;
5166 scmd->scsi_done(scmd);
5167 return 0;
5168 }
5169
5170 sas_target_priv_data = sas_device_priv_data->sas_target;
5171
5172
5173 handle = sas_target_priv_data->handle;
5174 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5175 scmd->result = DID_NO_CONNECT << 16;
5176 scmd->scsi_done(scmd);
5177 return 0;
5178 }
5179
5180
5181 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5182
5183 return SCSI_MLQUEUE_HOST_BUSY;
5184 } else if (sas_target_priv_data->deleted) {
5185
5186 scmd->result = DID_NO_CONNECT << 16;
5187 scmd->scsi_done(scmd);
5188 return 0;
5189 } else if (sas_target_priv_data->tm_busy ||
5190 sas_device_priv_data->block) {
5191
5192 return SCSI_MLQUEUE_DEVICE_BUSY;
5193 }
5194
5195
5196
5197
5198
5199
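	/* Work around for firmware SATL handling: allow only one ATA
	 * passthrough command outstanding per device.  The loop relies on
	 * atomic bit operations since we are lockless at this point.
	 */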
5200 do {
5201 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5202 return SCSI_MLQUEUE_DEVICE_BUSY;
5203 } while (_scsih_set_satl_pending(scmd, true));
5204
5205 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5206 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5207 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5208 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5209 else
5210 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5211
5212
5213 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5214
5215 if (sas_device_priv_data->ncq_prio_enable) {
5216 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5217 if (class == IOPRIO_CLASS_RT)
5218 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5219 }
5220
5221
5222
5223 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5224 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5225 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5226 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5227
5228 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5229 if (!smid) {
5230 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5231 _scsih_set_satl_pending(scmd, false);
5232 goto out;
5233 }
5234 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5235 memset(mpi_request, 0, ioc->request_sz);
5236 _scsih_setup_eedp(ioc, scmd, mpi_request);
5237
5238 if (scmd->cmd_len == 32)
5239 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5240 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5241 if (sas_device_priv_data->sas_target->flags &
5242 MPT_TARGET_FLAGS_RAID_COMPONENT)
5243 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5244 else
5245 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5246 mpi_request->DevHandle = cpu_to_le16(handle);
5247 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5248 mpi_request->Control = cpu_to_le32(mpi_control);
5249 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5250 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5251 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5252 mpi_request->SenseBufferLowAddress =
5253 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5254 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5255 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5256 mpi_request->LUN);
5257 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5258
5259 if (mpi_request->DataLength) {
5260 pcie_device = sas_target_priv_data->pcie_dev;
5261 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5262 mpt3sas_base_free_smid(ioc, smid);
5263 _scsih_set_satl_pending(scmd, false);
5264 goto out;
5265 }
5266 } else
5267 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5268
5269 raid_device = sas_target_priv_data->raid_device;
5270 if (raid_device && raid_device->direct_io_enabled)
5271 mpt3sas_setup_direct_io(ioc, scmd,
5272 raid_device, mpi_request);
5273
5274 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5275 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5276 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5277 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5278 ioc->put_smid_fast_path(ioc, smid, handle);
5279 } else
5280 ioc->put_smid_scsi_io(ioc, smid,
5281 le16_to_cpu(mpi_request->DevHandle));
5282 } else
5283 ioc->put_smid_default(ioc, smid);
5284 return 0;
5285
5286 out:
5287 return SCSI_MLQUEUE_HOST_BUSY;
5288}
5289
5290
5291
5292
5293
5294
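/**
 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
 * @sense_buffer: sense data returned by target
 * @data: normalized skey/asc/ascq
 */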
5295static void
5296_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5297{
5298 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5299
5300 data->skey = sense_buffer[1] & 0x0F;
5301 data->asc = sense_buffer[2];
5302 data->ascq = sense_buffer[3];
5303 } else {
5304
5305 data->skey = sense_buffer[2] & 0x0F;
5306 data->asc = sense_buffer[12];
5307 data->ascq = sense_buffer[13];
5308 }
5309}
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321
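/**
 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
 *
 * Prints the IOC status, SCSI status/state, target identity and sense
 * data for the failed request.
 */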
5322static void
5323_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5324 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5325{
5326 u32 response_info;
5327 u8 *response_bytes;
5328 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5329 MPI2_IOCSTATUS_MASK;
5330 u8 scsi_state = mpi_reply->SCSIState;
5331 u8 scsi_status = mpi_reply->SCSIStatus;
5332 char *desc_ioc_state = NULL;
5333 char *desc_scsi_status = NULL;
5334 char *desc_scsi_state = ioc->tmp_string;
5335 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5336 struct _sas_device *sas_device = NULL;
5337 struct _pcie_device *pcie_device = NULL;
5338 struct scsi_target *starget = scmd->device->sdev_target;
5339 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5340 char *device_str = NULL;
5341
5342 if (!priv_target)
5343 return;
5344 if (ioc->hide_ir_msg)
5345 device_str = "WarpDrive";
5346 else
5347 device_str = "volume";
5348
5349 if (log_info == 0x31170000)
5350 return;
5351
5352 switch (ioc_status) {
5353 case MPI2_IOCSTATUS_SUCCESS:
5354 desc_ioc_state = "success";
5355 break;
5356 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5357 desc_ioc_state = "invalid function";
5358 break;
5359 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5360 desc_ioc_state = "scsi recovered error";
5361 break;
5362 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5363 desc_ioc_state = "scsi invalid dev handle";
5364 break;
5365 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5366 desc_ioc_state = "scsi device not there";
5367 break;
5368 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5369 desc_ioc_state = "scsi data overrun";
5370 break;
5371 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5372 desc_ioc_state = "scsi data underrun";
5373 break;
5374 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5375 desc_ioc_state = "scsi io data error";
5376 break;
5377 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5378 desc_ioc_state = "scsi protocol error";
5379 break;
5380 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5381 desc_ioc_state = "scsi task terminated";
5382 break;
5383 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5384 desc_ioc_state = "scsi residual mismatch";
5385 break;
5386 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5387 desc_ioc_state = "scsi task mgmt failed";
5388 break;
5389 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5390 desc_ioc_state = "scsi ioc terminated";
5391 break;
5392 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5393 desc_ioc_state = "scsi ext terminated";
5394 break;
5395 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5396 desc_ioc_state = "eedp guard error";
5397 break;
5398 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5399 desc_ioc_state = "eedp ref tag error";
5400 break;
5401 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5402 desc_ioc_state = "eedp app tag error";
5403 break;
5404 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5405 desc_ioc_state = "insufficient power";
5406 break;
5407 default:
5408 desc_ioc_state = "unknown";
5409 break;
5410 }
5411
5412 switch (scsi_status) {
5413 case MPI2_SCSI_STATUS_GOOD:
5414 desc_scsi_status = "good";
5415 break;
5416 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5417 desc_scsi_status = "check condition";
5418 break;
5419 case MPI2_SCSI_STATUS_CONDITION_MET:
5420 desc_scsi_status = "condition met";
5421 break;
5422 case MPI2_SCSI_STATUS_BUSY:
5423 desc_scsi_status = "busy";
5424 break;
5425 case MPI2_SCSI_STATUS_INTERMEDIATE:
5426 desc_scsi_status = "intermediate";
5427 break;
5428 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5429 desc_scsi_status = "intermediate condmet";
5430 break;
5431 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5432 desc_scsi_status = "reservation conflict";
5433 break;
5434 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5435 desc_scsi_status = "command terminated";
5436 break;
5437 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5438 desc_scsi_status = "task set full";
5439 break;
5440 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5441 desc_scsi_status = "aca active";
5442 break;
5443 case MPI2_SCSI_STATUS_TASK_ABORTED:
5444 desc_scsi_status = "task aborted";
5445 break;
5446 default:
5447 desc_scsi_status = "unknown";
5448 break;
5449 }
5450
5451 desc_scsi_state[0] = '\0';
5452 if (!scsi_state)
5453 desc_scsi_state = " ";
5454 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5455 strcat(desc_scsi_state, "response info ");
5456 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5457 strcat(desc_scsi_state, "state terminated ");
5458 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5459 strcat(desc_scsi_state, "no status ");
5460 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5461 strcat(desc_scsi_state, "autosense failed ");
5462 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5463 strcat(desc_scsi_state, "autosense valid ");
5464
5465 scsi_print_command(scmd);
5466
5467 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5468 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5469 device_str, (u64)priv_target->sas_address);
5470 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5471 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5472 if (pcie_device) {
5473 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5474 (u64)pcie_device->wwid, pcie_device->port_num);
5475 if (pcie_device->enclosure_handle != 0)
5476 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5477 (u64)pcie_device->enclosure_logical_id,
5478 pcie_device->slot);
5479 if (pcie_device->connector_name[0])
5480 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5481 pcie_device->enclosure_level,
5482 pcie_device->connector_name);
5483 pcie_device_put(pcie_device);
5484 }
5485 } else {
5486 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5487 if (sas_device) {
5488 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5489 (u64)sas_device->sas_address, sas_device->phy);
5490
5491 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5492 NULL, NULL);
5493
5494 sas_device_put(sas_device);
5495 }
5496 }
5497
5498 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5499 le16_to_cpu(mpi_reply->DevHandle),
5500 desc_ioc_state, ioc_status, smid);
5501 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5502 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5503 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5504 le16_to_cpu(mpi_reply->TaskTag),
5505 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5506 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5507 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5508
5509 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5510 struct sense_info data;
5511 _scsih_normalize_sense(scmd->sense_buffer, &data);
5512 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5513 data.skey, data.asc, data.ascq,
5514 le32_to_cpu(mpi_reply->SenseCount));
5515 }
5516 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5517 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5518 response_bytes = (u8 *)&response_info;
5519 _scsih_response_code(ioc, response_bytes[0]);
5520 }
5521}
5522
5523
5524
5525
5526
5527
5528
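/**
 * _scsih_turn_on_pfa_led - illuminate the Predicted Fault LED
 * @ioc: per adapter object
 * @handle: device handle
 * Context: process
 */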
5529static void
5530_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5531{
5532 Mpi2SepReply_t mpi_reply;
5533 Mpi2SepRequest_t mpi_request;
5534 struct _sas_device *sas_device;
5535
5536 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5537 if (!sas_device)
5538 return;
5539
5540 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5541 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5542 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5543 mpi_request.SlotStatus =
5544 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5545 mpi_request.DevHandle = cpu_to_le16(handle);
5546 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5547 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5548 &mpi_request)) != 0) {
5549 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5550 __FILE__, __LINE__, __func__);
5551 goto out;
5552 }
5553 sas_device->pfa_led_on = 1;
5554
5555 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5556 dewtprintk(ioc,
5557 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5558 le16_to_cpu(mpi_reply.IOCStatus),
5559 le32_to_cpu(mpi_reply.IOCLogInfo)));
5560 goto out;
5561 }
5562out:
5563 sas_device_put(sas_device);
5564}
5565
5566
5567
5568
5569
5570
5571
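/**
 * _scsih_turn_off_pfa_led - turn off the Predicted Fault LED
 * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to be turned off
 * Context: process
 */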
5572static void
5573_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5574 struct _sas_device *sas_device)
5575{
5576 Mpi2SepReply_t mpi_reply;
5577 Mpi2SepRequest_t mpi_request;
5578
5579 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5580 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5581 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5582 mpi_request.SlotStatus = 0;
5583 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5584 mpi_request.DevHandle = 0;
5585 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5586 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5587 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5588 &mpi_request)) != 0) {
5589 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5590 __FILE__, __LINE__, __func__);
5591 return;
5592 }
5593
5594 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5595 dewtprintk(ioc,
5596 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5597 le16_to_cpu(mpi_reply.IOCStatus),
5598 le32_to_cpu(mpi_reply.IOCLogInfo)));
5599 return;
5600 }
5601}
5602
5603
5604
5605
5606
5607
5608
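/**
 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Queues a firmware event work item so the LED is set from process
 * context.
 */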
5609static void
5610_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5611{
5612 struct fw_event_work *fw_event;
5613
5614 fw_event = alloc_fw_event_work(0);
5615 if (!fw_event)
5616 return;
5617 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5618 fw_event->device_handle = handle;
5619 fw_event->ioc = ioc;
5620 _scsih_fw_event_add(ioc, fw_event);
5621 fw_event_work_put(fw_event);
5622}
5623
5624
5625
5626
5627
5628
5629
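/**
 * _scsih_smart_predicted_fault - process SMART predicted fault
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Logs enclosure/chassis info, optionally turns on the PFA LED, and adds
 * a SMART data event to the ctl event log.
 */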
5630static void
5631_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5632{
5633 struct scsi_target *starget;
5634 struct MPT3SAS_TARGET *sas_target_priv_data;
5635 Mpi2EventNotificationReply_t *event_reply;
5636 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5637 struct _sas_device *sas_device;
5638 ssize_t sz;
5639 unsigned long flags;
5640
5641
5642 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5643 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5644 if (!sas_device)
5645 goto out_unlock;
5646
5647 starget = sas_device->starget;
5648 sas_target_priv_data = starget->hostdata;
5649
5650 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5651 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5652 goto out_unlock;
5653
5654 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5655
5656 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5657
5658 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5659 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5660
5661
5662 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5663 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5664 event_reply = kzalloc(sz, GFP_ATOMIC);
5665 if (!event_reply) {
5666 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5667 __FILE__, __LINE__, __func__);
5668 goto out;
5669 }
5670
5671 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5672 event_reply->Event =
5673 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5674 event_reply->MsgLength = sz/4;
5675 event_reply->EventDataLength =
5676 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5677 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5678 event_reply->EventData;
5679 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5680 event_data->ASC = 0x5D;
5681 event_data->DevHandle = cpu_to_le16(handle);
5682 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5683 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5684 kfree(event_reply);
5685out:
5686 if (sas_device)
5687 sas_device_put(sas_device);
5688 return;
5689
5690out_unlock:
5691 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5692 goto out;
5693}
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
5706
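/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Callback handler when using scsih_qcmd.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *	0 means the mf is freed from this function.
 */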
5707static u8
5708_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5709{
5710 Mpi25SCSIIORequest_t *mpi_request;
5711 Mpi2SCSIIOReply_t *mpi_reply;
5712 struct scsi_cmnd *scmd;
5713 struct scsiio_tracker *st;
5714 u16 ioc_status;
5715 u32 xfer_cnt;
5716 u8 scsi_state;
5717 u8 scsi_status;
5718 u32 log_info;
5719 struct MPT3SAS_DEVICE *sas_device_priv_data;
5720 u32 response_code = 0;
5721
5722 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5723
5724 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5725 if (scmd == NULL)
5726 return 1;
5727
5728 _scsih_set_satl_pending(scmd, false);
5729
5730 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5731
5732 if (mpi_reply == NULL) {
5733 scmd->result = DID_OK << 16;
5734 goto out;
5735 }
5736
5737 sas_device_priv_data = scmd->device->hostdata;
5738 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5739 sas_device_priv_data->sas_target->deleted) {
5740 scmd->result = DID_NO_CONNECT << 16;
5741 goto out;
5742 }
5743 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5744
5745
5746
5747
5748
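	/* WARPDRIVE: a direct I/O that did not complete with task
	 * terminated status is redirected back to the volume: clear the
	 * direct I/O flag and resubmit with the volume's device handle.
	 */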
5749 st = scsi_cmd_priv(scmd);
5750 if (st->direct_io &&
5751 ((ioc_status & MPI2_IOCSTATUS_MASK)
5752 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5753 st->direct_io = 0;
5754 st->scmd = scmd;
5755 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5756 mpi_request->DevHandle =
5757 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5758 ioc->put_smid_scsi_io(ioc, smid,
5759 sas_device_priv_data->sas_target->handle);
5760 return 0;
5761 }
5762
5763 scsi_state = mpi_reply->SCSIState;
5764 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5765 response_code =
5766 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5767 if (!sas_device_priv_data->tlr_snoop_check) {
5768 sas_device_priv_data->tlr_snoop_check++;
5769 if ((!ioc->is_warpdrive &&
5770 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5771 !scsih_is_nvme(&scmd->device->sdev_gendev))
5772 && sas_is_tlr_enabled(scmd->device) &&
5773 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5774 sas_disable_tlr(scmd->device);
5775 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5776 }
5777 }
5778
5779 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5780 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5781 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5782 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5783 else
5784 log_info = 0;
5785 ioc_status &= MPI2_IOCSTATUS_MASK;
5786 scsi_status = mpi_reply->SCSIStatus;
5787
5788 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5789 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5790 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5791 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5792 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5793 }
5794
5795 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5796 struct sense_info data;
5797 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5798 smid);
5799 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5800 le32_to_cpu(mpi_reply->SenseCount));
5801 memcpy(scmd->sense_buffer, sense_data, sz);
5802 _scsih_normalize_sense(scmd->sense_buffer, &data);
5803
5804 if (data.asc == 0x5D)
5805 _scsih_smart_predicted_fault(ioc,
5806 le16_to_cpu(mpi_reply->DevHandle));
5807 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5808
5809 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5810 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5811 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5812 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5813 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5814 }
5815 switch (ioc_status) {
5816 case MPI2_IOCSTATUS_BUSY:
5817 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5818 scmd->result = SAM_STAT_BUSY;
5819 break;
5820
5821 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5822 scmd->result = DID_NO_CONNECT << 16;
5823 break;
5824
5825 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5826 if (sas_device_priv_data->block) {
5827 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5828 goto out;
5829 }
5830 if (log_info == 0x31110630) {
5831 if (scmd->retries > 2) {
5832 scmd->result = DID_NO_CONNECT << 16;
5833 scsi_device_set_state(scmd->device,
5834 SDEV_OFFLINE);
5835 } else {
5836 scmd->result = DID_SOFT_ERROR << 16;
5837 scmd->device->expecting_cc_ua = 1;
5838 }
5839 break;
5840 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5841 scmd->result = DID_RESET << 16;
5842 break;
5843 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5844 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5845 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5846 scmd->result = DID_RESET << 16;
5847 break;
5848 }
5849 scmd->result = DID_SOFT_ERROR << 16;
5850 break;
5851 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5852 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5853 scmd->result = DID_RESET << 16;
5854 break;
5855
5856 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5857 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5858 scmd->result = DID_SOFT_ERROR << 16;
5859 else
5860 scmd->result = (DID_OK << 16) | scsi_status;
5861 break;
5862
5863 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5864 scmd->result = (DID_OK << 16) | scsi_status;
5865
5866 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5867 break;
5868
5869 if (xfer_cnt < scmd->underflow) {
5870 if (scsi_status == SAM_STAT_BUSY)
5871 scmd->result = SAM_STAT_BUSY;
5872 else
5873 scmd->result = DID_SOFT_ERROR << 16;
5874 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5875 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5876 scmd->result = DID_SOFT_ERROR << 16;
5877 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5878 scmd->result = DID_RESET << 16;
5879 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5880 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5881 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5882 scmd->result = (DRIVER_SENSE << 24) |
5883 SAM_STAT_CHECK_CONDITION;
5884 scmd->sense_buffer[0] = 0x70;
5885 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5886 scmd->sense_buffer[12] = 0x20;
5887 scmd->sense_buffer[13] = 0;
5888 }
5889 break;
5890
5891 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5892 scsi_set_resid(scmd, 0);
5893 fallthrough;
5894 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5895 case MPI2_IOCSTATUS_SUCCESS:
5896 scmd->result = (DID_OK << 16) | scsi_status;
5897 if (response_code ==
5898 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5899 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5900 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5901 scmd->result = DID_SOFT_ERROR << 16;
5902 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5903 scmd->result = DID_RESET << 16;
5904 break;
5905
5906 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5907 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5908 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5909 _scsih_eedp_error_handling(scmd, ioc_status);
5910 break;
5911
5912 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5913 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5914 case MPI2_IOCSTATUS_INVALID_SGL:
5915 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5916 case MPI2_IOCSTATUS_INVALID_FIELD:
5917 case MPI2_IOCSTATUS_INVALID_STATE:
5918 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5919 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5920 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5921 default:
5922 scmd->result = DID_SOFT_ERROR << 16;
5923 break;
5924
5925 }
5926
5927 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5928 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5929
5930 out:
5931
5932 scsi_dma_unmap(scmd);
5933 mpt3sas_base_free_smid(ioc, smid);
5934 scmd->scsi_done(scmd);
5935 return 0;
5936}
5937
5938
5939
5940
5941
5942
5943
5944
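/**
 * _scsih_update_vphys_after_reset - update the HBA ports' vphys_list
 *	after host reset
 * @ioc: per adapter object
 *
 * Re-reads SAS IO Unit Page 0 and SAS Phy Page 0, moves virtual phys to
 * the port they now belong to, and clears their dirty flags.
 */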
5945static void
5946_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5947{
5948 u16 sz, ioc_status;
5949 int i;
5950 Mpi2ConfigReply_t mpi_reply;
5951 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5952 u16 attached_handle;
5953 u64 attached_sas_addr;
5954 u8 found = 0, port_id;
5955 Mpi2SasPhyPage0_t phy_pg0;
5956 struct hba_port *port, *port_next, *mport;
5957 struct virtual_phy *vphy, *vphy_next;
5958 struct _sas_device *sas_device;
5959
5960
5961
5962
5963 list_for_each_entry_safe(port, port_next,
5964 &ioc->port_table_list, list) {
5965 if (!port->vphys_mask)
5966 continue;
5967 list_for_each_entry_safe(vphy, vphy_next,
5968 &port->vphys_list, list) {
5969 vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5970 }
5971 }
5972
5973
5974
5975
5976 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
5977 (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
5978 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5979 if (!sas_iounit_pg0) {
5980 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5981 __FILE__, __LINE__, __func__);
5982 return;
5983 }
5984 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5985 sas_iounit_pg0, sz)) != 0)
5986 goto out;
5987 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5988 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5989 goto out;
5990
5991
5992
5993 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5994
5995
5996
5997 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5998 MPI2_SAS_NEG_LINK_RATE_1_5)
5999 continue;
6000
6001
6002
6003
6004
6005
6006
6007 if (!(le32_to_cpu(
6008 sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6009 MPI2_SAS_DEVICE_INFO_SEP))
6010 continue;
6011
6012 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6013 i))) {
6014 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6015 __FILE__, __LINE__, __func__);
6016 continue;
6017 }
6018
6019 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6020 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6021 continue;
6022
6023
6024
6025 attached_handle = le16_to_cpu(
6026 sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6027 if (_scsih_get_sas_address(ioc, attached_handle,
6028 &attached_sas_addr) != 0) {
6029 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6030 __FILE__, __LINE__, __func__);
6031 continue;
6032 }
6033
6034 found = 0;
6035 port = port_next = NULL;
6036
6037
6038
6039
6040 list_for_each_entry_safe(port,
6041 port_next, &ioc->port_table_list, list) {
6042 if (!port->vphys_mask)
6043 continue;
6044 list_for_each_entry_safe(vphy, vphy_next,
6045 &port->vphys_list, list) {
6046
6047
6048
6049
6050 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6051 continue;
6052
6053
6054
6055
6056
6057
6058 if (vphy->sas_address != attached_sas_addr)
6059 continue;
6060
6061
6062
6063
6064 if (!(vphy->phy_mask & (1 << i)))
6065 vphy->phy_mask = (1 << i);
6066
6067
6068
6069
6070
6071
6072
6073 port_id = sas_iounit_pg0->PhyData[i].Port;
6074 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6075 if (!mport) {
6076 mport = kzalloc(
6077 sizeof(struct hba_port), GFP_KERNEL);
6078 if (!mport)
6079 break;
6080 mport->port_id = port_id;
6081 ioc_info(ioc,
6082 "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6083 __func__, mport, mport->port_id);
6084 list_add_tail(&mport->list,
6085 &ioc->port_table_list);
6086 }
6087
6088
6089
6090
6091
6092
6093
6094 if (port != mport) {
6095 if (!mport->vphys_mask)
6096 INIT_LIST_HEAD(
6097 &mport->vphys_list);
6098 mport->vphys_mask |= (1 << i);
6099 port->vphys_mask &= ~(1 << i);
6100 list_move(&vphy->list,
6101 &mport->vphys_list);
6102 sas_device = mpt3sas_get_sdev_by_addr(
6103 ioc, attached_sas_addr, port);
6104 if (sas_device)
6105 sas_device->port = mport;
6106 }
6107
6108
6109
6110
6111
6112
6113
6114
6115 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6116 mport->sas_address = 0;
6117 mport->phy_mask = 0;
6118 mport->flags &=
6119 ~HBA_PORT_FLAG_DIRTY_PORT;
6120 }
6121
6122
6123
6124 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6125 found = 1;
6126 break;
6127 }
6128 if (found)
6129 break;
6130 }
6131 }
6132out:
6133 kfree(sas_iounit_pg0);
6134}
6135
6136
6137
6138
6139
6140
6141
6142
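/**
 * _scsih_get_port_table_after_reset - construct temporary port table
 * @ioc: per adapter object
 * @port_table: address where the port table is constructed
 *
 * Return: number of HBA port entries available after reset.
 */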
6143static int
6144_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6145 struct hba_port *port_table)
6146{
6147 u16 sz, ioc_status;
6148 int i, j;
6149 Mpi2ConfigReply_t mpi_reply;
6150 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6151 u16 attached_handle;
6152 u64 attached_sas_addr;
6153 u8 found = 0, port_count = 0, port_id;
6154
6155 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6156 * sizeof(Mpi2SasIOUnit0PhyData_t));
6157 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6158 if (!sas_iounit_pg0) {
6159 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6160 __FILE__, __LINE__, __func__);
6161 return port_count;
6162 }
6163
6164 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6165 sas_iounit_pg0, sz)) != 0)
6166 goto out;
6167 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6168 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6169 goto out;
6170 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6171 found = 0;
6172 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6173 MPI2_SAS_NEG_LINK_RATE_1_5)
6174 continue;
6175 attached_handle =
6176 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6177 if (_scsih_get_sas_address(
6178 ioc, attached_handle, &attached_sas_addr) != 0) {
6179 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6180 __FILE__, __LINE__, __func__);
6181 continue;
6182 }
6183
6184 for (j = 0; j < port_count; j++) {
6185 port_id = sas_iounit_pg0->PhyData[i].Port;
6186 if (port_table[j].port_id == port_id &&
6187 port_table[j].sas_address == attached_sas_addr) {
6188 port_table[j].phy_mask |= (1 << i);
6189 found = 1;
6190 break;
6191 }
6192 }
6193
6194 if (found)
6195 continue;
6196
6197 port_id = sas_iounit_pg0->PhyData[i].Port;
6198 port_table[port_count].port_id = port_id;
6199 port_table[port_count].phy_mask = (1 << i);
6200 port_table[port_count].sas_address = attached_sas_addr;
6201 port_count++;
6202 }
6203out:
6204 kfree(sas_iounit_pg0);
6205 return port_count;
6206}
6207
6208enum hba_port_matched_codes {
6209 NOT_MATCHED = 0,
6210 MATCHED_WITH_ADDR_AND_PHYMASK,
6211 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6212 MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6213 MATCHED_WITH_ADDR,
6214};
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225
6226
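/**
 * _scsih_look_and_get_matched_port_entry - get matched hba port entry
 *	from the HBA port table
 * @ioc: per adapter object
 * @port_entry: port entry from the temporary port table to search for
 * @matched_port_entry: returns the matched hba port entry
 * @count: returns the number of SAS-address-only matches
 *
 * Return: type of match found.
 */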
6227static enum hba_port_matched_codes
6228_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6229 struct hba_port *port_entry,
6230 struct hba_port **matched_port_entry, int *count)
6231{
6232 struct hba_port *port_table_entry, *matched_port = NULL;
6233 enum hba_port_matched_codes matched_code = NOT_MATCHED;
6234 int lcount = 0;
6235 *matched_port_entry = NULL;
6236
6237 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6238 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6239 continue;
6240
6241 if ((port_table_entry->sas_address == port_entry->sas_address)
6242 && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6243 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6244 matched_port = port_table_entry;
6245 break;
6246 }
6247
6248 if ((port_table_entry->sas_address == port_entry->sas_address)
6249 && (port_table_entry->phy_mask & port_entry->phy_mask)
6250 && (port_table_entry->port_id == port_entry->port_id)) {
6251 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6252 matched_port = port_table_entry;
6253 continue;
6254 }
6255
6256 if ((port_table_entry->sas_address == port_entry->sas_address)
6257 && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6258 if (matched_code ==
6259 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6260 continue;
6261 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6262 matched_port = port_table_entry;
6263 continue;
6264 }
6265
6266 if (port_table_entry->sas_address == port_entry->sas_address) {
6267 if (matched_code ==
6268 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6269 continue;
6270 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6271 continue;
6272 matched_code = MATCHED_WITH_ADDR;
6273 matched_port = port_table_entry;
6274 lcount++;
6275 }
6276 }
6277
6278 *matched_port_entry = matched_port;
6279 if (matched_code == MATCHED_WITH_ADDR)
6280 *count = lcount;
6281 return matched_code;
6282}
6283
6284
6285
6286
6287
6288
6289
6290
6291
6292
6293
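/**
 * _scsih_del_phy_part_of_anther_port - remove a phy that is claimed by
 *	another port
 * @ioc: per adapter object
 * @port_table: temporary port table built after reset
 * @index: port table entry index being processed
 * @port_count: number of ports available after host reset
 * @offset: HBA phy bit offset
 */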
6294static void
6295_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6296 struct hba_port *port_table,
6297 int index, u8 port_count, int offset)
6298{
6299 struct _sas_node *sas_node = &ioc->sas_hba;
6300 u32 i, found = 0;
6301
6302 for (i = 0; i < port_count; i++) {
6303 if (i == index)
6304 continue;
6305
6306 if (port_table[i].phy_mask & (1 << offset)) {
6307 mpt3sas_transport_del_phy_from_an_existing_port(
6308 ioc, sas_node, &sas_node->phy[offset]);
6309 found = 1;
6310 break;
6311 }
6312 }
6313 if (!found)
6314 port_table[index].phy_mask |= (1 << offset);
6315}
6316
6317
6318
6319
6320
6321
6322
6323
6324
6325
6326
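/**
 * _scsih_add_or_del_phys_from_existing_port - add/remove phys to/from
 *	an existing port
 * @ioc: per adapter object
 * @hba_port_entry: hba port table entry
 * @port_table: temporary port table built after reset
 * @index: port table entry index being processed
 * @port_count: number of ports available after host reset
 */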
6327static void
6328_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6329 struct hba_port *hba_port_entry, struct hba_port *port_table,
6330 int index, int port_count)
6331{
6332 u32 phy_mask, offset = 0;
6333 struct _sas_node *sas_node = &ioc->sas_hba;
6334
6335 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6336
6337 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6338 if (phy_mask & (1 << offset)) {
6339 if (!(port_table[index].phy_mask & (1 << offset))) {
6340 _scsih_del_phy_part_of_anther_port(
6341 ioc, port_table, index, port_count,
6342 offset);
6343 continue;
6344 }
6345 if (sas_node->phy[offset].phy_belongs_to_port)
6346 mpt3sas_transport_del_phy_from_an_existing_port(
6347 ioc, sas_node, &sas_node->phy[offset]);
6348 mpt3sas_transport_add_phy_to_an_existing_port(
6349 ioc, sas_node, &sas_node->phy[offset],
6350 hba_port_entry->sas_address,
6351 hba_port_entry);
6352 }
6353 }
6354}
6355
6356
6357
6358
6359
6360
6361
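/**
 * _scsih_del_dirty_vphy - delete virtual_phy objects still marked dirty
 * @ioc: per adapter object
 */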
6362static void
6363_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6364{
6365 struct hba_port *port, *port_next;
6366 struct virtual_phy *vphy, *vphy_next;
6367
6368 list_for_each_entry_safe(port, port_next,
6369 &ioc->port_table_list, list) {
6370 if (!port->vphys_mask)
6371 continue;
6372 list_for_each_entry_safe(vphy, vphy_next,
6373 &port->vphys_list, list) {
6374 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6375 drsprintk(ioc, ioc_info(ioc,
6376 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6377 vphy, port->port_id,
6378 vphy->phy_mask));
6379 port->vphys_mask &= ~vphy->phy_mask;
6380 list_del(&vphy->list);
6381 kfree(vphy);
6382 }
6383 }
6384 if (!port->vphys_mask && !port->sas_address)
6385 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6386 }
6387}
6388
6389
6390
6391
6392
6393
6394
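/**
 * _scsih_del_dirty_port_entries - delete dirty port entries from the port
 *	list after host reset
 * @ioc: per adapter object
 */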
6395static void
6396_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6397{
6398 struct hba_port *port, *port_next;
6399
6400 list_for_each_entry_safe(port, port_next,
6401 &ioc->port_table_list, list) {
6402 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6403 port->flags & HBA_PORT_FLAG_NEW_PORT)
6404 continue;
6405
6406 drsprintk(ioc, ioc_info(ioc,
6407 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6408 port, port->port_id, port->phy_mask));
6409 list_del(&port->list);
6410 kfree(port);
6411 }
6412}
6413
6414
6415
6416
6417
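/**
 * _scsih_sas_port_refresh - update the HBA port table after host reset
 * @ioc: per adapter object
 */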
6418static void
6419_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6420{
6421 u32 port_count = 0;
6422 struct hba_port *port_table;
6423 struct hba_port *port_table_entry;
6424 struct hba_port *port_entry = NULL;
6425 int i, j, count = 0, lcount = 0;
6426 int ret;
6427 u64 sas_addr;
6428 u8 num_phys;
6429
6430 drsprintk(ioc, ioc_info(ioc,
6431 "updating ports for sas_host(0x%016llx)\n",
6432 (unsigned long long)ioc->sas_hba.sas_address));
6433
6434 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6435 if (!num_phys) {
6436 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6437 __FILE__, __LINE__, __func__);
6438 return;
6439 }
6440
6441 if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6442 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6443 __FILE__, __LINE__, __func__);
6444 return;
6445 }
6446 ioc->sas_hba.num_phys = num_phys;
6447
6448 port_table = kcalloc(ioc->sas_hba.num_phys,
6449 sizeof(struct hba_port), GFP_KERNEL);
6450 if (!port_table)
6451 return;
6452
6453 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6454 if (!port_count)
6455 return;
6456
6457 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6458 for (j = 0; j < port_count; j++)
6459 drsprintk(ioc, ioc_info(ioc,
6460 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6461 port_table[j].port_id,
6462 port_table[j].phy_mask, port_table[j].sas_address));
6463
6464 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6465 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6466
6467 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6468 port_table_entry = NULL;
6469 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6470 drsprintk(ioc, ioc_info(ioc,
6471 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6472 port_table_entry->port_id,
6473 port_table_entry->phy_mask,
6474 port_table_entry->sas_address));
6475 }
6476
6477 for (j = 0; j < port_count; j++) {
6478 ret = _scsih_look_and_get_matched_port_entry(ioc,
6479 &port_table[j], &port_entry, &count);
6480 if (!port_entry) {
6481 drsprintk(ioc, ioc_info(ioc,
6482 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6483 port_table[j].sas_address,
6484 port_table[j].port_id));
6485 continue;
6486 }
6487
6488 switch (ret) {
6489 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6490 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6491 _scsih_add_or_del_phys_from_existing_port(ioc,
6492 port_entry, port_table, j, port_count);
6493 break;
6494 case MATCHED_WITH_ADDR:
6495 sas_addr = port_table[j].sas_address;
6496 for (i = 0; i < port_count; i++) {
6497 if (port_table[i].sas_address == sas_addr)
6498 lcount++;
6499 }
6500
6501 if (count > 1 || lcount > 1)
6502 port_entry = NULL;
6503 else
6504 _scsih_add_or_del_phys_from_existing_port(ioc,
6505 port_entry, port_table, j, port_count);
6506 }
6507
6508 if (!port_entry)
6509 continue;
6510
6511 if (port_entry->port_id != port_table[j].port_id)
6512 port_entry->port_id = port_table[j].port_id;
6513 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6514 port_entry->phy_mask = port_table[j].phy_mask;
6515 }
6516
6517 port_table_entry = NULL;
6518}
6519
6520
6521
6522
6523
6524
6525
6526
6527
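/**
 * _scsih_alloc_vphy - allocate virtual_phy object
 * @ioc: per adapter object
 * @port_id: Port ID number
 * @phy_num: HBA Phy number
 *
 * Return: the existing virtual_phy for this port/phy, or a newly
 * allocated one added to the port's vphys_list; NULL on failure.
 */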
6528static struct virtual_phy *
6529_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6530{
6531 struct virtual_phy *vphy;
6532 struct hba_port *port;
6533
6534 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6535 if (!port)
6536 return NULL;
6537
6538 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6539 if (!vphy) {
6540 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6541 if (!vphy)
6542 return NULL;
6543
6544 if (!port->vphys_mask)
6545 INIT_LIST_HEAD(&port->vphys_list);
6546
6547
6548
6549
6550
6551 port->vphys_mask |= (1 << phy_num);
6552 vphy->phy_mask |= (1 << phy_num);
6553
6554 list_add_tail(&vphy->list, &port->vphys_list);
6555
6556 ioc_info(ioc,
6557 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6558 vphy, port->port_id, phy_num);
6559 }
6560 return vphy;
6561}
6562
6563
6564
6565
6566
6567
6568
6569
6570
6571
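/**
 * _scsih_sas_host_refresh - refresh sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * Device handles may change across resets and during port enable, so the
 * host phy handles, ports and virtual phys are updated here.
 */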
6572static void
6573_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6574{
6575 u16 sz;
6576 u16 ioc_status;
6577 int i;
6578 Mpi2ConfigReply_t mpi_reply;
6579 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6580 u16 attached_handle;
6581 u8 link_rate, port_id;
6582 struct hba_port *port;
6583 Mpi2SasPhyPage0_t phy_pg0;
6584
6585 dtmprintk(ioc,
6586 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6587 (u64)ioc->sas_hba.sas_address));
6588
6589 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6590 * sizeof(Mpi2SasIOUnit0PhyData_t));
6591 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6592 if (!sas_iounit_pg0) {
6593 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6594 __FILE__, __LINE__, __func__);
6595 return;
6596 }
6597
6598 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6599 sas_iounit_pg0, sz)) != 0)
6600 goto out;
6601 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6602 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6603 goto out;
6604 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6605 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
6606 if (i == 0)
6607 ioc->sas_hba.handle = le16_to_cpu(
6608 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6609 port_id = sas_iounit_pg0->PhyData[i].Port;
6610 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6611 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6612 if (!port)
6613 goto out;
6614
6615 port->port_id = port_id;
6616 ioc_info(ioc,
6617 "hba_port entry: %p, port: %d is added to hba_port list\n",
6618 port, port->port_id);
6619 if (ioc->shost_recovery)
6620 port->flags = HBA_PORT_FLAG_NEW_PORT;
6621 list_add_tail(&port->list, &ioc->port_table_list);
6622 }
6623
6624
6625
6626 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6627 MPI2_SAS_DEVICE_INFO_SEP &&
6628 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
6629 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6630 &phy_pg0, i))) {
6631 ioc_err(ioc,
6632 "failure at %s:%d/%s()!\n",
6633 __FILE__, __LINE__, __func__);
6634 goto out;
6635 }
6636 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6637 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6638 continue;
6639
6640
6641
6642
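			/*
			 * Allocate a virtual_phy entry for this vSES phy
			 * if one is not already present.
			 */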
6643 if (!_scsih_alloc_vphy(ioc, port_id, i))
6644 goto out;
6645 ioc->sas_hba.phy[i].hba_vphy = 1;
6646 }
6647
6648
6649
6650
6651
6652 if (!ioc->sas_hba.phy[i].phy) {
6653 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6654 &phy_pg0, i))) {
6655 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6656 __FILE__, __LINE__, __func__);
6657 continue;
6658 }
6659 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6660 MPI2_IOCSTATUS_MASK;
6661 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6662 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6663 __FILE__, __LINE__, __func__);
6664 continue;
6665 }
6666 ioc->sas_hba.phy[i].phy_id = i;
6667 mpt3sas_transport_add_host_phy(ioc,
6668 &ioc->sas_hba.phy[i], phy_pg0,
6669 ioc->sas_hba.parent_dev);
6670 continue;
6671 }
6672 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6673 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
6674 AttachedDevHandle);
6675 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6676 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6677 ioc->sas_hba.phy[i].port =
6678 mpt3sas_get_port_by_id(ioc, port_id, 0);
6679 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6680 attached_handle, i, link_rate,
6681 ioc->sas_hba.phy[i].port);
6682 }
6683
6684
6685
6686
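	/*
	 * Mark any previously allocated phys beyond the current phy count
	 * as disabled so the transport layer reflects the reduced port.
	 */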
6687 for (i = ioc->sas_hba.num_phys;
6688 i < ioc->sas_hba.nr_phys_allocated; i++) {
6689 if (ioc->sas_hba.phy[i].phy &&
6690 ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
6691 SAS_LINK_RATE_1_5_GBPS)
6692 mpt3sas_transport_update_links(ioc,
6693 ioc->sas_hba.sas_address, 0, i,
6694 MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
6695 }
6696 out:
6697 kfree(sas_iounit_pg0);
6698}
6699
6700
6701
6702
6703
6704
6705
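/**
 * _scsih_sas_host_add - create sas host object
 * @ioc: per adapter object
 *
 * Creates the sas host object by reading the SAS IO Unit and phy config
 * pages, allocates hba_port entries, and registers each direct attached
 * phy with the SAS transport layer.
 */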
6706static void
6707_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6708{
6709 int i;
6710 Mpi2ConfigReply_t mpi_reply;
6711 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6712 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6713 Mpi2SasPhyPage0_t phy_pg0;
6714 Mpi2SasDevicePage0_t sas_device_pg0;
6715 Mpi2SasEnclosurePage0_t enclosure_pg0;
6716 u16 ioc_status;
6717 u16 sz;
6718 u8 device_missing_delay;
6719 u8 num_phys, port_id;
6720 struct hba_port *port;
6721
6722 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6723 if (!num_phys) {
6724 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6725 __FILE__, __LINE__, __func__);
6726 return;
6727 }
6728
6729 ioc->sas_hba.nr_phys_allocated = max_t(u8,
6730 MPT_MAX_HBA_NUM_PHYS, num_phys);
6731 ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
6732 sizeof(struct _sas_phy), GFP_KERNEL);
6733 if (!ioc->sas_hba.phy) {
6734 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6735 __FILE__, __LINE__, __func__);
6736 goto out;
6737 }
6738 ioc->sas_hba.num_phys = num_phys;
6739
6740
6741 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
6742 sizeof(Mpi2SasIOUnit0PhyData_t));
6743 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6744 if (!sas_iounit_pg0) {
6745 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6746 __FILE__, __LINE__, __func__);
6747 return;
6748 }
6749 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6750 sas_iounit_pg0, sz))) {
6751 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6752 __FILE__, __LINE__, __func__);
6753 goto out;
6754 }
6755 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6756 MPI2_IOCSTATUS_MASK;
6757 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6758 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6759 __FILE__, __LINE__, __func__);
6760 goto out;
6761 }
6762
6763
6764 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
6765 sizeof(Mpi2SasIOUnit1PhyData_t));
6766 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6767 if (!sas_iounit_pg1) {
6768 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6769 __FILE__, __LINE__, __func__);
6770 goto out;
6771 }
6772 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6773 sas_iounit_pg1, sz))) {
6774 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6775 __FILE__, __LINE__, __func__);
6776 goto out;
6777 }
6778 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6779 MPI2_IOCSTATUS_MASK;
6780 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6781 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6782 __FILE__, __LINE__, __func__);
6783 goto out;
6784 }
6785
6786 ioc->io_missing_delay =
6787 sas_iounit_pg1->IODeviceMissingDelay;
6788 device_missing_delay =
6789 sas_iounit_pg1->ReportDeviceMissingDelay;
6790 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6791 ioc->device_missing_delay = (device_missing_delay &
6792 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6793 else
6794 ioc->device_missing_delay = device_missing_delay &
6795 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6796
6797 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
6798 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6799 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6800 i))) {
6801 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6802 __FILE__, __LINE__, __func__);
6803 goto out;
6804 }
6805 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6806 MPI2_IOCSTATUS_MASK;
6807 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6808 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6809 __FILE__, __LINE__, __func__);
6810 goto out;
6811 }
6812
6813 if (i == 0)
6814 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6815 PhyData[0].ControllerDevHandle);
6816
6817 port_id = sas_iounit_pg0->PhyData[i].Port;
6818 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6819 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6820 if (!port)
6821 goto out;
6822
6823 port->port_id = port_id;
6824 ioc_info(ioc,
6825 "hba_port entry: %p, port: %d is added to hba_port list\n",
6826 port, port->port_id);
6827 list_add_tail(&port->list,
6828 &ioc->port_table_list);
6829 }
6830
6831
6832
6833
6834 if ((le32_to_cpu(phy_pg0.PhyInfo) &
6835 MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6836 (phy_pg0.NegotiatedLinkRate >> 4) >=
6837 MPI2_SAS_NEG_LINK_RATE_1_5) {
6838
6839
6840
6841 if (!_scsih_alloc_vphy(ioc, port_id, i))
6842 goto out;
6843 ioc->sas_hba.phy[i].hba_vphy = 1;
6844 }
6845
6846 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6847 ioc->sas_hba.phy[i].phy_id = i;
6848 ioc->sas_hba.phy[i].port =
6849 mpt3sas_get_port_by_id(ioc, port_id, 0);
6850 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6851 phy_pg0, ioc->sas_hba.parent_dev);
6852 }
6853 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6854 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6855 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6856 __FILE__, __LINE__, __func__);
6857 goto out;
6858 }
6859 ioc->sas_hba.enclosure_handle =
6860 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6861 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6862 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6863 ioc->sas_hba.handle,
6864 (u64)ioc->sas_hba.sas_address,
6865 ioc->sas_hba.num_phys);
6866
6867 if (ioc->sas_hba.enclosure_handle) {
6868 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6869 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6870 ioc->sas_hba.enclosure_handle)))
6871 ioc->sas_hba.enclosure_logical_id =
6872 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6873 }
6874
6875 out:
6876 kfree(sas_iounit_pg1);
6877 kfree(sas_iounit_pg0);
6878}
6879
6880
6881
6882
6883
6884
6885
6886
6887
6888
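/**
 * _scsih_expander_add - creating expander object
 * @ioc: per adapter object
 * @handle: expander handle
 *
 * Creates the expander object from Expander Page 0/1, registers its phys
 * with the SAS transport layer and adds it to the driver's expander list.
 * Parent expanders are added recursively if not already present.
 *
 * Return: 0 for success, else error.
 */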
6889static int
6890_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6891{
6892 struct _sas_node *sas_expander;
6893 struct _enclosure_node *enclosure_dev;
6894 Mpi2ConfigReply_t mpi_reply;
6895 Mpi2ExpanderPage0_t expander_pg0;
6896 Mpi2ExpanderPage1_t expander_pg1;
6897 u32 ioc_status;
6898 u16 parent_handle;
6899 u64 sas_address, sas_address_parent = 0;
6900 int i;
6901 unsigned long flags;
6902 struct _sas_port *mpt3sas_port = NULL;
6903 u8 port_id;
6904
6905 int rc = 0;
6906
6907 if (!handle)
6908 return -1;
6909
6910 if (ioc->shost_recovery || ioc->pci_error_recovery)
6911 return -1;
6912
6913 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6914 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6915 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6916 __FILE__, __LINE__, __func__);
6917 return -1;
6918 }
6919
6920 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6921 MPI2_IOCSTATUS_MASK;
6922 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6923 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6924 __FILE__, __LINE__, __func__);
6925 return -1;
6926 }
6927
6928
6929 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6930 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6931 != 0) {
6932 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6933 __FILE__, __LINE__, __func__);
6934 return -1;
6935 }
6936
6937 port_id = expander_pg0.PhysicalPort;
6938 if (sas_address_parent != ioc->sas_hba.sas_address) {
6939 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6940 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6941 sas_address_parent,
6942 mpt3sas_get_port_by_id(ioc, port_id, 0));
6943 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6944 if (!sas_expander) {
6945 rc = _scsih_expander_add(ioc, parent_handle);
6946 if (rc != 0)
6947 return rc;
6948 }
6949 }
6950
6951 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6952 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6953 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6954 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6955 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6956
6957 if (sas_expander)
6958 return 0;
6959
6960 sas_expander = kzalloc(sizeof(struct _sas_node),
6961 GFP_KERNEL);
6962 if (!sas_expander) {
6963 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6964 __FILE__, __LINE__, __func__);
6965 return -1;
6966 }
6967
6968 sas_expander->handle = handle;
6969 sas_expander->num_phys = expander_pg0.NumPhys;
6970 sas_expander->sas_address_parent = sas_address_parent;
6971 sas_expander->sas_address = sas_address;
6972 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6973 if (!sas_expander->port) {
6974 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6975 __FILE__, __LINE__, __func__);
6976 rc = -1;
6977 goto out_fail;
6978 }
6979
6980 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6981 handle, parent_handle,
6982 (u64)sas_expander->sas_address, sas_expander->num_phys);
6983
6984 if (!sas_expander->num_phys) {
6985 rc = -1;
6986 goto out_fail;
6987 }
6988 sas_expander->phy = kcalloc(sas_expander->num_phys,
6989 sizeof(struct _sas_phy), GFP_KERNEL);
6990 if (!sas_expander->phy) {
6991 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6992 __FILE__, __LINE__, __func__);
6993 rc = -1;
6994 goto out_fail;
6995 }
6996
6997 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6998 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6999 sas_address_parent, sas_expander->port);
7000 if (!mpt3sas_port) {
7001 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7002 __FILE__, __LINE__, __func__);
7003 rc = -1;
7004 goto out_fail;
7005 }
7006 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
7007 sas_expander->rphy = mpt3sas_port->rphy;
7008
7009 for (i = 0 ; i < sas_expander->num_phys ; i++) {
7010 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
7011 &expander_pg1, i, handle))) {
7012 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7013 __FILE__, __LINE__, __func__);
7014 rc = -1;
7015 goto out_fail;
7016 }
7017 sas_expander->phy[i].handle = handle;
7018 sas_expander->phy[i].phy_id = i;
7019 sas_expander->phy[i].port =
7020 mpt3sas_get_port_by_id(ioc, port_id, 0);
7021
7022 if ((mpt3sas_transport_add_expander_phy(ioc,
7023 &sas_expander->phy[i], expander_pg1,
7024 sas_expander->parent_dev))) {
7025 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7026 __FILE__, __LINE__, __func__);
7027 rc = -1;
7028 goto out_fail;
7029 }
7030 }
7031
7032 if (sas_expander->enclosure_handle) {
7033 enclosure_dev =
7034 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7035 sas_expander->enclosure_handle);
7036 if (enclosure_dev)
7037 sas_expander->enclosure_logical_id =
7038 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7039 }
7040
7041 _scsih_expander_node_add(ioc, sas_expander);
7042 return 0;
7043
7044 out_fail:
7045
7046 if (mpt3sas_port)
7047 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
7048 sas_address_parent, sas_expander->port);
7049 kfree(sas_expander);
7050 return rc;
7051}
7052
7053
7054
7055
7056
7057
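/**
 * mpt3sas_expander_remove - removing expander object
 * @ioc: per adapter object
 * @sas_address: expander sas_address
 * @port: hba port entry
 */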
7058void
7059mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7060 struct hba_port *port)
7061{
7062 struct _sas_node *sas_expander;
7063 unsigned long flags;
7064
7065 if (ioc->shost_recovery)
7066 return;
7067
7068 if (!port)
7069 return;
7070
7071 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7072 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7073 sas_address, port);
7074 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7075 if (sas_expander)
7076 _scsih_expander_node_remove(ioc, sas_expander);
7077}
7078
7079
7080
7081
7082
7083
7084
7085
7086
7087
7088
7089
7090
7091
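/**
 * _scsih_done - internal SCSI_IO callback handler
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame (lower 32bit addr)
 *
 * Completion callback for internally generated commands issued through
 * ioc->scsih_cmds (scsih_cb_idx): copies the reply frame and wakes up
 * the waiter.
 *
 * Return: 1 meaning the message frame should be freed by the caller.
 */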
7092static u8
7093_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7094{
7095 MPI2DefaultReply_t *mpi_reply;
7096
7097 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7098 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7099 return 1;
7100 if (ioc->scsih_cmds.smid != smid)
7101 return 1;
7102 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7103 if (mpi_reply) {
7104 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7105 mpi_reply->MsgLength*4);
7106 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7107 }
7108 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7109 complete(&ioc->scsih_cmds.done);
7110 return 1;
7111}
7112
7113
7114
7115
7116#define MPT3_MAX_LUNS (255)
7117
7118
7119
7120
7121
7122
7123
7124
7125
7126
7127
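/**
 * _scsih_check_access_status - check access flags
 * @ioc: per adapter object
 * @sas_address: sas address
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure.
 */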
7128static u8
7129_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7130 u16 handle, u8 access_status)
7131{
7132 u8 rc = 1;
7133 char *desc = NULL;
7134
7135 switch (access_status) {
7136 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7137 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7138 rc = 0;
7139 break;
7140 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7141 desc = "sata capability failed";
7142 break;
7143 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7144 desc = "sata affiliation conflict";
7145 break;
7146 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7147 desc = "route not addressable";
7148 break;
7149 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7150 desc = "smp error not addressable";
7151 break;
7152 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7153 desc = "device blocked";
7154 break;
7155 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7156 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7157 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7158 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7159 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7160 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7161 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7162 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7163 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7164 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7165 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7166 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7167 desc = "sata initialization failed";
7168 break;
7169 default:
7170 desc = "unknown";
7171 break;
7172 }
7173
7174 if (!rc)
7175 return 0;
7176
7177 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7178 desc, (u64)sas_address, handle);
7179 return rc;
7180}
7181
7182
7183
7184
7185
7186
7187
7188
7189
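/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 *
 * If the device handle changed across a reset, the cached handle and
 * enclosure information are updated; I/O to the device is unblocked
 * once it is confirmed present and accessible.
 */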
7190static void
7191_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7192 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7193{
7194 Mpi2ConfigReply_t mpi_reply;
7195 Mpi2SasDevicePage0_t sas_device_pg0;
7196 struct _sas_device *sas_device = NULL;
7197 struct _enclosure_node *enclosure_dev = NULL;
7198 u32 ioc_status;
7199 unsigned long flags;
7200 u64 sas_address;
7201 struct scsi_target *starget;
7202 struct MPT3SAS_TARGET *sas_target_priv_data;
7203 u32 device_info;
7204 struct hba_port *port;
7205
7206 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7207 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7208 return;
7209
7210 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7211 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7212 return;
7213
7214
7215
7216
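	/*
	 * Wide port handling: process the device only for the phy number
	 * reported in SAS Device Page 0.
	 */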
7217 if (phy_number != sas_device_pg0.PhyNum)
7218 return;
7219
7220
7221 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7222 if (!(_scsih_is_end_device(device_info)))
7223 return;
7224
7225 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7226 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7227 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7228 if (!port)
7229 goto out_unlock;
7230 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7231 sas_address, port);
7232
7233 if (!sas_device)
7234 goto out_unlock;
7235
7236 if (unlikely(sas_device->handle != handle)) {
7237 starget = sas_device->starget;
7238 sas_target_priv_data = starget->hostdata;
7239 starget_printk(KERN_INFO, starget,
7240 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7241 sas_device->handle, handle);
7242 sas_target_priv_data->handle = handle;
7243 sas_device->handle = handle;
7244 if (le16_to_cpu(sas_device_pg0.Flags) &
7245 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7246 sas_device->enclosure_level =
7247 sas_device_pg0.EnclosureLevel;
7248 memcpy(sas_device->connector_name,
7249 sas_device_pg0.ConnectorName, 4);
7250 sas_device->connector_name[4] = '\0';
7251 } else {
7252 sas_device->enclosure_level = 0;
7253 sas_device->connector_name[0] = '\0';
7254 }
7255
7256 sas_device->enclosure_handle =
7257 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7258 sas_device->is_chassis_slot_valid = 0;
7259 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7260 sas_device->enclosure_handle);
7261 if (enclosure_dev) {
7262 sas_device->enclosure_logical_id =
7263 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7264 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7265 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7266 sas_device->is_chassis_slot_valid = 1;
7267 sas_device->chassis_slot =
7268 enclosure_dev->pg0.ChassisSlot;
7269 }
7270 }
7271 }
7272
7273
7274 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7275 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7276 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7277 handle);
7278 goto out_unlock;
7279 }
7280
7281
7282 if (_scsih_check_access_status(ioc, sas_address, handle,
7283 sas_device_pg0.AccessStatus))
7284 goto out_unlock;
7285
7286 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7287 _scsih_ublock_io_device(ioc, sas_address, port);
7288
7289 if (sas_device)
7290 sas_device_put(sas_device);
7291 return;
7292
7293out_unlock:
7294 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7295 if (sas_device)
7296 sas_device_put(sas_device);
7297}
7298
7299
7300
7301
7302
7303
7304
7305
7306
7307
7308
7309
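/**
 * _scsih_add_device - creating sas device object
 * @ioc: per adapter object
 * @handle: sas device handle
 * @phy_num: phy number first phy attached to this device
 * @is_pd: is this hidden raid component
 *
 * Creates the end device object and stores it in ioc's sas device lists
 * (init list during discovery, otherwise the regular list).
 *
 * Return: 0 for success, non-zero for failure.
 */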
7310static int
7311_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7312 u8 is_pd)
7313{
7314 Mpi2ConfigReply_t mpi_reply;
7315 Mpi2SasDevicePage0_t sas_device_pg0;
7316 struct _sas_device *sas_device;
7317 struct _enclosure_node *enclosure_dev = NULL;
7318 u32 ioc_status;
7319 u64 sas_address;
7320 u32 device_info;
7321 u8 port_id;
7322
7323 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7324 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7325 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7326 __FILE__, __LINE__, __func__);
7327 return -1;
7328 }
7329
7330 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7331 MPI2_IOCSTATUS_MASK;
7332 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7333 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7334 __FILE__, __LINE__, __func__);
7335 return -1;
7336 }
7337
7338
7339 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7340 if (!(_scsih_is_end_device(device_info)))
7341 return -1;
7342 set_bit(handle, ioc->pend_os_device_add);
7343 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7344
7345
7346 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7347 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
7349 handle);
7350 return -1;
7351 }
7352
7353
7354 if (_scsih_check_access_status(ioc, sas_address, handle,
7355 sas_device_pg0.AccessStatus))
7356 return -1;
7357
7358 port_id = sas_device_pg0.PhysicalPort;
7359 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7360 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7361 if (sas_device) {
7362 clear_bit(handle, ioc->pend_os_device_add);
7363 sas_device_put(sas_device);
7364 return -1;
7365 }
7366
7367 if (sas_device_pg0.EnclosureHandle) {
7368 enclosure_dev =
7369 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7370 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7371 if (enclosure_dev == NULL)
7372 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7373 sas_device_pg0.EnclosureHandle);
7374 }
7375
7376 sas_device = kzalloc(sizeof(struct _sas_device),
7377 GFP_KERNEL);
7378 if (!sas_device) {
7379 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7380 __FILE__, __LINE__, __func__);
7381 return 0;
7382 }
7383
7384 kref_init(&sas_device->refcount);
7385 sas_device->handle = handle;
7386 if (_scsih_get_sas_address(ioc,
7387 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7388 &sas_device->sas_address_parent) != 0)
7389 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7390 __FILE__, __LINE__, __func__);
7391 sas_device->enclosure_handle =
7392 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7393 if (sas_device->enclosure_handle != 0)
7394 sas_device->slot =
7395 le16_to_cpu(sas_device_pg0.Slot);
7396 sas_device->device_info = device_info;
7397 sas_device->sas_address = sas_address;
7398 sas_device->phy = sas_device_pg0.PhyNum;
7399 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7400 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7401 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7402 if (!sas_device->port) {
7403 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7404 __FILE__, __LINE__, __func__);
7405 goto out;
7406 }
7407
7408 if (le16_to_cpu(sas_device_pg0.Flags)
7409 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7410 sas_device->enclosure_level =
7411 sas_device_pg0.EnclosureLevel;
7412 memcpy(sas_device->connector_name,
7413 sas_device_pg0.ConnectorName, 4);
7414 sas_device->connector_name[4] = '\0';
7415 } else {
7416 sas_device->enclosure_level = 0;
7417 sas_device->connector_name[0] = '\0';
7418 }
7419
7420 sas_device->is_chassis_slot_valid = 0;
7421 if (enclosure_dev) {
7422 sas_device->enclosure_logical_id =
7423 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7424 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7425 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7426 sas_device->is_chassis_slot_valid = 1;
7427 sas_device->chassis_slot =
7428 enclosure_dev->pg0.ChassisSlot;
7429 }
7430 }
7431
7432
7433 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7434 sas_device->port_type = sas_device_pg0.MaxPortConnections;
7435 ioc_info(ioc,
	    "handle(0x%04x) sas_address(0x%016llx) port_type(0x%x)\n",
7437 handle, sas_device->sas_address, sas_device->port_type);
7438
7439 if (ioc->wait_for_discovery_to_complete)
7440 _scsih_sas_device_init_add(ioc, sas_device);
7441 else
7442 _scsih_sas_device_add(ioc, sas_device);
7443
7444out:
7445 sas_device_put(sas_device);
7446 return 0;
7447}
7448
7449
7450
7451
7452
7453
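/**
 * _scsih_remove_device - removing sas device object
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Marks the target as deleted, unblocks pending I/O and removes the
 * device from the SAS transport layer.
 */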
7454static void
7455_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7456 struct _sas_device *sas_device)
7457{
7458 struct MPT3SAS_TARGET *sas_target_priv_data;
7459
7460 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7461 (sas_device->pfa_led_on)) {
7462 _scsih_turn_off_pfa_led(ioc, sas_device);
7463 sas_device->pfa_led_on = 0;
7464 }
7465
7466 dewtprintk(ioc,
7467 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7468 __func__,
7469 sas_device->handle, (u64)sas_device->sas_address));
7470
7471 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7472 NULL, NULL));
7473
7474 if (sas_device->starget && sas_device->starget->hostdata) {
7475 sas_target_priv_data = sas_device->starget->hostdata;
7476 sas_target_priv_data->deleted = 1;
7477 _scsih_ublock_io_device(ioc, sas_device->sas_address,
7478 sas_device->port);
7479 sas_target_priv_data->handle =
7480 MPT3SAS_INVALID_DEVICE_HANDLE;
7481 }
7482
7483 if (!ioc->hide_drives)
7484 mpt3sas_transport_port_remove(ioc,
7485 sas_device->sas_address,
7486 sas_device->sas_address_parent,
7487 sas_device->port);
7488
7489 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7490 sas_device->handle, (u64)sas_device->sas_address);
7491
7492 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7493
7494 dewtprintk(ioc,
7495 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7496 __func__,
7497 sas_device->handle, (u64)sas_device->sas_address));
7498 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7499 NULL, NULL));
7500}
7501
7502
7503
7504
7505
7506
7507
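/**
 * _scsih_sas_topology_change_event_debug - debug for topology event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */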
7508static void
7509_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7510 Mpi2EventDataSasTopologyChangeList_t *event_data)
7511{
7512 int i;
7513 u16 handle;
7514 u16 reason_code;
7515 u8 phy_number;
7516 char *status_str = NULL;
7517 u8 link_rate, prev_link_rate;
7518
7519 switch (event_data->ExpStatus) {
7520 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7521 status_str = "add";
7522 break;
7523 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7524 status_str = "remove";
7525 break;
7526 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7527 case 0:
7528 status_str = "responding";
7529 break;
7530 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7531 status_str = "remove delay";
7532 break;
7533 default:
7534 status_str = "unknown status";
7535 break;
7536 }
7537 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7538 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7539 "start_phy(%02d), count(%d)\n",
7540 le16_to_cpu(event_data->ExpanderDevHandle),
7541 le16_to_cpu(event_data->EnclosureHandle),
7542 event_data->StartPhyNum, event_data->NumEntries);
7543 for (i = 0; i < event_data->NumEntries; i++) {
7544 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7545 if (!handle)
7546 continue;
7547 phy_number = event_data->StartPhyNum + i;
7548 reason_code = event_data->PHY[i].PhyStatus &
7549 MPI2_EVENT_SAS_TOPO_RC_MASK;
7550 switch (reason_code) {
7551 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7552 status_str = "target add";
7553 break;
7554 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7555 status_str = "target remove";
7556 break;
7557 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7558 status_str = "delay target remove";
7559 break;
7560 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7561 status_str = "link rate change";
7562 break;
7563 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7564 status_str = "target responding";
7565 break;
7566 default:
7567 status_str = "unknown";
7568 break;
7569 }
7570 link_rate = event_data->PHY[i].LinkRate >> 4;
7571 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7572 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7573 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7574 handle, status_str, link_rate, prev_link_rate);
7575
7576 }
7577}
7578
7579
7580
7581
7582
7583
7584
7585
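/**
 * _scsih_sas_topology_change_event - handle topology changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Refreshes the host/expander objects and, for each phy entry, adds,
 * re-checks or removes the attached device as indicated by the reason
 * code.
 */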
7586static int
7587_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7588 struct fw_event_work *fw_event)
7589{
7590 int i;
7591 u16 parent_handle, handle;
7592 u16 reason_code;
7593 u8 phy_number, max_phys;
7594 struct _sas_node *sas_expander;
7595 u64 sas_address;
7596 unsigned long flags;
7597 u8 link_rate, prev_link_rate;
7598 struct hba_port *port;
7599 Mpi2EventDataSasTopologyChangeList_t *event_data =
7600 (Mpi2EventDataSasTopologyChangeList_t *)
7601 fw_event->event_data;
7602
7603 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7604 _scsih_sas_topology_change_event_debug(ioc, event_data);
7605
7606 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
7607 return 0;
7608
7609 if (!ioc->sas_hba.num_phys)
7610 _scsih_sas_host_add(ioc);
7611 else
7612 _scsih_sas_host_refresh(ioc);
7613
7614 if (fw_event->ignore) {
7615 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7616 return 0;
7617 }
7618
7619 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7620 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7621
7622
7623 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7624 if (_scsih_expander_add(ioc, parent_handle) != 0)
7625 return 0;
7626
7627 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7628 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7629 parent_handle);
7630 if (sas_expander) {
7631 sas_address = sas_expander->sas_address;
7632 max_phys = sas_expander->num_phys;
7633 port = sas_expander->port;
7634 } else if (parent_handle < ioc->sas_hba.num_phys) {
7635 sas_address = ioc->sas_hba.sas_address;
7636 max_phys = ioc->sas_hba.num_phys;
7637 } else {
7638 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7639 return 0;
7640 }
7641 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7642
7643
7644 for (i = 0; i < event_data->NumEntries; i++) {
7645 if (fw_event->ignore) {
7646 dewtprintk(ioc,
7647 ioc_info(ioc, "ignoring expander event\n"));
7648 return 0;
7649 }
7650 if (ioc->remove_host || ioc->pci_error_recovery)
7651 return 0;
7652 phy_number = event_data->StartPhyNum + i;
7653 if (phy_number >= max_phys)
7654 continue;
7655 reason_code = event_data->PHY[i].PhyStatus &
7656 MPI2_EVENT_SAS_TOPO_RC_MASK;
7657 if ((event_data->PHY[i].PhyStatus &
7658 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7659 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7660 continue;
7661 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7662 if (!handle)
7663 continue;
7664 link_rate = event_data->PHY[i].LinkRate >> 4;
7665 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7666 switch (reason_code) {
7667 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7668
7669 if (ioc->shost_recovery)
7670 break;
7671
7672 if (link_rate == prev_link_rate)
7673 break;
7674
7675 mpt3sas_transport_update_links(ioc, sas_address,
7676 handle, phy_number, link_rate, port);
7677
7678 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7679 break;
7680
7681 _scsih_check_device(ioc, sas_address, handle,
7682 phy_number, link_rate);
7683
7684 if (!test_bit(handle, ioc->pend_os_device_add))
7685 break;
7686
7687 fallthrough;
7688
7689 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7690
7691 if (ioc->shost_recovery)
7692 break;
7693
7694 mpt3sas_transport_update_links(ioc, sas_address,
7695 handle, phy_number, link_rate, port);
7696
7697 _scsih_add_device(ioc, handle, phy_number, 0);
7698
7699 break;
7700 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7701
7702 _scsih_device_remove_by_handle(ioc, handle);
7703 break;
7704 }
7705 }
7706
7707
7708 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7709 sas_expander)
7710 mpt3sas_expander_remove(ioc, sas_address, port);
7711
7712 return 0;
7713}
7714
7715
7716
7717
7718
7719
7720
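/**
 * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */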
7721static void
7722_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7723 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7724{
7725 char *reason_str = NULL;
7726
7727 switch (event_data->ReasonCode) {
7728 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7729 reason_str = "smart data";
7730 break;
7731 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7732 reason_str = "unsupported device discovered";
7733 break;
7734 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7735 reason_str = "internal device reset";
7736 break;
7737 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7738 reason_str = "internal task abort";
7739 break;
7740 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7741 reason_str = "internal task abort set";
7742 break;
7743 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7744 reason_str = "internal clear task set";
7745 break;
7746 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7747 reason_str = "internal query task";
7748 break;
7749 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7750 reason_str = "sata init failure";
7751 break;
7752 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7753 reason_str = "internal device reset complete";
7754 break;
7755 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7756 reason_str = "internal task abort complete";
7757 break;
7758 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7759 reason_str = "internal async notification";
7760 break;
7761 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7762 reason_str = "expander reduced functionality";
7763 break;
7764 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7765 reason_str = "expander reduced functionality complete";
7766 break;
7767 default:
7768 reason_str = "unknown reason";
7769 break;
7770 }
7771 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7772 reason_str, le16_to_cpu(event_data->DevHandle),
7773 (u64)le64_to_cpu(event_data->SASAddress),
7774 le16_to_cpu(event_data->TaskTag));
7775 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7776 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7777 event_data->ASC, event_data->ASCQ);
7778 pr_cont("\n");
7779}
7780
7781
7782
7783
7784
7785
7786
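/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @event_data: The fw event
 * Context: user.
 *
 * Sets or clears the target's tm_busy flag when the firmware reports an
 * internal device reset start or completion.
 */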
7787static void
7788_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7789 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7790{
7791 struct MPT3SAS_TARGET *target_priv_data;
7792 struct _sas_device *sas_device;
7793 u64 sas_address;
7794 unsigned long flags;
7795
7796
7797
7798
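	/*
	 * Internal device reset complete notifications were added in MPI
	 * header revision 0xC, so skip tm_busy handling on older firmware.
	 */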
7799 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7800 return;
7801
7802 if (event_data->ReasonCode !=
7803 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7804 event_data->ReasonCode !=
7805 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7806 return;
7807
7808 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7809 sas_address = le64_to_cpu(event_data->SASAddress);
7810 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7811 sas_address,
7812 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7813
7814 if (!sas_device || !sas_device->starget)
7815 goto out;
7816
7817 target_priv_data = sas_device->starget->hostdata;
7818 if (!target_priv_data)
7819 goto out;
7820
7821 if (event_data->ReasonCode ==
7822 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7823 target_priv_data->tm_busy = 1;
7824 else
7825 target_priv_data->tm_busy = 0;
7826
7827 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7828 ioc_info(ioc,
7829 "%s tm_busy flag for handle(0x%04x)\n",
7830 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7831 target_priv_data->handle);
7832
7833out:
7834 if (sas_device)
7835 sas_device_put(sas_device);
7836
7837 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7838}
7839
7840
7841
7842
7843
7844
7845
7846
7847
7848
7849
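/**
 * _scsih_check_pcie_access_status - check access flags
 * @ioc: per adapter object
 * @wwid: wwid
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure.
 */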
7850static u8
7851_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7852 u16 handle, u8 access_status)
7853{
7854 u8 rc = 1;
7855 char *desc = NULL;
7856
7857 switch (access_status) {
7858 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7859 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7860 rc = 0;
7861 break;
7862 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7863 desc = "PCIe device capability failed";
7864 break;
7865 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7866 desc = "PCIe device blocked";
7867 ioc_info(ioc,
7868 "Device with Access Status (%s): wwid(0x%016llx), "
		    "handle(0x%04x)\n will only be added to the internal list",
7870 desc, (u64)wwid, handle);
7871 rc = 0;
7872 break;
7873 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7874 desc = "PCIe device mem space access failed";
7875 break;
7876 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7877 desc = "PCIe device unsupported";
7878 break;
7879 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7880 desc = "PCIe device MSIx Required";
7881 break;
7882 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7883 desc = "PCIe device init fail max";
7884 break;
7885 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7886 desc = "PCIe device status unknown";
7887 break;
7888 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7889 desc = "nvme ready timeout";
7890 break;
7891 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7892 desc = "nvme device configuration unsupported";
7893 break;
7894 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7895 desc = "nvme identify failed";
7896 break;
7897 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7898 desc = "nvme qconfig failed";
7899 break;
7900 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7901 desc = "nvme qcreation failed";
7902 break;
7903 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7904 desc = "nvme eventcfg failed";
7905 break;
7906 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7907 desc = "nvme get feature stat failed";
7908 break;
7909 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7910 desc = "nvme idle timeout";
7911 break;
7912 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7913 desc = "nvme failure status";
7914 break;
7915 default:
7916 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7917 access_status, (u64)wwid, handle);
7918 return rc;
7919 }
7920
7921 if (!rc)
7922 return rc;
7923
7924 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7925 desc, (u64)wwid, handle);
7926 return rc;
7927}
7928
7929
7930
7931
7932
7933
7934
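/**
 * _scsih_pcie_device_remove_from_sml - removing pcie device
 * from SML and free up associated memory
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 */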
7935static void
7936_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7937 struct _pcie_device *pcie_device)
7938{
7939 struct MPT3SAS_TARGET *sas_target_priv_data;
7940
7941 dewtprintk(ioc,
7942 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7943 __func__,
7944 pcie_device->handle, (u64)pcie_device->wwid));
7945 if (pcie_device->enclosure_handle != 0)
7946 dewtprintk(ioc,
7947 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7948 __func__,
7949 (u64)pcie_device->enclosure_logical_id,
7950 pcie_device->slot));
7951 if (pcie_device->connector_name[0] != '\0')
7952 dewtprintk(ioc,
7953 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7954 __func__,
7955 pcie_device->enclosure_level,
7956 pcie_device->connector_name));
7957
7958 if (pcie_device->starget && pcie_device->starget->hostdata) {
7959 sas_target_priv_data = pcie_device->starget->hostdata;
7960 sas_target_priv_data->deleted = 1;
7961 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7962 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7963 }
7964
7965 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7966 pcie_device->handle, (u64)pcie_device->wwid);
7967 if (pcie_device->enclosure_handle != 0)
7968 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7969 (u64)pcie_device->enclosure_logical_id,
7970 pcie_device->slot);
7971 if (pcie_device->connector_name[0] != '\0')
7972 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7973 pcie_device->enclosure_level,
7974 pcie_device->connector_name);
7975
7976 if (pcie_device->starget && (pcie_device->access_status !=
7977 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7978 scsi_remove_target(&pcie_device->starget->dev);
7979 dewtprintk(ioc,
7980 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7981 __func__,
7982 pcie_device->handle, (u64)pcie_device->wwid));
7983 if (pcie_device->enclosure_handle != 0)
7984 dewtprintk(ioc,
7985 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7986 __func__,
7987 (u64)pcie_device->enclosure_logical_id,
7988 pcie_device->slot));
7989 if (pcie_device->connector_name[0] != '\0')
7990 dewtprintk(ioc,
7991 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7992 __func__,
7993 pcie_device->enclosure_level,
7994 pcie_device->connector_name));
7995
7996 kfree(pcie_device->serial_number);
7997}
7998
7999
8000
8001
8002
8003
8004
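/**
 * _scsih_pcie_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @handle: attached device handle
 *
 * If the device handle changed across a reset, the cached handle and
 * enclosure information are updated; I/O to the device is unblocked
 * once it is confirmed present and accessible.
 */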
8005static void
8006_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8007{
8008 Mpi2ConfigReply_t mpi_reply;
8009 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8010 u32 ioc_status;
8011 struct _pcie_device *pcie_device;
8012 u64 wwid;
8013 unsigned long flags;
8014 struct scsi_target *starget;
8015 struct MPT3SAS_TARGET *sas_target_priv_data;
8016 u32 device_info;
8017
8018 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8019 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
8020 return;
8021
8022 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
8023 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8024 return;
8025
8026
8027 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8028 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8029 return;
8030
8031 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8032 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8033 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8034
8035 if (!pcie_device) {
8036 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8037 return;
8038 }
8039
8040 if (unlikely(pcie_device->handle != handle)) {
8041 starget = pcie_device->starget;
8042 sas_target_priv_data = starget->hostdata;
8043 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8044 starget_printk(KERN_INFO, starget,
8045 "handle changed from(0x%04x) to (0x%04x)!!!\n",
8046 pcie_device->handle, handle);
8047 sas_target_priv_data->handle = handle;
8048 pcie_device->handle = handle;
8049
8050 if (le32_to_cpu(pcie_device_pg0.Flags) &
8051 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8052 pcie_device->enclosure_level =
8053 pcie_device_pg0.EnclosureLevel;
8054 memcpy(&pcie_device->connector_name[0],
8055 &pcie_device_pg0.ConnectorName[0], 4);
8056 } else {
8057 pcie_device->enclosure_level = 0;
8058 pcie_device->connector_name[0] = '\0';
8059 }
8060 }
8061
8062
8063 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8064 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8065 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
8066 handle);
8067 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8068 pcie_device_put(pcie_device);
8069 return;
8070 }
8071
8072
8073 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8074 pcie_device_pg0.AccessStatus)) {
8075 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8076 pcie_device_put(pcie_device);
8077 return;
8078 }
8079
8080 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8081 pcie_device_put(pcie_device);
8082
8083 _scsih_ublock_io_device(ioc, wwid, NULL);
8084
8085 return;
8086}
8087
8088
8089
8090
8091
8092
8093
8094
8095
8096
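/**
 * _scsih_pcie_add_device - creating pcie device object
 * @ioc: per adapter object
 * @handle: pcie device handle
 *
 * Creates the end device object and stores it in ioc's pcie device lists
 * (init list during discovery, otherwise the regular list).
 *
 * Return: 0 when the event has been handled, whether or not the device
 * was actually added.
 */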
8097static int
8098_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8099{
8100 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8101 Mpi26PCIeDevicePage2_t pcie_device_pg2;
8102 Mpi2ConfigReply_t mpi_reply;
8103 struct _pcie_device *pcie_device;
8104 struct _enclosure_node *enclosure_dev;
8105 u32 ioc_status;
8106 u64 wwid;
8107
8108 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8109 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8110 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8111 __FILE__, __LINE__, __func__);
8112 return 0;
8113 }
8114 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8115 MPI2_IOCSTATUS_MASK;
8116 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8117 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8118 __FILE__, __LINE__, __func__);
8119 return 0;
8120 }
8121
8122 set_bit(handle, ioc->pend_os_device_add);
8123 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8124
8125
8126 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8127 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
8129 handle);
8130 return 0;
8131 }
8132
8133
8134 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8135 pcie_device_pg0.AccessStatus))
8136 return 0;
8137
8138 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8139 (pcie_device_pg0.DeviceInfo))))
8140 return 0;
8141
8142 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8143 if (pcie_device) {
8144 clear_bit(handle, ioc->pend_os_device_add);
8145 pcie_device_put(pcie_device);
8146 return 0;
8147 }
8148
8149
8150
8151
8152
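	/*
	 * PCIe Device Page 2 contains NVMe specific information, so read
	 * it only for NVMe devices and skip it for PCIe-SCSI devices.
	 */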
8153 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8154 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8155 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8156 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8157 handle)) {
8158 ioc_err(ioc,
8159 "failure at %s:%d/%s()!\n", __FILE__,
8160 __LINE__, __func__);
8161 return 0;
8162 }
8163
8164 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8165 MPI2_IOCSTATUS_MASK;
8166 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8167 ioc_err(ioc,
8168 "failure at %s:%d/%s()!\n", __FILE__,
8169 __LINE__, __func__);
8170 return 0;
8171 }
8172 }
8173
8174 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8175 if (!pcie_device) {
8176 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8177 __FILE__, __LINE__, __func__);
8178 return 0;
8179 }
8180
8181 kref_init(&pcie_device->refcount);
8182 pcie_device->id = ioc->pcie_target_id++;
8183 pcie_device->channel = PCIE_CHANNEL;
8184 pcie_device->handle = handle;
8185 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8186 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8187 pcie_device->wwid = wwid;
8188 pcie_device->port_num = pcie_device_pg0.PortNum;
8189 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8190 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8191
8192 pcie_device->enclosure_handle =
8193 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8194 if (pcie_device->enclosure_handle != 0)
8195 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8196
8197 if (le32_to_cpu(pcie_device_pg0.Flags) &
8198 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8199 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8200 memcpy(&pcie_device->connector_name[0],
8201 &pcie_device_pg0.ConnectorName[0], 4);
8202 } else {
8203 pcie_device->enclosure_level = 0;
8204 pcie_device->connector_name[0] = '\0';
8205 }
8206
8207
8208 if (pcie_device->enclosure_handle) {
8209 enclosure_dev =
8210 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8211 pcie_device->enclosure_handle);
8212 if (enclosure_dev)
8213 pcie_device->enclosure_logical_id =
8214 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8215 }
8216
8217 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8218 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8219 pcie_device->nvme_mdts =
8220 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8221 pcie_device->shutdown_latency =
8222 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8223
8224
8225
8226
8227
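		/*
		 * Track the largest shutdown latency reported by any NVMe
		 * drive in ioc->max_shutdown_latency.
		 */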
8228 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8229 ioc->max_shutdown_latency =
8230 pcie_device->shutdown_latency;
8231 if (pcie_device_pg2.ControllerResetTO)
8232 pcie_device->reset_timeout =
8233 pcie_device_pg2.ControllerResetTO;
8234 else
8235 pcie_device->reset_timeout = 30;
8236 } else
8237 pcie_device->reset_timeout = 30;
8238
8239 if (ioc->wait_for_discovery_to_complete)
8240 _scsih_pcie_device_init_add(ioc, pcie_device);
8241 else
8242 _scsih_pcie_device_add(ioc, pcie_device);
8243
8244 pcie_device_put(pcie_device);
8245 return 0;
8246}
8247
8248
8249
8250
8251
8252
8253
8254
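/**
 * _scsih_pcie_topology_change_event_debug - debug for PCIe topology event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */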
8255static void
8256_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8257 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8258{
8259 int i;
8260 u16 handle;
8261 u16 reason_code;
8262 u8 port_number;
8263 char *status_str = NULL;
8264 u8 link_rate, prev_link_rate;
8265
8266 switch (event_data->SwitchStatus) {
8267 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8268 status_str = "add";
8269 break;
8270 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8271 status_str = "remove";
8272 break;
8273 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8274 case 0:
8275 status_str = "responding";
8276 break;
8277 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8278 status_str = "remove delay";
8279 break;
8280 default:
8281 status_str = "unknown status";
8282 break;
8283 }
8284 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8285 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8286 "start_port(%02d), count(%d)\n",
8287 le16_to_cpu(event_data->SwitchDevHandle),
8288 le16_to_cpu(event_data->EnclosureHandle),
8289 event_data->StartPortNum, event_data->NumEntries);
8290 for (i = 0; i < event_data->NumEntries; i++) {
8291 handle =
8292 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8293 if (!handle)
8294 continue;
8295 port_number = event_data->StartPortNum + i;
8296 reason_code = event_data->PortEntry[i].PortStatus;
8297 switch (reason_code) {
8298 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8299 status_str = "target add";
8300 break;
8301 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8302 status_str = "target remove";
8303 break;
8304 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8305 status_str = "delay target remove";
8306 break;
8307 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8308 status_str = "link rate change";
8309 break;
8310 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8311 status_str = "target responding";
8312 break;
8313 default:
8314 status_str = "unknown";
8315 break;
8316 }
8317 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8318 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8319 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8320 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8321 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8322 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8323 handle, status_str, link_rate, prev_link_rate);
8324 }
8325}
8326
8327
8328
8329
8330
8331
8332
8333
8334
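/**
 * _scsih_pcie_topology_change_event - handle PCIe topology changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * For each port entry, adds, re-checks or removes the attached NVMe or
 * PCIe device as indicated by the port status.
 */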
8335static void
8336_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8337 struct fw_event_work *fw_event)
8338{
8339 int i;
8340 u16 handle;
8341 u16 reason_code;
8342 u8 link_rate, prev_link_rate;
8343 unsigned long flags;
8344 int rc;
8345 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8346 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8347 struct _pcie_device *pcie_device;
8348
8349 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8350 _scsih_pcie_topology_change_event_debug(ioc, event_data);
8351
8352 if (ioc->shost_recovery || ioc->remove_host ||
8353 ioc->pci_error_recovery)
8354 return;
8355
8356 if (fw_event->ignore) {
8357 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8358 return;
8359 }
8360
8361
8362 for (i = 0; i < event_data->NumEntries; i++) {
8363 if (fw_event->ignore) {
8364 dewtprintk(ioc,
8365 ioc_info(ioc, "ignoring switch event\n"));
8366 return;
8367 }
8368 if (ioc->remove_host || ioc->pci_error_recovery)
8369 return;
8370 reason_code = event_data->PortEntry[i].PortStatus;
8371 handle =
8372 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8373 if (!handle)
8374 continue;
8375
8376 link_rate = event_data->PortEntry[i].CurrentPortInfo
8377 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8378 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8379 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8380
8381 switch (reason_code) {
8382 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8383 if (ioc->shost_recovery)
8384 break;
8385 if (link_rate == prev_link_rate)
8386 break;
8387 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8388 break;
8389
8390 _scsih_pcie_check_device(ioc, handle);
8391
8392
8393
8394
8395
8396
8397
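			/*
			 * If the device is already on the driver's internal
			 * list there is nothing more to do; otherwise, if an
			 * add is still pending for this handle, convert this
			 * event into a device add.
			 */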
8398 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8399 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8400 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8401
8402 if (pcie_device) {
8403 pcie_device_put(pcie_device);
8404 break;
8405 }
8406
8407 if (!test_bit(handle, ioc->pend_os_device_add))
8408 break;
8409
8410 dewtprintk(ioc,
8411 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8412 handle));
8413 event_data->PortEntry[i].PortStatus &= 0xF0;
8414 event_data->PortEntry[i].PortStatus |=
8415 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8416 fallthrough;
8417 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8418 if (ioc->shost_recovery)
8419 break;
8420 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8421 break;
8422
8423 rc = _scsih_pcie_add_device(ioc, handle);
8424 if (!rc) {
8425
8426
8427
8428
8429
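				/*
				 * Device added; mark this port entry as
				 * NO_CHANGE so it is treated as handled.
				 */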
8430 event_data->PortEntry[i].PortStatus |=
8431 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8432 }
8433 break;
8434 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8435 _scsih_pcie_device_remove_by_handle(ioc, handle);
8436 break;
8437 }
8438 }
8439}
8440
8441
8442
8443
8444
8445
8446
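/**
 * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */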
8447static void
8448_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8449 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8450{
8451 char *reason_str = NULL;
8452
8453 switch (event_data->ReasonCode) {
8454 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8455 reason_str = "smart data";
8456 break;
8457 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8458 reason_str = "unsupported device discovered";
8459 break;
8460 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8461 reason_str = "internal device reset";
8462 break;
8463 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8464 reason_str = "internal task abort";
8465 break;
8466 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8467 reason_str = "internal task abort set";
8468 break;
8469 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8470 reason_str = "internal clear task set";
8471 break;
8472 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8473 reason_str = "internal query task";
8474 break;
8475 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8476 reason_str = "device init failure";
8477 break;
8478 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8479 reason_str = "internal device reset complete";
8480 break;
8481 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8482 reason_str = "internal task abort complete";
8483 break;
8484 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8485 reason_str = "internal async notification";
8486 break;
8487 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8488 reason_str = "pcie hot reset failed";
8489 break;
8490 default:
8491 reason_str = "unknown reason";
8492 break;
8493 }
8494
8495 ioc_info(ioc, "PCIE device status change: (%s)\n"
8496 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8497 reason_str, le16_to_cpu(event_data->DevHandle),
8498 (u64)le64_to_cpu(event_data->WWID),
8499 le16_to_cpu(event_data->TaskTag));
8500 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8501 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8502 event_data->ASC, event_data->ASCQ);
8503 pr_cont("\n");
8504}
8505
8506
8507
8508
8509
8510
8511
8512
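/**
 * _scsih_pcie_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Sets or clears the target's tm_busy flag when the firmware reports an
 * internal device reset start or completion.
 */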
8513static void
8514_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8515 struct fw_event_work *fw_event)
8516{
8517 struct MPT3SAS_TARGET *target_priv_data;
8518 struct _pcie_device *pcie_device;
8519 u64 wwid;
8520 unsigned long flags;
8521 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8522 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8523 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8524 _scsih_pcie_device_status_change_event_debug(ioc,
8525 event_data);
8526
8527 if (event_data->ReasonCode !=
8528 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8529 event_data->ReasonCode !=
8530 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8531 return;
8532
8533 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8534 wwid = le64_to_cpu(event_data->WWID);
8535 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8536
8537 if (!pcie_device || !pcie_device->starget)
8538 goto out;
8539
8540 target_priv_data = pcie_device->starget->hostdata;
8541 if (!target_priv_data)
8542 goto out;
8543
8544 if (event_data->ReasonCode ==
8545 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8546 target_priv_data->tm_busy = 1;
8547 else
8548 target_priv_data->tm_busy = 0;
8549out:
8550 if (pcie_device)
8551 pcie_device_put(pcie_device);
8552
8553 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8554}
8555
8556
8557
8558
8559
8560
8561
8562
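/**
 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
 * event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */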
8563static void
8564_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8565 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8566{
8567 char *reason_str = NULL;
8568
8569 switch (event_data->ReasonCode) {
8570 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8571 reason_str = "enclosure add";
8572 break;
8573 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8574 reason_str = "enclosure remove";
8575 break;
8576 default:
8577 reason_str = "unknown reason";
8578 break;
8579 }
8580
8581 ioc_info(ioc, "enclosure status change: (%s)\n"
8582 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8583 reason_str,
8584 le16_to_cpu(event_data->EnclosureHandle),
8585 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8586 le16_to_cpu(event_data->StartSlot));
8587}
8588
8589
8590
8591
8592
8593
8594
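/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Adds the enclosure to ioc->enclosure_list when it is reported added and
 * frees it when it stops responding.
 */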
8595static void
8596_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8597 struct fw_event_work *fw_event)
8598{
8599 Mpi2ConfigReply_t mpi_reply;
8600 struct _enclosure_node *enclosure_dev = NULL;
8601 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8602 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8603 int rc;
8604 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8605
8606 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8607 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8608 (Mpi2EventDataSasEnclDevStatusChange_t *)
8609 fw_event->event_data);
8610 if (ioc->shost_recovery)
8611 return;
8612
8613 if (enclosure_handle)
8614 enclosure_dev =
8615 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8616 enclosure_handle);
8617 switch (event_data->ReasonCode) {
8618 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8619 if (!enclosure_dev) {
8620 enclosure_dev =
8621 kzalloc(sizeof(struct _enclosure_node),
8622 GFP_KERNEL);
8623 if (!enclosure_dev) {
8624 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8625 __FILE__, __LINE__, __func__);
8626 return;
8627 }
8628 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8629 &enclosure_dev->pg0,
8630 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8631 enclosure_handle);
8632
8633 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8634 MPI2_IOCSTATUS_MASK)) {
8635 kfree(enclosure_dev);
8636 return;
8637 }
8638
8639 list_add_tail(&enclosure_dev->list,
8640 &ioc->enclosure_list);
8641 }
8642 break;
8643 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8644 if (enclosure_dev) {
8645 list_del(&enclosure_dev->list);
8646 kfree(enclosure_dev);
8647 }
8648 break;
8649 default:
8650 break;
8651 }
8652}
8653
8654
8655
8656
8657
8658
8659
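/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks every outstanding SCSI IO, issues QUERY_TASK for it and, when the
 * firmware no longer owns the IO, aborts it.  The whole pass is retried if
 * another broadcast AEN arrives while it is running.
 */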
8660static void
8661_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8662 struct fw_event_work *fw_event)
8663{
8664 struct scsi_cmnd *scmd;
8665 struct scsi_device *sdev;
8666 struct scsiio_tracker *st;
8667 u16 smid, handle;
8668 u32 lun;
8669 struct MPT3SAS_DEVICE *sas_device_priv_data;
8670 u32 termination_count;
8671 u32 query_count;
8672 Mpi2SCSITaskManagementReply_t *mpi_reply;
8673 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8674 (Mpi2EventDataSasBroadcastPrimitive_t *)
8675 fw_event->event_data;
8676 u16 ioc_status;
8677 unsigned long flags;
8678 int r;
8679 u8 max_retries = 0;
8680 u8 task_abort_retries;
8681
8682 mutex_lock(&ioc->tm_cmds.mutex);
8683 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8684 __func__, event_data->PhyNum, event_data->PortWidth);
8685
8686 _scsih_block_io_all_device(ioc);
8687
8688 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8689 mpi_reply = ioc->tm_cmds.reply;
8690 broadcast_aen_retry:
8691
8692
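	/* sanity check on the number of passes through this retry loop */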
8693 if (max_retries++ == 5) {
8694 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8695 goto out;
8696 } else if (max_retries > 1)
8697 dewtprintk(ioc,
8698 ioc_info(ioc, "%s: %d retry\n",
8699 __func__, max_retries - 1));
8700
8701 termination_count = 0;
8702 query_count = 0;
8703 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8704 if (ioc->shost_recovery)
8705 goto out;
8706 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8707 if (!scmd)
8708 continue;
8709 st = scsi_cmd_priv(scmd);
8710 sdev = scmd->device;
8711 sas_device_priv_data = sdev->hostdata;
8712 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8713 continue;
8714
8715 if (sas_device_priv_data->sas_target->flags &
8716 MPT_TARGET_FLAGS_RAID_COMPONENT)
8717 continue;
8718
8719 if (sas_device_priv_data->sas_target->flags &
8720 MPT_TARGET_FLAGS_VOLUME)
8721 continue;
8722
8723 if (sas_device_priv_data->sas_target->flags &
8724 MPT_TARGET_FLAGS_PCIE_DEVICE)
8725 continue;
8726
8727 handle = sas_device_priv_data->sas_target->handle;
8728 lun = sas_device_priv_data->lun;
8729 query_count++;
8730
8731 if (ioc->shost_recovery)
8732 goto out;
8733
8734 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8735 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8736 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8737 st->msix_io, 30, 0);
8738 if (r == FAILED) {
8739 sdev_printk(KERN_WARNING, sdev,
8740 "mpt3sas_scsih_issue_tm: FAILED when sending "
8741 "QUERY_TASK: scmd(%p)\n", scmd);
8742 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8743 goto broadcast_aen_retry;
8744 }
8745 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8746 & MPI2_IOCSTATUS_MASK;
8747 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8748 sdev_printk(KERN_WARNING, sdev,
8749 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8750 ioc_status, scmd);
8751 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8752 goto broadcast_aen_retry;
8753 }
8754
8755
8756 if (mpi_reply->ResponseCode ==
8757 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8758 mpi_reply->ResponseCode ==
8759 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8760 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8761 continue;
8762 }
8763 task_abort_retries = 0;
8764 tm_retry:
8765 if (task_abort_retries++ == 60) {
8766 dewtprintk(ioc,
8767 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8768 __func__));
8769 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8770 goto broadcast_aen_retry;
8771 }
8772
8773 if (ioc->shost_recovery)
8774 goto out_no_lock;
8775
8776 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8777 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8778 st->smid, st->msix_io, 30, 0);
8779 if (r == FAILED || st->cb_idx != 0xFF) {
8780 sdev_printk(KERN_WARNING, sdev,
8781 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8782 "scmd(%p)\n", scmd);
8783 goto tm_retry;
8784 }
8785
8786 if (task_abort_retries > 1)
8787 sdev_printk(KERN_WARNING, sdev,
8788 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8789 " scmd(%p)\n",
8790 task_abort_retries - 1, scmd);
8791
8792 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8793 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8794 }
8795
8796 if (ioc->broadcast_aen_pending) {
8797 dewtprintk(ioc,
8798 ioc_info(ioc,
8799 "%s: loop back due to pending AEN\n",
8800 __func__));
8801 ioc->broadcast_aen_pending = 0;
8802 goto broadcast_aen_retry;
8803 }
8804
8805 out:
8806 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8807 out_no_lock:
8808
8809 dewtprintk(ioc,
8810 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8811 __func__, query_count, termination_count));
8812
8813 ioc->broadcast_aen_busy = 0;
8814 if (!ioc->shost_recovery)
8815 _scsih_ublock_io_all_device(ioc);
8816 mutex_unlock(&ioc->tm_cmds.mutex);
8817}
8818
8819
8820
8821
8822
8823
8824
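/**
 * _scsih_sas_discovery_event - handle discovery events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * On the first discovery-start event (no phys present yet) the SAS host
 * is added.
 */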
8825static void
8826_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8827 struct fw_event_work *fw_event)
8828{
8829 Mpi2EventDataSasDiscovery_t *event_data =
8830 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8831
8832 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8833 ioc_info(ioc, "discovery event: (%s)",
8834 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8835 "start" : "stop");
8836 if (event_data->DiscoveryStatus)
8837 pr_cont("discovery_status(0x%08x)",
8838 le32_to_cpu(event_data->DiscoveryStatus));
8839 pr_cont("\n");
8840 }
8841
8842 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8843 !ioc->sas_hba.num_phys) {
8844 if (disable_discovery > 0 && ioc->shost_recovery) {
8845
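			/* wait for host reset to complete before adding the host */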
8846 while (ioc->shost_recovery)
8847 ssleep(1);
8848 }
8849 _scsih_sas_host_add(ioc);
8850 }
8851}
8852
8853
8854
8855
8856
8857
8858
8859
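/**
 * _scsih_sas_device_discovery_error_event - display SMP error to an expander
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */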
8860static void
8861_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8862 struct fw_event_work *fw_event)
8863{
8864 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8865 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8866
8867 switch (event_data->ReasonCode) {
8868 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8869 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8870 le16_to_cpu(event_data->DevHandle),
8871 (u64)le64_to_cpu(event_data->SASAddress),
8872 event_data->PhysicalPort);
8873 break;
8874 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8875 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8876 le16_to_cpu(event_data->DevHandle),
8877 (u64)le64_to_cpu(event_data->SASAddress),
8878 event_data->PhysicalPort);
8879 break;
8880 default:
8881 break;
8882 }
8883}
8884
8885
8886
8887
8888
8889
8890
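/**
 * _scsih_pcie_enumeration_event - handle enumeration events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */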
8891static void
8892_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8893 struct fw_event_work *fw_event)
8894{
8895 Mpi26EventDataPCIeEnumeration_t *event_data =
8896 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8897
8898 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8899 return;
8900
8901 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8902 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8903 "started" : "completed",
8904 event_data->Flags);
8905 if (event_data->EnumerationStatus)
8906 pr_cont("enumeration_status(0x%08x)",
8907 le32_to_cpu(event_data->EnumerationStatus));
8908 pr_cont("\n");
8909}
8910
8911
8912
8913
8914
8915
8916
8917
8918
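/**
 * _scsih_ir_fastpath - turn on fastpath for hidden raid components
 * @ioc: per adapter object
 * @handle: device handle
 * @phys_disk_num: physical disk number
 *
 * IR firmware only; issues a RAID_ACTION request marking the physical
 * disk as hidden.
 *
 * Return: 0 on success, otherwise a negative errno.
 */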
8919static int
8920_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8921{
8922 Mpi2RaidActionRequest_t *mpi_request;
8923 Mpi2RaidActionReply_t *mpi_reply;
8924 u16 smid;
8925 u8 issue_reset = 0;
8926 int rc = 0;
8927 u16 ioc_status;
8928 u32 log_info;
8929
8930 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8931 return rc;
8932
8933 mutex_lock(&ioc->scsih_cmds.mutex);
8934
8935 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8936 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8937 rc = -EAGAIN;
8938 goto out;
8939 }
8940 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8941
8942 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8943 if (!smid) {
8944 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8945 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8946 rc = -EAGAIN;
8947 goto out;
8948 }
8949
8950 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8951 ioc->scsih_cmds.smid = smid;
8952 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8953
8954 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8955 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8956 mpi_request->PhysDiskNum = phys_disk_num;
8957
8958 dewtprintk(ioc,
8959 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8960 handle, phys_disk_num));
8961
8962 init_completion(&ioc->scsih_cmds.done);
8963 ioc->put_smid_default(ioc, smid);
8964 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
8965
8966 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8967 mpt3sas_check_cmd_timeout(ioc,
8968 ioc->scsih_cmds.status, mpi_request,
8969 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8970 rc = -EFAULT;
8971 goto out;
8972 }
8973
8974 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8975
8976 mpi_reply = ioc->scsih_cmds.reply;
8977 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8978 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8979 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
8980 else
8981 log_info = 0;
8982 ioc_status &= MPI2_IOCSTATUS_MASK;
8983 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8984 dewtprintk(ioc,
8985 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8986 ioc_status, log_info));
8987 rc = -EFAULT;
8988 } else
8989 dewtprintk(ioc,
8990 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
8991 }
8992
8993 out:
8994 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8995 mutex_unlock(&ioc->scsih_cmds.mutex);
8996
8997 if (issue_reset)
8998 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8999 return rc;
9000}
9001
9002
9003
9004
9005
9006
9007
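/**
 * _scsih_reprobe_lun - reprobe lun to either hide or expose it to the ULD
 * @sdev: scsi device struct
 * @no_uld_attach: when non-NULL the device is hidden from the upper layer
 * driver, otherwise it is exposed
 */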
9008static void
9009_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
9010{
9011 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
9012 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
9013 sdev->no_uld_attach ? "hiding" : "exposing");
9014 WARN_ON(scsi_device_reprobe(sdev));
9015}
9016
9017
9018
9019
9020
9021
9022
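/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */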
9023static void
9024_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
9025 Mpi2EventIrConfigElement_t *element)
9026{
9027 struct _raid_device *raid_device;
9028 unsigned long flags;
9029 u64 wwid;
9030 u16 handle = le16_to_cpu(element->VolDevHandle);
9031 int rc;
9032
9033 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9034 if (!wwid) {
9035 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9036 __FILE__, __LINE__, __func__);
9037 return;
9038 }
9039
9040 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9041 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
9042 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9043
9044 if (raid_device)
9045 return;
9046
9047 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9048 if (!raid_device) {
9049 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9050 __FILE__, __LINE__, __func__);
9051 return;
9052 }
9053
9054 raid_device->id = ioc->sas_id++;
9055 raid_device->channel = RAID_CHANNEL;
9056 raid_device->handle = handle;
9057 raid_device->wwid = wwid;
9058 _scsih_raid_device_add(ioc, raid_device);
9059 if (!ioc->wait_for_discovery_to_complete) {
9060 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9061 raid_device->id, 0);
9062 if (rc)
9063 _scsih_raid_device_remove(ioc, raid_device);
9064 } else {
9065 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9066 _scsih_determine_boot_device(ioc, raid_device, 1);
9067 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9068 }
9069}
9070
9071
9072
9073
9074
9075
9076
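/**
 * _scsih_sas_volume_delete - delete volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user.
 */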
9077static void
9078_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
9079{
9080 struct _raid_device *raid_device;
9081 unsigned long flags;
9082 struct MPT3SAS_TARGET *sas_target_priv_data;
9083 struct scsi_target *starget = NULL;
9084
9085 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9086 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9087 if (raid_device) {
9088 if (raid_device->starget) {
9089 starget = raid_device->starget;
9090 sas_target_priv_data = starget->hostdata;
9091 sas_target_priv_data->deleted = 1;
9092 }
9093 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9094 raid_device->handle, (u64)raid_device->wwid);
9095 list_del(&raid_device->list);
9096 kfree(raid_device);
9097 }
9098 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9099 if (starget)
9100 scsi_remove_target(&starget->dev);
9101}
9102
9103
9104
9105
9106
9107
9108
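/**
 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */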
9109static void
9110_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9111 Mpi2EventIrConfigElement_t *element)
9112{
9113 struct _sas_device *sas_device;
9114 struct scsi_target *starget = NULL;
9115 struct MPT3SAS_TARGET *sas_target_priv_data;
9116 unsigned long flags;
9117 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9118
9119 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9120 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9121 if (sas_device) {
9122 sas_device->volume_handle = 0;
9123 sas_device->volume_wwid = 0;
9124 clear_bit(handle, ioc->pd_handles);
9125 if (sas_device->starget && sas_device->starget->hostdata) {
9126 starget = sas_device->starget;
9127 sas_target_priv_data = starget->hostdata;
9128 sas_target_priv_data->flags &=
9129 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9130 }
9131 }
9132 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9133 if (!sas_device)
9134 return;
9135
9136
9137 if (starget)
9138 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9139
9140 sas_device_put(sas_device);
9141}
9142
9143
9144
9145
9146
9147
9148
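/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */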
9149static void
9150_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9151 Mpi2EventIrConfigElement_t *element)
9152{
9153 struct _sas_device *sas_device;
9154 struct scsi_target *starget = NULL;
9155 struct MPT3SAS_TARGET *sas_target_priv_data;
9156 unsigned long flags;
9157 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9158 u16 volume_handle = 0;
9159 u64 volume_wwid = 0;
9160
9161 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9162 if (volume_handle)
9163 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9164 &volume_wwid);
9165
9166 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9167 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9168 if (sas_device) {
9169 set_bit(handle, ioc->pd_handles);
9170 if (sas_device->starget && sas_device->starget->hostdata) {
9171 starget = sas_device->starget;
9172 sas_target_priv_data = starget->hostdata;
9173 sas_target_priv_data->flags |=
9174 MPT_TARGET_FLAGS_RAID_COMPONENT;
9175 sas_device->volume_handle = volume_handle;
9176 sas_device->volume_wwid = volume_wwid;
9177 }
9178 }
9179 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9180 if (!sas_device)
9181 return;
9182
9183
9184 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9185
9186 if (starget)
9187 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9188
9189 sas_device_put(sas_device);
9190}
9191
9192
9193
9194
9195
9196
9197
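/**
 * _scsih_sas_pd_delete - delete pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */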
9198static void
9199_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9200 Mpi2EventIrConfigElement_t *element)
9201{
9202 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9203
9204 _scsih_device_remove_by_handle(ioc, handle);
9205}
9206
9207
9208
9209
9210
9211
9212
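/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */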
9213static void
9214_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9215 Mpi2EventIrConfigElement_t *element)
9216{
9217 struct _sas_device *sas_device;
9218 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9219 Mpi2ConfigReply_t mpi_reply;
9220 Mpi2SasDevicePage0_t sas_device_pg0;
9221 u32 ioc_status;
9222 u64 sas_address;
9223 u16 parent_handle;
9224
9225 set_bit(handle, ioc->pd_handles);
9226
9227 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9228 if (sas_device) {
9229 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9230 sas_device_put(sas_device);
9231 return;
9232 }
9233
9234 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9235 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9236 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9237 __FILE__, __LINE__, __func__);
9238 return;
9239 }
9240
9241 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9242 MPI2_IOCSTATUS_MASK;
9243 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9244 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9245 __FILE__, __LINE__, __func__);
9246 return;
9247 }
9248
9249 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9250 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9251 mpt3sas_transport_update_links(ioc, sas_address, handle,
9252 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9253 mpt3sas_get_port_by_id(ioc,
9254 sas_device_pg0.PhysicalPort, 0));
9255
9256 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9257 _scsih_add_device(ioc, handle, 0, 1);
9258}
9259
9260
9261
9262
9263
9264
9265
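/**
 * _scsih_sas_ir_config_change_event_debug - debug for IR config change events
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */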
9266static void
9267_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9268 Mpi2EventDataIrConfigChangeList_t *event_data)
9269{
9270 Mpi2EventIrConfigElement_t *element;
9271 u8 element_type;
9272 int i;
9273 char *reason_str = NULL, *element_str = NULL;
9274
9275 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9276
9277 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9278 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9279 "foreign" : "native",
9280 event_data->NumElements);
9281 for (i = 0; i < event_data->NumElements; i++, element++) {
9282 switch (element->ReasonCode) {
9283 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9284 reason_str = "add";
9285 break;
9286 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9287 reason_str = "remove";
9288 break;
9289 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9290 reason_str = "no change";
9291 break;
9292 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9293 reason_str = "hide";
9294 break;
9295 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9296 reason_str = "unhide";
9297 break;
9298 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9299 reason_str = "volume_created";
9300 break;
9301 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9302 reason_str = "volume_deleted";
9303 break;
9304 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9305 reason_str = "pd_created";
9306 break;
9307 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9308 reason_str = "pd_deleted";
9309 break;
9310 default:
9311 reason_str = "unknown reason";
9312 break;
9313 }
9314 element_type = le16_to_cpu(element->ElementFlags) &
9315 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9316 switch (element_type) {
9317 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9318 element_str = "volume";
9319 break;
9320 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9321 element_str = "phys disk";
9322 break;
9323 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9324 element_str = "hot spare";
9325 break;
9326 default:
9327 element_str = "unknown element";
9328 break;
9329 }
9330 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9331 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9332 reason_str, le16_to_cpu(element->VolDevHandle),
9333 le16_to_cpu(element->PhysDiskDevHandle),
9334 element->PhysDiskNum);
9335 }
9336}
9337
9338
9339
9340
9341
9342
9343
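/**
 * _scsih_sas_ir_config_change_event - handle ir configuration change events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */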
9344static void
9345_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9346 struct fw_event_work *fw_event)
9347{
9348 Mpi2EventIrConfigElement_t *element;
9349 int i;
9350 u8 foreign_config;
9351 Mpi2EventDataIrConfigChangeList_t *event_data =
9352 (Mpi2EventDataIrConfigChangeList_t *)
9353 fw_event->event_data;
9354
9355 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9356 (!ioc->hide_ir_msg))
9357 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9358
9359 foreign_config = (le32_to_cpu(event_data->Flags) &
9360 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9361
9362 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9363 if (ioc->shost_recovery &&
9364 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9365 for (i = 0; i < event_data->NumElements; i++, element++) {
9366 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9367 _scsih_ir_fastpath(ioc,
9368 le16_to_cpu(element->PhysDiskDevHandle),
9369 element->PhysDiskNum);
9370 }
9371 return;
9372 }
9373
9374 for (i = 0; i < event_data->NumElements; i++, element++) {
9375
9376 switch (element->ReasonCode) {
9377 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9378 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9379 if (!foreign_config)
9380 _scsih_sas_volume_add(ioc, element);
9381 break;
9382 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9383 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9384 if (!foreign_config)
9385 _scsih_sas_volume_delete(ioc,
9386 le16_to_cpu(element->VolDevHandle));
9387 break;
9388 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9389 if (!ioc->is_warpdrive)
9390 _scsih_sas_pd_hide(ioc, element);
9391 break;
9392 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9393 if (!ioc->is_warpdrive)
9394 _scsih_sas_pd_expose(ioc, element);
9395 break;
9396 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9397 if (!ioc->is_warpdrive)
9398 _scsih_sas_pd_add(ioc, element);
9399 break;
9400 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9401 if (!ioc->is_warpdrive)
9402 _scsih_sas_pd_delete(ioc, element);
9403 break;
9404 }
9405 }
9406}
9407
9408
9409
9410
9411
9412
9413
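/**
 * _scsih_sas_ir_volume_event - IR volume event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */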
9414static void
9415_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9416 struct fw_event_work *fw_event)
9417{
9418 u64 wwid;
9419 unsigned long flags;
9420 struct _raid_device *raid_device;
9421 u16 handle;
9422 u32 state;
9423 int rc;
9424 Mpi2EventDataIrVolume_t *event_data =
9425 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
9426
9427 if (ioc->shost_recovery)
9428 return;
9429
9430 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9431 return;
9432
9433 handle = le16_to_cpu(event_data->VolDevHandle);
9434 state = le32_to_cpu(event_data->NewValue);
9435 if (!ioc->hide_ir_msg)
9436 dewtprintk(ioc,
9437 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9438 __func__, handle,
9439 le32_to_cpu(event_data->PreviousValue),
9440 state));
9441 switch (state) {
9442 case MPI2_RAID_VOL_STATE_MISSING:
9443 case MPI2_RAID_VOL_STATE_FAILED:
9444 _scsih_sas_volume_delete(ioc, handle);
9445 break;
9446
9447 case MPI2_RAID_VOL_STATE_ONLINE:
9448 case MPI2_RAID_VOL_STATE_DEGRADED:
9449 case MPI2_RAID_VOL_STATE_OPTIMAL:
9450
9451 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9452 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9453 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9454
9455 if (raid_device)
9456 break;
9457
9458 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9459 if (!wwid) {
9460 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9461 __FILE__, __LINE__, __func__);
9462 break;
9463 }
9464
9465 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9466 if (!raid_device) {
9467 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9468 __FILE__, __LINE__, __func__);
9469 break;
9470 }
9471
9472 raid_device->id = ioc->sas_id++;
9473 raid_device->channel = RAID_CHANNEL;
9474 raid_device->handle = handle;
9475 raid_device->wwid = wwid;
9476 _scsih_raid_device_add(ioc, raid_device);
9477 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9478 raid_device->id, 0);
9479 if (rc)
9480 _scsih_raid_device_remove(ioc, raid_device);
9481 break;
9482
9483 case MPI2_RAID_VOL_STATE_INITIALIZING:
9484 default:
9485 break;
9486 }
9487}
9488
9489
9490
9491
9492
9493
9494
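/**
 * _scsih_sas_ir_physical_disk_event - physical disk event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */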
9495static void
9496_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9497 struct fw_event_work *fw_event)
9498{
9499 u16 handle, parent_handle;
9500 u32 state;
9501 struct _sas_device *sas_device;
9502 Mpi2ConfigReply_t mpi_reply;
9503 Mpi2SasDevicePage0_t sas_device_pg0;
9504 u32 ioc_status;
9505 Mpi2EventDataIrPhysicalDisk_t *event_data =
9506 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
9507 u64 sas_address;
9508
9509 if (ioc->shost_recovery)
9510 return;
9511
9512 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9513 return;
9514
9515 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9516 state = le32_to_cpu(event_data->NewValue);
9517
9518 if (!ioc->hide_ir_msg)
9519 dewtprintk(ioc,
9520 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9521 __func__, handle,
9522 le32_to_cpu(event_data->PreviousValue),
9523 state));
9524
9525 switch (state) {
9526 case MPI2_RAID_PD_STATE_ONLINE:
9527 case MPI2_RAID_PD_STATE_DEGRADED:
9528 case MPI2_RAID_PD_STATE_REBUILDING:
9529 case MPI2_RAID_PD_STATE_OPTIMAL:
9530 case MPI2_RAID_PD_STATE_HOT_SPARE:
9531
9532 if (!ioc->is_warpdrive)
9533 set_bit(handle, ioc->pd_handles);
9534
9535 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9536 if (sas_device) {
9537 sas_device_put(sas_device);
9538 return;
9539 }
9540
9541 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9542 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9543 handle))) {
9544 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9545 __FILE__, __LINE__, __func__);
9546 return;
9547 }
9548
9549 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9550 MPI2_IOCSTATUS_MASK;
9551 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9552 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9553 __FILE__, __LINE__, __func__);
9554 return;
9555 }
9556
9557 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9558 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9559 mpt3sas_transport_update_links(ioc, sas_address, handle,
9560 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9561 mpt3sas_get_port_by_id(ioc,
9562 sas_device_pg0.PhysicalPort, 0));
9563
9564 _scsih_add_device(ioc, handle, 0, 1);
9565
9566 break;
9567
9568 case MPI2_RAID_PD_STATE_OFFLINE:
9569 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9570 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
9571 default:
9572 break;
9573 }
9574}
9575
9576
9577
9578
9579
9580
9581
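/**
 * _scsih_sas_ir_operation_status_event_debug - debug for IR operation event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */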
9582static void
9583_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9584 Mpi2EventDataIrOperationStatus_t *event_data)
9585{
9586 char *reason_str = NULL;
9587
9588 switch (event_data->RAIDOperation) {
9589 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9590 reason_str = "resync";
9591 break;
9592 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9593 reason_str = "online capacity expansion";
9594 break;
9595 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9596 reason_str = "consistency check";
9597 break;
9598 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9599 reason_str = "background init";
9600 break;
9601 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9602 reason_str = "make data consistent";
9603 break;
9604 }
9605
9606 if (!reason_str)
9607 return;
9608
9609 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9610 reason_str,
9611 le16_to_cpu(event_data->VolDevHandle),
9612 event_data->PercentComplete);
9613}
9614
9615
9616
9617
9618
9619
9620
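/**
 * _scsih_sas_ir_operation_status_event - handle RAID operation events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Only resync progress is tracked; the percent complete is cached in the
 * raid_device object.
 */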
9621static void
9622_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9623 struct fw_event_work *fw_event)
9624{
9625 Mpi2EventDataIrOperationStatus_t *event_data =
9626 (Mpi2EventDataIrOperationStatus_t *)
9627 fw_event->event_data;
9628 static struct _raid_device *raid_device;
9629 unsigned long flags;
9630 u16 handle;
9631
9632 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9633 (!ioc->hide_ir_msg))
9634 _scsih_sas_ir_operation_status_event_debug(ioc,
9635 event_data);
9636
9637
9638 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9639
9640 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9641 handle = le16_to_cpu(event_data->VolDevHandle);
9642 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9643 if (raid_device)
9644 raid_device->percent_complete =
9645 event_data->PercentComplete;
9646 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9647 }
9648}
9649
9650
9651
9652
9653
9654
9655
9656
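/**
 * _scsih_prep_device_scan - initialize parameters prior to device scan
 * @ioc: per adapter object
 *
 * Set the deleted flag prior to device scan.  If the device is found during
 * the scan, then the deleted flag is cleared.
 */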
9657static void
9658_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9659{
9660 struct MPT3SAS_DEVICE *sas_device_priv_data;
9661 struct scsi_device *sdev;
9662
9663 shost_for_each_device(sdev, ioc->shost) {
9664 sas_device_priv_data = sdev->hostdata;
9665 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9666 sas_device_priv_data->sas_target->deleted = 1;
9667 }
9668}
9669
9670
9671
9672
9673
9674
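/**
 * _scsih_update_device_qdepth - update queue depths after reset
 * @ioc: per adapter object
 *
 * Re-applies the firmware reported queue depth limits (NVMe, SSP wide or
 * narrow port, SATA) to every attached scsi device.
 */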
9675static void
9676_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
9677{
9678 struct MPT3SAS_DEVICE *sas_device_priv_data;
9679 struct MPT3SAS_TARGET *sas_target_priv_data;
9680 struct _sas_device *sas_device;
9681 struct scsi_device *sdev;
9682 u16 qdepth;
9683
9684 ioc_info(ioc, "Update devices with firmware reported queue depth\n");
9685 shost_for_each_device(sdev, ioc->shost) {
9686 sas_device_priv_data = sdev->hostdata;
9687 if (sas_device_priv_data && sas_device_priv_data->sas_target) {
9688 sas_target_priv_data = sas_device_priv_data->sas_target;
9689 sas_device = sas_device_priv_data->sas_target->sas_dev;
9690 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
9691 qdepth = ioc->max_nvme_qd;
9692 else if (sas_device &&
9693 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
9694 qdepth = (sas_device->port_type > 1) ?
9695 ioc->max_wideport_qd : ioc->max_narrowport_qd;
9696 else if (sas_device &&
9697 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
9698 qdepth = ioc->max_sata_qd;
9699 else
9700 continue;
9701 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
9702 }
9703 }
9704}
9705
9706
9707
9708
9709
9710
9711
9712
9713
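/**
 * _scsih_mark_responding_sas_device - mark a sas_device as responding
 * @ioc: per adapter object
 * @sas_device_pg0: SAS Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
 */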
9714static void
9715_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9716Mpi2SasDevicePage0_t *sas_device_pg0)
9717{
9718 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9719 struct scsi_target *starget;
9720 struct _sas_device *sas_device = NULL;
9721 struct _enclosure_node *enclosure_dev = NULL;
9722 unsigned long flags;
9723 struct hba_port *port = mpt3sas_get_port_by_id(
9724 ioc, sas_device_pg0->PhysicalPort, 0);
9725
9726 if (sas_device_pg0->EnclosureHandle) {
9727 enclosure_dev =
9728 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9729 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9730 if (enclosure_dev == NULL)
9731 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9732				 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9733 }
9734 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9735 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
9736 if (sas_device->sas_address != le64_to_cpu(
9737 sas_device_pg0->SASAddress))
9738 continue;
9739 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9740 continue;
9741 if (sas_device->port != port)
9742 continue;
9743 sas_device->responding = 1;
9744 starget = sas_device->starget;
9745 if (starget && starget->hostdata) {
9746 sas_target_priv_data = starget->hostdata;
9747 sas_target_priv_data->tm_busy = 0;
9748 sas_target_priv_data->deleted = 0;
9749 } else
9750 sas_target_priv_data = NULL;
9751 if (starget) {
9752 starget_printk(KERN_INFO, starget,
9753 "handle(0x%04x), sas_addr(0x%016llx)\n",
9754 le16_to_cpu(sas_device_pg0->DevHandle),
9755 (unsigned long long)
9756 sas_device->sas_address);
9757
9758 if (sas_device->enclosure_handle != 0)
9759 starget_printk(KERN_INFO, starget,
9760 "enclosure logical id(0x%016llx), slot(%d)\n",
9761 (unsigned long long)
9762 sas_device->enclosure_logical_id,
9763 sas_device->slot);
9764 }
9765 if (le16_to_cpu(sas_device_pg0->Flags) &
9766 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9767 sas_device->enclosure_level =
9768 sas_device_pg0->EnclosureLevel;
9769 memcpy(&sas_device->connector_name[0],
9770 &sas_device_pg0->ConnectorName[0], 4);
9771 } else {
9772 sas_device->enclosure_level = 0;
9773 sas_device->connector_name[0] = '\0';
9774 }
9775
9776 sas_device->enclosure_handle =
9777 le16_to_cpu(sas_device_pg0->EnclosureHandle);
9778 sas_device->is_chassis_slot_valid = 0;
9779 if (enclosure_dev) {
9780 sas_device->enclosure_logical_id = le64_to_cpu(
9781 enclosure_dev->pg0.EnclosureLogicalID);
9782 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9783 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9784 sas_device->is_chassis_slot_valid = 1;
9785 sas_device->chassis_slot =
9786 enclosure_dev->pg0.ChassisSlot;
9787 }
9788 }
9789
9790 if (sas_device->handle == le16_to_cpu(
9791 sas_device_pg0->DevHandle))
9792 goto out;
9793 pr_info("\thandle changed from(0x%04x)!!!\n",
9794 sas_device->handle);
9795 sas_device->handle = le16_to_cpu(
9796 sas_device_pg0->DevHandle);
9797 if (sas_target_priv_data)
9798 sas_target_priv_data->handle =
9799 le16_to_cpu(sas_device_pg0->DevHandle);
9800 goto out;
9801 }
9802 out:
9803 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9804}
9805
9806
9807
9808
9809
9810
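/**
 * _scsih_create_enclosure_list_after_reset - free and re-create the
 * enclosure list
 * @ioc: per adapter object
 *
 * Frees the cached enclosure list and re-reads enclosure page 0 for every
 * handle reported by the firmware.
 */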
9811static void
9812_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9813{
9814 struct _enclosure_node *enclosure_dev;
9815 Mpi2ConfigReply_t mpi_reply;
9816 u16 enclosure_handle;
9817 int rc;
9818
9819
9820 mpt3sas_free_enclosure_list(ioc);
9821
9822
9823 enclosure_handle = 0xFFFF;
9824 do {
9825 enclosure_dev =
9826 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9827 if (!enclosure_dev) {
9828 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9829 __FILE__, __LINE__, __func__);
9830 return;
9831 }
9832 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9833 &enclosure_dev->pg0,
9834 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9835 enclosure_handle);
9836
9837 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9838 MPI2_IOCSTATUS_MASK)) {
9839 kfree(enclosure_dev);
9840 return;
9841 }
9842 list_add_tail(&enclosure_dev->list,
9843 &ioc->enclosure_list);
9844 enclosure_handle =
9845 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9846 } while (1);
9847}
9848
9849
9850
9851
9852
9853
9854
9855
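/**
 * _scsih_search_responding_sas_devices - search for responding end devices
 * @ioc: per adapter object
 *
 * After host reset, walk SAS device page 0 and mark every end device that
 * is still present as responding.
 */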
9856static void
9857_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9858{
9859 Mpi2SasDevicePage0_t sas_device_pg0;
9860 Mpi2ConfigReply_t mpi_reply;
9861 u16 ioc_status;
9862 u16 handle;
9863 u32 device_info;
9864
9865 ioc_info(ioc, "search for end-devices: start\n");
9866
9867 if (list_empty(&ioc->sas_device_list))
9868 goto out;
9869
9870 handle = 0xFFFF;
9871 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9872 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9873 handle))) {
9874 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9875 MPI2_IOCSTATUS_MASK;
9876 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9877 break;
9878 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9879 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9880 if (!(_scsih_is_end_device(device_info)))
9881 continue;
9882 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9883 }
9884
9885 out:
9886 ioc_info(ioc, "search for end-devices: complete\n");
9887}
9888
9889
9890
9891
9892
9893
9894
9895
9896
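/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
 */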
9897static void
9898_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9899 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9900{
9901 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9902 struct scsi_target *starget;
9903 struct _pcie_device *pcie_device;
9904 unsigned long flags;
9905
9906 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9907 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
9908 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9909 && (pcie_device->slot == le16_to_cpu(
9910 pcie_device_pg0->Slot))) {
9911 pcie_device->access_status =
9912 pcie_device_pg0->AccessStatus;
9913 pcie_device->responding = 1;
9914 starget = pcie_device->starget;
9915 if (starget && starget->hostdata) {
9916 sas_target_priv_data = starget->hostdata;
9917 sas_target_priv_data->tm_busy = 0;
9918 sas_target_priv_data->deleted = 0;
9919 } else
9920 sas_target_priv_data = NULL;
9921 if (starget) {
9922 starget_printk(KERN_INFO, starget,
9923 "handle(0x%04x), wwid(0x%016llx) ",
9924 pcie_device->handle,
9925 (unsigned long long)pcie_device->wwid);
9926 if (pcie_device->enclosure_handle != 0)
9927 starget_printk(KERN_INFO, starget,
9928 "enclosure logical id(0x%016llx), "
9929 "slot(%d)\n",
9930 (unsigned long long)
9931 pcie_device->enclosure_logical_id,
9932 pcie_device->slot);
9933 }
9934
9935 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9936 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9937 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9938 pcie_device->enclosure_level =
9939 pcie_device_pg0->EnclosureLevel;
9940 memcpy(&pcie_device->connector_name[0],
9941 &pcie_device_pg0->ConnectorName[0], 4);
9942 } else {
9943 pcie_device->enclosure_level = 0;
9944 pcie_device->connector_name[0] = '\0';
9945 }
9946
9947 if (pcie_device->handle == le16_to_cpu(
9948 pcie_device_pg0->DevHandle))
9949 goto out;
9950 pr_info("\thandle changed from(0x%04x)!!!\n",
9951 pcie_device->handle);
9952 pcie_device->handle = le16_to_cpu(
9953 pcie_device_pg0->DevHandle);
9954 if (sas_target_priv_data)
9955 sas_target_priv_data->handle =
9956 le16_to_cpu(pcie_device_pg0->DevHandle);
9957 goto out;
9958 }
9959 }
9960
9961 out:
9962 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9963}
9964
9965
9966
9967
9968
9969
9970
9971
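/**
 * _scsih_search_responding_pcie_devices - search for responding PCIe devices
 * @ioc: per adapter object
 *
 * After host reset, walk PCIe device page 0 and mark every NVMe/PCIe
 * device that is still present as responding.
 */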
9972static void
9973_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9974{
9975 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9976 Mpi2ConfigReply_t mpi_reply;
9977 u16 ioc_status;
9978 u16 handle;
9979 u32 device_info;
9980
9981	ioc_info(ioc, "search for PCIe end-devices: start\n");
9982
9983 if (list_empty(&ioc->pcie_device_list))
9984 goto out;
9985
9986 handle = 0xFFFF;
9987 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9988 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9989 handle))) {
9990 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9991 MPI2_IOCSTATUS_MASK;
9992 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9993 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9994 __func__, ioc_status,
9995 le32_to_cpu(mpi_reply.IOCLogInfo));
9996 break;
9997 }
9998 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9999 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
10000 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
10001 continue;
10002 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
10003 }
10004out:
10005 ioc_info(ioc, "search for PCIe end-devices: complete\n");
10006}
10007
10008
10009
10010
10011
10012
10013
10014
10015
10016
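/**
 * _scsih_mark_responding_raid_device - mark a raid_device as responding
 * @ioc: per adapter object
 * @wwid: world wide identifier for raid volume
 * @handle: device handle
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
 */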
10017static void
10018_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
10019 u16 handle)
10020{
10021 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
10022 struct scsi_target *starget;
10023 struct _raid_device *raid_device;
10024 unsigned long flags;
10025
10026 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10027 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
10028 if (raid_device->wwid == wwid && raid_device->starget) {
10029 starget = raid_device->starget;
10030 if (starget && starget->hostdata) {
10031 sas_target_priv_data = starget->hostdata;
10032 sas_target_priv_data->deleted = 0;
10033 } else
10034 sas_target_priv_data = NULL;
10035 raid_device->responding = 1;
10036 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10037 starget_printk(KERN_INFO, raid_device->starget,
10038 "handle(0x%04x), wwid(0x%016llx)\n", handle,
10039 (unsigned long long)raid_device->wwid);
10040
10041
10042
10043
10044
10045
10046 mpt3sas_init_warpdrive_properties(ioc, raid_device);
10047 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10048 if (raid_device->handle == handle) {
10049 spin_unlock_irqrestore(&ioc->raid_device_lock,
10050 flags);
10051 return;
10052 }
10053 pr_info("\thandle changed from(0x%04x)!!!\n",
10054 raid_device->handle);
10055 raid_device->handle = handle;
10056 if (sas_target_priv_data)
10057 sas_target_priv_data->handle = handle;
10058 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10059 return;
10060 }
10061 }
10062 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10063}
10064
10065
10066
10067
10068
10069
10070
10071
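/**
 * _scsih_search_responding_raid_devices - search for responding raid volumes
 * @ioc: per adapter object
 *
 * After host reset, mark the volumes that are still present as responding
 * and rebuild the pd_handles bitmap from the firmware's physical disk pages.
 */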
10072static void
10073_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
10074{
10075 Mpi2RaidVolPage1_t volume_pg1;
10076 Mpi2RaidVolPage0_t volume_pg0;
10077 Mpi2RaidPhysDiskPage0_t pd_pg0;
10078 Mpi2ConfigReply_t mpi_reply;
10079 u16 ioc_status;
10080 u16 handle;
10081 u8 phys_disk_num;
10082
10083 if (!ioc->ir_firmware)
10084 return;
10085
10086 ioc_info(ioc, "search for raid volumes: start\n");
10087
10088 if (list_empty(&ioc->raid_device_list))
10089 goto out;
10090
10091 handle = 0xFFFF;
10092 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10093 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10094 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10095 MPI2_IOCSTATUS_MASK;
10096 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10097 break;
10098 handle = le16_to_cpu(volume_pg1.DevHandle);
10099
10100 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10101 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10102 sizeof(Mpi2RaidVolPage0_t)))
10103 continue;
10104
10105 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10106 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10107 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10108 _scsih_mark_responding_raid_device(ioc,
10109 le64_to_cpu(volume_pg1.WWID), handle);
10110 }
10111
10112
10113 if (!ioc->is_warpdrive) {
10114 phys_disk_num = 0xFF;
10115 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10116 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10117 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10118 phys_disk_num))) {
10119 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10120 MPI2_IOCSTATUS_MASK;
10121 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10122 break;
10123 phys_disk_num = pd_pg0.PhysDiskNum;
10124 handle = le16_to_cpu(pd_pg0.DevHandle);
10125 set_bit(handle, ioc->pd_handles);
10126 }
10127 }
10128 out:
10129 ioc_info(ioc, "search for responding raid volumes: complete\n");
10130}
10131
10132
10133
10134
10135
10136
10137
10138
10139
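/**
 * _scsih_mark_responding_expander - mark an expander as responding
 * @ioc: per adapter object
 * @expander_pg0: SAS Expander page 0
 *
 * After host reset, find out whether expanders are still responding.
 * Used in _scsih_remove_unresponding_devices.
 */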
10140static void
10141_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10142 Mpi2ExpanderPage0_t *expander_pg0)
10143{
10144 struct _sas_node *sas_expander = NULL;
10145 unsigned long flags;
10146 int i;
10147 struct _enclosure_node *enclosure_dev = NULL;
10148 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10149 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10150 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10151 struct hba_port *port = mpt3sas_get_port_by_id(
10152 ioc, expander_pg0->PhysicalPort, 0);
10153
10154 if (enclosure_handle)
10155 enclosure_dev =
10156 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10157 enclosure_handle);
10158
10159 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10160 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10161 if (sas_expander->sas_address != sas_address)
10162 continue;
10163 if (sas_expander->port != port)
10164 continue;
10165 sas_expander->responding = 1;
10166
10167 if (enclosure_dev) {
10168 sas_expander->enclosure_logical_id =
10169 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10170 sas_expander->enclosure_handle =
10171 le16_to_cpu(expander_pg0->EnclosureHandle);
10172 }
10173
10174 if (sas_expander->handle == handle)
10175 goto out;
10176 pr_info("\texpander(0x%016llx): handle changed" \
10177 " from(0x%04x) to (0x%04x)!!!\n",
10178 (unsigned long long)sas_expander->sas_address,
10179 sas_expander->handle, handle);
10180 sas_expander->handle = handle;
10181 for (i = 0 ; i < sas_expander->num_phys ; i++)
10182 sas_expander->phy[i].handle = handle;
10183 goto out;
10184 }
10185 out:
10186 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10187}
10188
10189
10190
10191
10192
10193
10194
10195
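/**
 * _scsih_search_responding_expanders - search for responding expanders
 * @ioc: per adapter object
 */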
10196static void
10197_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10198{
10199 Mpi2ExpanderPage0_t expander_pg0;
10200 Mpi2ConfigReply_t mpi_reply;
10201 u16 ioc_status;
10202 u64 sas_address;
10203 u16 handle;
10204 u8 port;
10205
10206 ioc_info(ioc, "search for expanders: start\n");
10207
10208 if (list_empty(&ioc->sas_expander_list))
10209 goto out;
10210
10211 handle = 0xFFFF;
10212 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10213 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10214
10215 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10216 MPI2_IOCSTATUS_MASK;
10217 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10218 break;
10219
10220 handle = le16_to_cpu(expander_pg0.DevHandle);
10221 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10222 port = expander_pg0.PhysicalPort;
10223 pr_info(
10224 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10225 handle, (unsigned long long)sas_address,
10226 (ioc->multipath_on_hba ?
10227 port : MULTIPATH_DISABLED_PORT_ID));
10228 _scsih_mark_responding_expander(ioc, &expander_pg0);
10229 }
10230
10231 out:
10232 ioc_info(ioc, "search for expanders: complete\n");
10233}
10234
10235
10236
10237
10238
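/**
 * _scsih_remove_unresponding_devices - remove devices that stopped responding
 * @ioc: per adapter object
 *
 * Called after host reset: every end device, PCIe device, volume and
 * expander that was not marked responding during the rescan is removed
 * from the driver and from the SCSI midlayer.
 */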
10239static void
10240_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10241{
10242 struct _sas_device *sas_device, *sas_device_next;
10243 struct _sas_node *sas_expander, *sas_expander_next;
10244 struct _raid_device *raid_device, *raid_device_next;
10245 struct _pcie_device *pcie_device, *pcie_device_next;
10246 struct list_head tmp_list;
10247 unsigned long flags;
10248 LIST_HEAD(head);
10249
10250 ioc_info(ioc, "removing unresponding devices: start\n");
10251
10252
10253 ioc_info(ioc, "removing unresponding devices: end-devices\n");
10254
10255
10256
10257
10258 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10259
10260
10261
10262
10263
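	/*
	 * Clean out the sas_device_init_list ahead of the fresh scan;
	 * these entries only need their references dropped.
	 */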
10264 list_for_each_entry_safe(sas_device, sas_device_next,
10265 &ioc->sas_device_init_list, list) {
10266 list_del_init(&sas_device->list);
10267 sas_device_put(sas_device);
10268 }
10269
10270 list_for_each_entry_safe(sas_device, sas_device_next,
10271 &ioc->sas_device_list, list) {
10272 if (!sas_device->responding)
10273 list_move_tail(&sas_device->list, &head);
10274 else
10275 sas_device->responding = 0;
10276 }
10277 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10278
10279
10280
10281
10282 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10283 _scsih_remove_device(ioc, sas_device);
10284 list_del_init(&sas_device->list);
10285 sas_device_put(sas_device);
10286 }
10287
10288	ioc_info(ioc, "removing unresponding devices: pcie end-devices\n");
10289 INIT_LIST_HEAD(&head);
10290 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10291
10292
10293
10294
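	/*
	 * Likewise clean out the pcie_device_init_list ahead of the
	 * fresh scan.
	 */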
10295 list_for_each_entry_safe(pcie_device, pcie_device_next,
10296 &ioc->pcie_device_init_list, list) {
10297 list_del_init(&pcie_device->list);
10298 pcie_device_put(pcie_device);
10299 }
10300
10301 list_for_each_entry_safe(pcie_device, pcie_device_next,
10302 &ioc->pcie_device_list, list) {
10303 if (!pcie_device->responding)
10304 list_move_tail(&pcie_device->list, &head);
10305 else
10306 pcie_device->responding = 0;
10307 }
10308 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10309
10310 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10311 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10312 list_del_init(&pcie_device->list);
10313 pcie_device_put(pcie_device);
10314 }
10315
10316
10317 if (ioc->ir_firmware) {
10318 ioc_info(ioc, "removing unresponding devices: volumes\n");
10319 list_for_each_entry_safe(raid_device, raid_device_next,
10320 &ioc->raid_device_list, list) {
10321 if (!raid_device->responding)
10322 _scsih_sas_volume_delete(ioc,
10323 raid_device->handle);
10324 else
10325 raid_device->responding = 0;
10326 }
10327 }
10328
10329
10330 ioc_info(ioc, "removing unresponding devices: expanders\n");
10331 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10332 INIT_LIST_HEAD(&tmp_list);
10333 list_for_each_entry_safe(sas_expander, sas_expander_next,
10334 &ioc->sas_expander_list, list) {
10335 if (!sas_expander->responding)
10336 list_move_tail(&sas_expander->list, &tmp_list);
10337 else
10338 sas_expander->responding = 0;
10339 }
10340 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10341 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10342 list) {
10343 _scsih_expander_node_remove(ioc, sas_expander);
10344 }
10345
10346 ioc_info(ioc, "removing unresponding devices: complete\n");
10347
10348
10349 _scsih_ublock_io_all_device(ioc);
10350}
10351
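/**
 * _scsih_refresh_expander_links - refresh the phy links of an expander
 * @ioc: per adapter object
 * @sas_expander: expander object
 * @handle: expander device handle
 */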
10352static void
10353_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10354 struct _sas_node *sas_expander, u16 handle)
10355{
10356 Mpi2ExpanderPage1_t expander_pg1;
10357 Mpi2ConfigReply_t mpi_reply;
10358 int i;
10359
10360 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10361 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10362 &expander_pg1, i, handle))) {
10363 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10364 __FILE__, __LINE__, __func__);
10365 return;
10366 }
10367
10368 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10369 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10370 expander_pg1.NegotiatedLinkRate >> 4,
10371 sas_expander->port);
10372 }
10373}
10374
10375
10376
10377
10378
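/**
 * _scsih_scan_for_devices_after_reset - scan devices after host reset
 * @ioc: per adapter object
 *
 * Re-reads expander, physical disk, volume, SAS and PCIe device pages and
 * adds anything the driver does not already know about.
 */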
10379static void
10380_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10381{
10382 Mpi2ExpanderPage0_t expander_pg0;
10383 Mpi2SasDevicePage0_t sas_device_pg0;
10384 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10385 Mpi2RaidVolPage1_t *volume_pg1;
10386 Mpi2RaidVolPage0_t *volume_pg0;
10387 Mpi2RaidPhysDiskPage0_t pd_pg0;
10388 Mpi2EventIrConfigElement_t element;
10389 Mpi2ConfigReply_t mpi_reply;
10390 u8 phys_disk_num, port_id;
10391 u16 ioc_status;
10392 u16 handle, parent_handle;
10393 u64 sas_address;
10394 struct _sas_device *sas_device;
10395 struct _pcie_device *pcie_device;
10396 struct _sas_node *expander_device;
10397 static struct _raid_device *raid_device;
10398 u8 retry_count;
10399 unsigned long flags;
10400
10401 volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
10402 if (!volume_pg0)
10403 return;
10404
10405 volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
10406 if (!volume_pg1) {
10407 kfree(volume_pg0);
10408 return;
10409 }
10410
10411 ioc_info(ioc, "scan devices: start\n");
10412
10413 _scsih_sas_host_refresh(ioc);
10414
10415 ioc_info(ioc, "\tscan devices: expanders start\n");
10416
10417
10418 handle = 0xFFFF;
10419 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10420 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10421 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10422 MPI2_IOCSTATUS_MASK;
10423 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10424 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10425 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10426 break;
10427 }
10428 handle = le16_to_cpu(expander_pg0.DevHandle);
10429 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10430 port_id = expander_pg0.PhysicalPort;
10431 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10432 ioc, le64_to_cpu(expander_pg0.SASAddress),
10433 mpt3sas_get_port_by_id(ioc, port_id, 0));
10434 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10435 if (expander_device)
10436 _scsih_refresh_expander_links(ioc, expander_device,
10437 handle);
10438 else {
10439 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10440 handle,
10441 (u64)le64_to_cpu(expander_pg0.SASAddress));
10442 _scsih_expander_add(ioc, handle);
10443 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10444 handle,
10445 (u64)le64_to_cpu(expander_pg0.SASAddress));
10446 }
10447 }
10448
10449 ioc_info(ioc, "\tscan devices: expanders complete\n");
10450
10451 if (!ioc->ir_firmware)
10452 goto skip_to_sas;
10453
10454 ioc_info(ioc, "\tscan devices: phys disk start\n");
10455
10456
10457 phys_disk_num = 0xFF;
10458 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10459 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10460 phys_disk_num))) {
10461 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10462 MPI2_IOCSTATUS_MASK;
10463 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10464 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10465 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10466 break;
10467 }
10468 phys_disk_num = pd_pg0.PhysDiskNum;
10469 handle = le16_to_cpu(pd_pg0.DevHandle);
10470 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10471 if (sas_device) {
10472 sas_device_put(sas_device);
10473 continue;
10474 }
10475 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10476 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10477 handle) != 0)
10478 continue;
10479 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10480 MPI2_IOCSTATUS_MASK;
10481 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10482 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10483 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10484 break;
10485 }
10486 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10487 if (!_scsih_get_sas_address(ioc, parent_handle,
10488 &sas_address)) {
10489 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10490 handle,
10491 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10492 port_id = sas_device_pg0.PhysicalPort;
10493 mpt3sas_transport_update_links(ioc, sas_address,
10494 handle, sas_device_pg0.PhyNum,
10495 MPI2_SAS_NEG_LINK_RATE_1_5,
10496 mpt3sas_get_port_by_id(ioc, port_id, 0));
10497 set_bit(handle, ioc->pd_handles);
10498 retry_count = 0;
10499
10500
10501
10502
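			/*
			 * _scsih_add_device() returns non-zero when the add
			 * should be retried; keep retrying with a one second
			 * delay.
			 */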
10503 while (_scsih_add_device(ioc, handle, retry_count++,
10504 1)) {
10505 ssleep(1);
10506 }
10507 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10508 handle,
10509 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10510 }
10511 }
10512
10513 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10514
10515 ioc_info(ioc, "\tscan devices: volumes start\n");
10516
10517
10518 handle = 0xFFFF;
10519 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10520 volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10521 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10522 MPI2_IOCSTATUS_MASK;
10523 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10524 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10525 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10526 break;
10527 }
10528 handle = le16_to_cpu(volume_pg1->DevHandle);
10529 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10530 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10531 le64_to_cpu(volume_pg1->WWID));
10532 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10533 if (raid_device)
10534 continue;
10535 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10536 volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10537 sizeof(Mpi2RaidVolPage0_t)))
10538 continue;
10539 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10540 MPI2_IOCSTATUS_MASK;
10541 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10542 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10543 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10544 break;
10545 }
10546 if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10547 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10548 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10549 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10550 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10551 element.VolDevHandle = volume_pg1->DevHandle;
10552 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10553 volume_pg1->DevHandle);
10554 _scsih_sas_volume_add(ioc, &element);
10555 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10556 volume_pg1->DevHandle);
10557 }
10558 }
10559
10560 ioc_info(ioc, "\tscan devices: volumes complete\n");
10561
10562 skip_to_sas:
10563
10564 ioc_info(ioc, "\tscan devices: end devices start\n");
10565
10566
10567 handle = 0xFFFF;
10568 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10569 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10570 handle))) {
10571 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10572 MPI2_IOCSTATUS_MASK;
10573 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10574 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10575 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10576 break;
10577 }
10578 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10579 if (!(_scsih_is_end_device(
10580 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10581 continue;
10582 port_id = sas_device_pg0.PhysicalPort;
10583 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10584 le64_to_cpu(sas_device_pg0.SASAddress),
10585 mpt3sas_get_port_by_id(ioc, port_id, 0));
10586 if (sas_device) {
10587 sas_device_put(sas_device);
10588 continue;
10589 }
10590 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10591 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10592 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10593 handle,
10594 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10595 mpt3sas_transport_update_links(ioc, sas_address, handle,
10596 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10597 mpt3sas_get_port_by_id(ioc, port_id, 0));
10598 retry_count = 0;
10599
10600
10601
10602
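 /*
  * _scsih_add_device() returns nonzero when the add should be
  * retried; keep retrying, sleeping one second between attempts.
  */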
10603 while (_scsih_add_device(ioc, handle, retry_count++,
10604 0)) {
10605 ssleep(1);
10606 }
10607 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10608 handle,
10609 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10610 }
10611 }
10612 ioc_info(ioc, "\tscan devices: end devices complete\n");
10613 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10614
10615
10616 handle = 0xFFFF;
10617 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10618 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10619 handle))) {
10620 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10621 MPI2_IOCSTATUS_MASK;
10622 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10623 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10624 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10625 break;
10626 }
10627 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10628 if (!(_scsih_is_nvme_pciescsi_device(
10629 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10630 continue;
10631 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10632 le64_to_cpu(pcie_device_pg0.WWID));
10633 if (pcie_device) {
10634 pcie_device_put(pcie_device);
10635 continue;
10636 }
10637 retry_count = 0;
10638 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10639 _scsih_pcie_add_device(ioc, handle);
10640
10641 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10642 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10643 }
10644
10645 kfree(volume_pg0);
10646 kfree(volume_pg1);
10647
10648 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10649 ioc_info(ioc, "scan devices: complete\n");
10650}
10651
10652
10653
10654
10655
10656
10657
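/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */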
10658void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
10659{
10660 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10661}
10662
10663
10664
10665
10666
10667
10668
10669
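/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 *	scsih internal and task management commands.
 * @ioc: per adapter object
 *
 * Frees the smids of any pending scsih/tm internal commands, clears the
 * pending device add/remove bitmaps, and flushes the firmware event queue
 * and all running SCSI commands.
 */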
10670void
10671mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10672{
10673 dtmprintk(ioc,
10674 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
10675 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10676 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10677 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10678 complete(&ioc->scsih_cmds.done);
10679 }
10680 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10681 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10682 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10683 complete(&ioc->tm_cmds.done);
10684 }
10685
10686 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10687 memset(ioc->device_remove_in_progress, 0,
10688 ioc->device_remove_in_progress_sz);
10689 _scsih_fw_event_cleanup_queue(ioc);
10690 _scsih_flush_running_cmds(ioc);
10691}
10692
10693
10694
10695
10696
10697
10698
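/**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization after a
 * host reset: refresh ports and virtual phys (when multipath is enabled),
 * prepare a fresh device scan and schedule removal of devices that are no
 * longer responding.
 */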
10699void
10700mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10701{
10702 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10703 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
10704 if (ioc->multipath_on_hba) {
10705 _scsih_sas_port_refresh(ioc);
10706 _scsih_update_vphys_after_reset(ioc);
10707 }
10708 _scsih_prep_device_scan(ioc);
10709 _scsih_create_enclosure_list_after_reset(ioc);
10710 _scsih_search_responding_sas_devices(ioc);
10711 _scsih_search_responding_pcie_devices(ioc);
10712 _scsih_search_responding_raid_devices(ioc);
10713 _scsih_search_responding_expanders(ioc);
10714 _scsih_error_recovery_delete_devices(ioc);
10715 }
10716}
10717
10718
10719
10720
10721
10722
10723
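/**
 * _mpt3sas_fw_work - delayed task for processing firmware events
 * @ioc: per adapter object
 * @fw_event: the fw_event_work object
 * Context: user.
 */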
10724static void
10725_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10726{
10727 ioc->current_event = fw_event;
10728 _scsih_fw_event_del_from_list(ioc, fw_event);
10729
10730
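 /*
  * Ignore the event when the host is being removed or PCI error
  * recovery is in progress.
  */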
10731 if (ioc->remove_host || ioc->pci_error_recovery) {
10732 fw_event_work_put(fw_event);
10733 ioc->current_event = NULL;
10734 return;
10735 }
10736
10737 switch (fw_event->event) {
10738 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10739 mpt3sas_process_trigger_data(ioc,
10740 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10741 fw_event->event_data);
10742 break;
10743 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10744 while (scsi_host_in_recovery(ioc->shost) ||
10745 ioc->shost_recovery) {
10746
10747
10748
10749
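 /*
  * Bail out instead of waiting forever when the host is being
  * removed or the firmware event queue is being cleaned up.
  */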
10750 if (ioc->remove_host || ioc->fw_events_cleanup)
10751 goto out;
10752 ssleep(1);
10753 }
10754 _scsih_remove_unresponding_devices(ioc);
10755 _scsih_del_dirty_vphy(ioc);
10756 _scsih_del_dirty_port_entries(ioc);
10757 if (ioc->is_gen35_ioc)
10758 _scsih_update_device_qdepth(ioc);
10759 _scsih_scan_for_devices_after_reset(ioc);
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
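 /*
  * If a diag reset occurred while the driver was still loading,
  * finish the load here: register the discovered devices with
  * the SCSI midlayer, clear is_driver_loading and start the
  * watchdog.  On the normal load path scsih_scan_finished()
  * takes care of this.
  */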
10770 if (ioc->is_driver_loading)
10771 _scsih_complete_devices_scanning(ioc);
10772 _scsih_set_nvme_max_shutdown_latency(ioc);
10773 break;
10774 case MPT3SAS_PORT_ENABLE_COMPLETE:
10775 ioc->start_scan = 0;
10776 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10777 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10778 missing_delay[1]);
10779 dewtprintk(ioc,
10780 ioc_info(ioc, "port enable: complete from worker thread\n"));
10781 break;
10782 case MPT3SAS_TURN_ON_PFA_LED:
10783 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10784 break;
10785 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10786 _scsih_sas_topology_change_event(ioc, fw_event);
10787 break;
10788 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10789 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10790 _scsih_sas_device_status_change_event_debug(ioc,
10791 (Mpi2EventDataSasDeviceStatusChange_t *)
10792 fw_event->event_data);
10793 break;
10794 case MPI2_EVENT_SAS_DISCOVERY:
10795 _scsih_sas_discovery_event(ioc, fw_event);
10796 break;
10797 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10798 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10799 break;
10800 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10801 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10802 break;
10803 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10804 _scsih_sas_enclosure_dev_status_change_event(ioc,
10805 fw_event);
10806 break;
10807 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10808 _scsih_sas_ir_config_change_event(ioc, fw_event);
10809 break;
10810 case MPI2_EVENT_IR_VOLUME:
10811 _scsih_sas_ir_volume_event(ioc, fw_event);
10812 break;
10813 case MPI2_EVENT_IR_PHYSICAL_DISK:
10814 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10815 break;
10816 case MPI2_EVENT_IR_OPERATION_STATUS:
10817 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10818 break;
10819 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10820 _scsih_pcie_device_status_change_event(ioc, fw_event);
10821 break;
10822 case MPI2_EVENT_PCIE_ENUMERATION:
10823 _scsih_pcie_enumeration_event(ioc, fw_event);
10824 break;
10825 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10826 _scsih_pcie_topology_change_event(ioc, fw_event);
10827 ioc->current_event = NULL;
10828 return;
10829 }
10830out:
10831 fw_event_work_put(fw_event);
10832 ioc->current_event = NULL;
10833}
10834
10835
10836
10837
10838
10839
10840
10841
10842
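/**
 * _firmware_event_work - pull the fw_event_work object out of the work
 *	element and hand it to _mpt3sas_fw_work() for processing
 * @work: the work_struct embedded in a fw_event_work object
 * Context: user.
 */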
10843static void
10844_firmware_event_work(struct work_struct *work)
10845{
10846 struct fw_event_work *fw_event = container_of(work,
10847 struct fw_event_work, work);
10848
10849 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10850}
10851
10852
10853
10854
10855
10856
10857
10858
10859
10860
10861
10862
10863
10864
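/**
 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked off in _firmware_event_work() in user context.
 *
 * Return: 1 meaning the mf should be freed from _base_interrupt,
 *	0 means the mf is freed from this function.
 */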
10865u8
10866mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10867 u32 reply)
10868{
10869 struct fw_event_work *fw_event;
10870 Mpi2EventNotificationReply_t *mpi_reply;
10871 u16 event;
10872 u16 sz;
10873 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10874
10875
10876 if (ioc->pci_error_recovery)
10877 return 1;
10878
10879 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10880
10881 if (unlikely(!mpi_reply)) {
10882 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10883 __FILE__, __LINE__, __func__);
10884 return 1;
10885 }
10886
10887 event = le16_to_cpu(mpi_reply->Event);
10888
10889 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10890 mpt3sas_trigger_event(ioc, event, 0);
10891
10892 switch (event) {
10893
10894 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10895 {
10896 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10897 (Mpi2EventDataSasBroadcastPrimitive_t *)
10898 mpi_reply->EventData;
10899
10900 if (baen_data->Primitive !=
10901 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10902 return 1;
10903
10904 if (ioc->broadcast_aen_busy) {
10905 ioc->broadcast_aen_pending++;
10906 return 1;
10907 } else
10908 ioc->broadcast_aen_busy = 1;
10909 break;
10910 }
10911
10912 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10913 _scsih_check_topo_delete_events(ioc,
10914 (Mpi2EventDataSasTopologyChangeList_t *)
10915 mpi_reply->EventData);
10916
10917
10918
10919
10920
10921
10922
10923
10924 if (ioc->shost_recovery)
10925 return 1;
10926 break;
10927 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10928 _scsih_check_pcie_topo_remove_events(ioc,
10929 (Mpi26EventDataPCIeTopologyChangeList_t *)
10930 mpi_reply->EventData);
10931 if (ioc->shost_recovery)
10932 return 1;
10933 break;
10934 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10935 _scsih_check_ir_config_unhide_events(ioc,
10936 (Mpi2EventDataIrConfigChangeList_t *)
10937 mpi_reply->EventData);
10938 break;
10939 case MPI2_EVENT_IR_VOLUME:
10940 _scsih_check_volume_delete_events(ioc,
10941 (Mpi2EventDataIrVolume_t *)
10942 mpi_reply->EventData);
10943 break;
10944 case MPI2_EVENT_LOG_ENTRY_ADDED:
10945 {
10946 Mpi2EventDataLogEntryAdded_t *log_entry;
10947 u32 *log_code;
10948
10949 if (!ioc->is_warpdrive)
10950 break;
10951
10952 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10953 mpi_reply->EventData;
10954 log_code = (u32 *)log_entry->LogData;
10955
10956 if (le16_to_cpu(log_entry->LogEntryQualifier)
10957 != MPT2_WARPDRIVE_LOGENTRY)
10958 break;
10959
10960 switch (le32_to_cpu(*log_code)) {
10961 case MPT2_WARPDRIVE_LC_SSDT:
10962 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10963 break;
10964 case MPT2_WARPDRIVE_LC_SSDLW:
10965 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10966 break;
10967 case MPT2_WARPDRIVE_LC_SSDLF:
10968 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10969 break;
10970 case MPT2_WARPDRIVE_LC_BRMF:
10971 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10972 break;
10973 }
10974
10975 break;
10976 }
10977 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10978 _scsih_sas_device_status_change_event(ioc,
10979 (Mpi2EventDataSasDeviceStatusChange_t *)
10980 mpi_reply->EventData);
10981 break;
10982 case MPI2_EVENT_IR_OPERATION_STATUS:
10983 case MPI2_EVENT_SAS_DISCOVERY:
10984 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10985 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10986 case MPI2_EVENT_IR_PHYSICAL_DISK:
10987 case MPI2_EVENT_PCIE_ENUMERATION:
10988 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10989 break;
10990
10991 case MPI2_EVENT_TEMP_THRESHOLD:
10992 _scsih_temp_threshold_events(ioc,
10993 (Mpi2EventDataTemperature_t *)
10994 mpi_reply->EventData);
10995 break;
10996 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10997 ActiveCableEventData =
10998 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10999 switch (ActiveCableEventData->ReasonCode) {
11000 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
11001 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
11002 ActiveCableEventData->ReceptacleID);
11003 pr_notice("cannot be powered and devices connected\n");
11004 pr_notice("to this active cable will not be seen\n");
11005 pr_notice("This active cable requires %d mW of power\n",
11006 le32_to_cpu(
11007 ActiveCableEventData->ActiveCablePowerRequirement));
11008 break;
11009
11010 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
11011 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
11012 ActiveCableEventData->ReceptacleID);
11013 pr_notice(
11014 "is not running at optimal speed(12 Gb/s rate)\n");
11015 break;
11016 }
11017
11018 break;
11019
11020 default:
11021 return 1;
11022 }
11023
11024 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
11025 fw_event = alloc_fw_event_work(sz);
11026 if (!fw_event) {
11027 ioc_err(ioc, "failure at %s:%d/%s()!\n",
11028 __FILE__, __LINE__, __func__);
11029 return 1;
11030 }
11031
11032 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
11033 fw_event->ioc = ioc;
11034 fw_event->VF_ID = mpi_reply->VF_ID;
11035 fw_event->VP_ID = mpi_reply->VP_ID;
11036 fw_event->event = event;
11037 _scsih_fw_event_add(ioc, fw_event);
11038 fw_event_work_put(fw_event);
11039 return 1;
11040}
11041
11042
11043
11044
11045
11046
11047
11048
11049
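/**
 * _scsih_expander_node_remove - removing expander device from list.
 * @ioc: per adapter object
 * @sas_expander: the sas expander node object
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_expander_list.
 */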
11050static void
11051_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
11052 struct _sas_node *sas_expander)
11053{
11054 struct _sas_port *mpt3sas_port, *next;
11055 unsigned long flags;
11056
11057
11058 list_for_each_entry_safe(mpt3sas_port, next,
11059 &sas_expander->sas_port_list, port_list) {
11060 if (ioc->shost_recovery)
11061 return;
11062 if (mpt3sas_port->remote_identify.device_type ==
11063 SAS_END_DEVICE)
11064 mpt3sas_device_remove_by_sas_address(ioc,
11065 mpt3sas_port->remote_identify.sas_address,
11066 mpt3sas_port->hba_port);
11067 else if (mpt3sas_port->remote_identify.device_type ==
11068 SAS_EDGE_EXPANDER_DEVICE ||
11069 mpt3sas_port->remote_identify.device_type ==
11070 SAS_FANOUT_EXPANDER_DEVICE)
11071 mpt3sas_expander_remove(ioc,
11072 mpt3sas_port->remote_identify.sas_address,
11073 mpt3sas_port->hba_port);
11074 }
11075
11076 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
11077 sas_expander->sas_address_parent, sas_expander->port);
11078
11079 ioc_info(ioc,
11080 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
11081 sas_expander->handle, (unsigned long long)
11082 sas_expander->sas_address,
11083 sas_expander->port->port_id);
11084
11085 spin_lock_irqsave(&ioc->sas_node_lock, flags);
11086 list_del(&sas_expander->list);
11087 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
11088
11089 kfree(sas_expander->phy);
11090 kfree(sas_expander);
11091}
11092
11093
11094
11095
11096
11097
11098
11099
11100
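/**
 * _scsih_nvme_shutdown - NVMe shutdown notification
 * @ioc: per adapter object
 *
 * Sends an IO Unit Control request with the SHUTDOWN operation so that the
 * firmware can cleanly shut down attached NVMe drives, then waits up to
 * ioc->max_shutdown_latency seconds for completion.  Called during system
 * shutdown and suspend when PCIe devices are present.
 */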
11101static void
11102_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
11103{
11104 Mpi26IoUnitControlRequest_t *mpi_request;
11105 Mpi26IoUnitControlReply_t *mpi_reply;
11106 u16 smid;
11107
11108
11109 if (list_empty(&ioc->pcie_device_list))
11110 return;
11111
11112 mutex_lock(&ioc->scsih_cmds.mutex);
11113
11114 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11115 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11116 goto out;
11117 }
11118
11119 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11120
11121 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11122 if (!smid) {
11123 ioc_err(ioc,
11124 "%s: failed obtaining a smid\n", __func__);
11125 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11126 goto out;
11127 }
11128
11129 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11130 ioc->scsih_cmds.smid = smid;
11131 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
11132 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
11133 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
11134
11135 init_completion(&ioc->scsih_cmds.done);
11136 ioc->put_smid_default(ioc, smid);
11137
11138 ioc_info(ioc,
11139 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
11140 ioc->max_shutdown_latency);
11141 wait_for_completion_timeout(&ioc->scsih_cmds.done,
11142 ioc->max_shutdown_latency*HZ);
11143
11144 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11145 ioc_err(ioc, "%s: timeout\n", __func__);
11146 goto out;
11147 }
11148
11149 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11150 mpi_reply = ioc->scsih_cmds.reply;
11151 ioc_info(ioc, "Io Unit Control shutdown (complete):"
11152 "ioc_status(0x%04x), loginfo(0x%08x)\n",
11153 le16_to_cpu(mpi_reply->IOCStatus),
11154 le32_to_cpu(mpi_reply->IOCLogInfo));
11155 }
11156 out:
11157 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11158 mutex_unlock(&ioc->scsih_cmds.mutex);
11159}
11160
11161
11162
11163
11164
11165
11166
11167
11168
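/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sends a SYSTEM_SHUTDOWN_INITIATED RAID action so that the IR firmware can
 * flush its volume cache.  Called during system shutdown and driver unload
 * when RAID volumes are present.
 */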
11169static void
11170_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
11171{
11172 Mpi2RaidActionRequest_t *mpi_request;
11173 Mpi2RaidActionReply_t *mpi_reply;
11174 u16 smid;
11175
11176
11177 if (!ioc->ir_firmware)
11178 return;
11179
11180
11181 if (list_empty(&ioc->raid_device_list))
11182 return;
11183
11184 mutex_lock(&ioc->scsih_cmds.mutex);
11185
11186 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11187 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11188 goto out;
11189 }
11190 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11191
11192 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11193 if (!smid) {
11194 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
11195 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11196 goto out;
11197 }
11198
11199 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11200 ioc->scsih_cmds.smid = smid;
11201 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11202
11203 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11204 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
11205
11206 if (!ioc->hide_ir_msg)
11207 ioc_info(ioc, "IR shutdown (sending)\n");
11208 init_completion(&ioc->scsih_cmds.done);
11209 ioc->put_smid_default(ioc, smid);
11210 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11211
11212 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11213 ioc_err(ioc, "%s: timeout\n", __func__);
11214 goto out;
11215 }
11216
11217 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11218 mpi_reply = ioc->scsih_cmds.reply;
11219 if (!ioc->hide_ir_msg)
11220 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11221 le16_to_cpu(mpi_reply->IOCStatus),
11222 le32_to_cpu(mpi_reply->IOCLogInfo));
11223 }
11224
11225 out:
11226 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11227 mutex_unlock(&ioc->scsih_cmds.mutex);
11228}
11229
11230
11231
11232
11233
11234
11235
11236
11237
11238
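/**
 * _scsih_get_shost_and_ioc - get shost and ioc from the pci device and
 *	verify that neither is NULL
 * @pdev: PCI device struct
 * @shost: address in which to return the Scsi_Host pointer
 * @ioc: address in which to return the MPT3SAS_ADAPTER pointer
 *
 * Return: 0 on success, -ENXIO if either pointer is NULL.
 */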
11239static int
11240_scsih_get_shost_and_ioc(struct pci_dev *pdev,
11241 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11242{
11243 *shost = pci_get_drvdata(pdev);
11244 if (*shost == NULL) {
11245 dev_err(&pdev->dev, "pdev's driver data is null\n");
11246 return -ENXIO;
11247 }
11248
11249 *ioc = shost_priv(*shost);
11250 if (*ioc == NULL) {
11251 dev_err(&pdev->dev, "shost's private data is null\n");
11252 return -ENXIO;
11253 }
11254
11255 return 0;
11256}
11257
11258
11259
11260
11261
11262
11263
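/**
 * scsih_remove - detach and remove add host
 * @pdev: PCI device struct
 *
 * Routine called when unloading the driver.
 */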
11264static void scsih_remove(struct pci_dev *pdev)
11265{
11266 struct Scsi_Host *shost;
11267 struct MPT3SAS_ADAPTER *ioc;
11268 struct _sas_port *mpt3sas_port, *next_port;
11269 struct _raid_device *raid_device, *next;
11270 struct MPT3SAS_TARGET *sas_target_priv_data;
11271 struct _pcie_device *pcie_device, *pcienext;
11272 struct workqueue_struct *wq;
11273 unsigned long flags;
11274 Mpi2ConfigReply_t mpi_reply;
11275 struct hba_port *port, *port_next;
11276
11277 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11278 return;
11279
11280 ioc->remove_host = 1;
11281
11282 if (!pci_device_is_present(pdev))
11283 _scsih_flush_running_cmds(ioc);
11284
11285 _scsih_fw_event_cleanup_queue(ioc);
11286
11287 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11288 wq = ioc->firmware_event_thread;
11289 ioc->firmware_event_thread = NULL;
11290 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11291 if (wq)
11292 destroy_workqueue(wq);
11293
11294
11295
11296
11297 if (ioc->is_aero_ioc)
11298 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11299 &ioc->ioc_pg1_copy);
11300
11301 _scsih_ir_shutdown(ioc);
11302 mpt3sas_destroy_debugfs(ioc);
11303 sas_remove_host(shost);
11304 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11305 list) {
11306 if (raid_device->starget) {
11307 sas_target_priv_data =
11308 raid_device->starget->hostdata;
11309 sas_target_priv_data->deleted = 1;
11310 scsi_remove_target(&raid_device->starget->dev);
11311 }
11312 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11313 raid_device->handle, (u64)raid_device->wwid);
11314 _scsih_raid_device_remove(ioc, raid_device);
11315 }
11316 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11317 list) {
11318 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11319 list_del_init(&pcie_device->list);
11320 pcie_device_put(pcie_device);
11321 }
11322
11323
11324 list_for_each_entry_safe(mpt3sas_port, next_port,
11325 &ioc->sas_hba.sas_port_list, port_list) {
11326 if (mpt3sas_port->remote_identify.device_type ==
11327 SAS_END_DEVICE)
11328 mpt3sas_device_remove_by_sas_address(ioc,
11329 mpt3sas_port->remote_identify.sas_address,
11330 mpt3sas_port->hba_port);
11331 else if (mpt3sas_port->remote_identify.device_type ==
11332 SAS_EDGE_EXPANDER_DEVICE ||
11333 mpt3sas_port->remote_identify.device_type ==
11334 SAS_FANOUT_EXPANDER_DEVICE)
11335 mpt3sas_expander_remove(ioc,
11336 mpt3sas_port->remote_identify.sas_address,
11337 mpt3sas_port->hba_port);
11338 }
11339
11340 list_for_each_entry_safe(port, port_next,
11341 &ioc->port_table_list, list) {
11342 list_del(&port->list);
11343 kfree(port);
11344 }
11345
11346
11347 if (ioc->sas_hba.num_phys) {
11348 kfree(ioc->sas_hba.phy);
11349 ioc->sas_hba.phy = NULL;
11350 ioc->sas_hba.num_phys = 0;
11351 }
11352
11353 mpt3sas_base_detach(ioc);
11354 spin_lock(&gioc_lock);
11355 list_del(&ioc->list);
11356 spin_unlock(&gioc_lock);
11357 scsi_host_put(shost);
11358}
11359
11360
11361
11362
11363
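/**
 * scsih_shutdown - routine called during system shutdown
 * @pdev: PCI device struct
 */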
11364static void
11365scsih_shutdown(struct pci_dev *pdev)
11366{
11367 struct Scsi_Host *shost;
11368 struct MPT3SAS_ADAPTER *ioc;
11369 struct workqueue_struct *wq;
11370 unsigned long flags;
11371 Mpi2ConfigReply_t mpi_reply;
11372
11373 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11374 return;
11375
11376 ioc->remove_host = 1;
11377
11378 if (!pci_device_is_present(pdev))
11379 _scsih_flush_running_cmds(ioc);
11380
11381 _scsih_fw_event_cleanup_queue(ioc);
11382
11383 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11384 wq = ioc->firmware_event_thread;
11385 ioc->firmware_event_thread = NULL;
11386 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11387 if (wq)
11388 destroy_workqueue(wq);
11389
11390
11391
11392
11393 if (ioc->is_aero_ioc)
11394 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11395 &ioc->ioc_pg1_copy);
11396
11397 _scsih_ir_shutdown(ioc);
11398 _scsih_nvme_shutdown(ioc);
11399 mpt3sas_base_mask_interrupts(ioc);
11400 ioc->shost_recovery = 1;
11401 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
11402 ioc->shost_recovery = 0;
11403 mpt3sas_base_free_irq(ioc);
11404 mpt3sas_base_disable_msix(ioc);
11405}
11406
11407
11408
11409
11410
11411
11412
11413
11414
11415
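/**
 * _scsih_probe_boot_devices - report the boot device first
 * @ioc: per adapter object
 *
 * If the BIOS version in bios page 3 is valid and a requested, alternate or
 * current boot device was recorded, this routine registers that device with
 * scsi-ml or the sas transport before any other device, for persistent boot
 * device purposes.
 */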
11416static void
11417_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11418{
11419 u32 channel;
11420 void *device;
11421 struct _sas_device *sas_device;
11422 struct _raid_device *raid_device;
11423 struct _pcie_device *pcie_device;
11424 u16 handle;
11425 u64 sas_address_parent;
11426 u64 sas_address;
11427 unsigned long flags;
11428 int rc;
11429 int tid;
11430 struct hba_port *port;
11431
11432
11433 if (!ioc->bios_pg3.BiosVersion)
11434 return;
11435
11436 device = NULL;
11437 if (ioc->req_boot_device.device) {
11438 device = ioc->req_boot_device.device;
11439 channel = ioc->req_boot_device.channel;
11440 } else if (ioc->req_alt_boot_device.device) {
11441 device = ioc->req_alt_boot_device.device;
11442 channel = ioc->req_alt_boot_device.channel;
11443 } else if (ioc->current_boot_device.device) {
11444 device = ioc->current_boot_device.device;
11445 channel = ioc->current_boot_device.channel;
11446 }
11447
11448 if (!device)
11449 return;
11450
11451 if (channel == RAID_CHANNEL) {
11452 raid_device = device;
11453
11454
11455
11456
11457
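 /* already registered with the SCSI midlayer; nothing more to do */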
11458 if (raid_device->starget)
11459 return;
11460 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11461 raid_device->id, 0);
11462 if (rc)
11463 _scsih_raid_device_remove(ioc, raid_device);
11464 } else if (channel == PCIE_CHANNEL) {
11465 pcie_device = device;
11466
11467
11468
11469
11470
11471 if (pcie_device->starget)
11472 return;
11473 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11474 tid = pcie_device->id;
11475 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11476 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11477 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11478 if (rc)
11479 _scsih_pcie_device_remove(ioc, pcie_device);
11480 } else {
11481 sas_device = device;
11482
11483
11484
11485
11486
11487 if (sas_device->starget)
11488 return;
11489 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11490 handle = sas_device->handle;
11491 sas_address_parent = sas_device->sas_address_parent;
11492 sas_address = sas_device->sas_address;
11493 port = sas_device->port;
11494 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11495 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11496
11497 if (ioc->hide_drives)
11498 return;
11499
11500 if (!port)
11501 return;
11502
11503 if (!mpt3sas_transport_port_add(ioc, handle,
11504 sas_address_parent, port)) {
11505 _scsih_sas_device_remove(ioc, sas_device);
11506 } else if (!sas_device->starget) {
11507 if (!ioc->is_driver_loading) {
11508 mpt3sas_transport_port_remove(ioc,
11509 sas_address,
11510 sas_address_parent, port);
11511 _scsih_sas_device_remove(ioc, sas_device);
11512 }
11513 }
11514 }
11515}
11516
11517
11518
11519
11520
11521
11522
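/**
 * _scsih_probe_raid - reporting raid volumes to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */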
11523static void
11524_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11525{
11526 struct _raid_device *raid_device, *raid_next;
11527 int rc;
11528
11529 list_for_each_entry_safe(raid_device, raid_next,
11530 &ioc->raid_device_list, list) {
11531 if (raid_device->starget)
11532 continue;
11533 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11534 raid_device->id, 0);
11535 if (rc)
11536 _scsih_raid_device_remove(ioc, raid_device);
11537 }
11538}
11539
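/**
 * get_next_sas_device - get the next sas device from the init list
 * @ioc: per adapter object
 *
 * Return: the first entry on ioc->sas_device_init_list with its reference
 * count incremented, or NULL when the list is empty.  The caller must drop
 * the reference with sas_device_put().
 */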
11540static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11541{
11542 struct _sas_device *sas_device = NULL;
11543 unsigned long flags;
11544
11545 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11546 if (!list_empty(&ioc->sas_device_init_list)) {
11547 sas_device = list_first_entry(&ioc->sas_device_init_list,
11548 struct _sas_device, list);
11549 sas_device_get(sas_device);
11550 }
11551 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11552
11553 return sas_device;
11554}
11555
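/**
 * sas_device_make_active - move a sas device to the main device list
 * @ioc: per adapter object
 * @sas_device: the sas device object
 *
 * Drops the reference held by whichever list the device is currently on,
 * then adds it to ioc->sas_device_list with a fresh reference.
 */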
11556static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11557 struct _sas_device *sas_device)
11558{
11559 unsigned long flags;
11560
11561 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11562
11563
11564
11565
11566
11567
11568
11569
11570
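 /*
  * The device may still be on the init list, or another thread
  * may already have removed it; if it is on a list, drop that
  * list's reference before taking a new one for sas_device_list.
  */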
11571 if (!list_empty(&sas_device->list)) {
11572 list_del_init(&sas_device->list);
11573 sas_device_put(sas_device);
11574 }
11575
11576 sas_device_get(sas_device);
11577 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11578
11579 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11580}
11581
11582
11583
11584
11585
11586
11587
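/**
 * _scsih_probe_sas - reporting sas devices to sas transport
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */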
11588static void
11589_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11590{
11591 struct _sas_device *sas_device;
11592
11593 if (ioc->hide_drives)
11594 return;
11595
11596 while ((sas_device = get_next_sas_device(ioc))) {
11597 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11598 sas_device->sas_address_parent, sas_device->port)) {
11599 _scsih_sas_device_remove(ioc, sas_device);
11600 sas_device_put(sas_device);
11601 continue;
11602 } else if (!sas_device->starget) {
11603
11604
11605
11606
11607
11608
11609 if (!ioc->is_driver_loading) {
11610 mpt3sas_transport_port_remove(ioc,
11611 sas_device->sas_address,
11612 sas_device->sas_address_parent,
11613 sas_device->port);
11614 _scsih_sas_device_remove(ioc, sas_device);
11615 sas_device_put(sas_device);
11616 continue;
11617 }
11618 }
11619 sas_device_make_active(ioc, sas_device);
11620 sas_device_put(sas_device);
11621 }
11622}
11623
11624
11625
11626
11627
11628
11629
11630
11631
11632
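/**
 * get_next_pcie_device - get the next pcie device from the init list
 * @ioc: per adapter object
 *
 * Return: the first entry on ioc->pcie_device_init_list with its reference
 * count incremented, or NULL when the list is empty.  The caller must drop
 * the reference with pcie_device_put().
 */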
11633static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11634{
11635 struct _pcie_device *pcie_device = NULL;
11636 unsigned long flags;
11637
11638 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11639 if (!list_empty(&ioc->pcie_device_init_list)) {
11640 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11641 struct _pcie_device, list);
11642 pcie_device_get(pcie_device);
11643 }
11644 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11645
11646 return pcie_device;
11647}
11648
11649
11650
11651
11652
11653
11654
11655
11656
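/**
 * pcie_device_make_active - move a pcie device to the main device list
 * @ioc: per adapter object
 * @pcie_device: the pcie device object
 *
 * Drops the reference held by whichever list the device is currently on,
 * then adds it to ioc->pcie_device_list with a fresh reference.
 */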
11657static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11658 struct _pcie_device *pcie_device)
11659{
11660 unsigned long flags;
11661
11662 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11663
11664 if (!list_empty(&pcie_device->list)) {
11665 list_del_init(&pcie_device->list);
11666 pcie_device_put(pcie_device);
11667 }
11668 pcie_device_get(pcie_device);
11669 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11670
11671 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11672}
11673
11674
11675
11676
11677
11678
11679
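/**
 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */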
11680static void
11681_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11682{
11683 struct _pcie_device *pcie_device;
11684 int rc;
11685
11686
11687 while ((pcie_device = get_next_pcie_device(ioc))) {
11688 if (pcie_device->starget) {
11689 pcie_device_put(pcie_device);
11690 continue;
11691 }
11692 if (pcie_device->access_status ==
11693 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11694 pcie_device_make_active(ioc, pcie_device);
11695 pcie_device_put(pcie_device);
11696 continue;
11697 }
11698 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11699 pcie_device->id, 0);
11700 if (rc) {
11701 _scsih_pcie_device_remove(ioc, pcie_device);
11702 pcie_device_put(pcie_device);
11703 continue;
11704 } else if (!pcie_device->starget) {
11705
11706
11707
11708
11709
11710
11711 if (!ioc->is_driver_loading) {
11712
11713
11714
11715 _scsih_pcie_device_remove(ioc, pcie_device);
11716 pcie_device_put(pcie_device);
11717 continue;
11718 }
11719 }
11720 pcie_device_make_active(ioc, pcie_device);
11721 pcie_device_put(pcie_device);
11722 }
11723}
11724
11725
11726
11727
11728
11729
11730
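/**
 * _scsih_probe_devices - probing for devices
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.  The boot device is reported
 * first; the IR volume mapping mode then decides whether RAID volumes or
 * bare SAS devices are exposed first.
 */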
11731static void
11732_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11733{
11734 u16 volume_mapping_flags;
11735
11736 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11737 return;
11738
11739 _scsih_probe_boot_devices(ioc);
11740
11741 if (ioc->ir_firmware) {
11742 volume_mapping_flags =
11743 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11744 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11745 if (volume_mapping_flags ==
11746 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11747 _scsih_probe_raid(ioc);
11748 _scsih_probe_sas(ioc);
11749 } else {
11750 _scsih_probe_sas(ioc);
11751 _scsih_probe_raid(ioc);
11752 }
11753 } else {
11754 _scsih_probe_sas(ioc);
11755 _scsih_probe_pcie(ioc);
11756 }
11757}
11758
11759
11760
11761
11762
11763
11764
11765
11766
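/**
 * scsih_scan_start - scsi lld callback for ->scan_start
 * @shost: SCSI host pointer
 *
 * The shost has the ability to discover targets on its own instead of
 * scanning the entire bus.  In our implementation we kick off firmware
 * discovery by issuing a port enable request.
 */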
11767static void
11768scsih_scan_start(struct Scsi_Host *shost)
11769{
11770 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11771 int rc;
11772 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11773 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11774 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11775 mpt3sas_enable_diag_buffer(ioc, 1);
11776
11777 if (disable_discovery > 0)
11778 return;
11779
11780 ioc->start_scan = 1;
11781 rc = mpt3sas_port_enable(ioc);
11782
11783 if (rc != 0)
11784 ioc_info(ioc, "port enable: FAILED\n");
11785}
11786
11787
11788
11789
11790
11791
11792
11793
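/**
 * _scsih_complete_devices_scanning - add the discovered devices to the
 *	SCSI midlayer and complete ioc initialization
 * @ioc: per adapter object
 */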
11794static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
11795{
11796
11797 if (ioc->wait_for_discovery_to_complete) {
11798 ioc->wait_for_discovery_to_complete = 0;
11799 _scsih_probe_devices(ioc);
11800 }
11801
11802 mpt3sas_base_start_watchdog(ioc);
11803 ioc->is_driver_loading = 0;
11804}
11805
11806
11807
11808
11809
11810
11811
11812
11813
11814
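/**
 * scsih_scan_finished - scsi lld callback for ->scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function is called periodically until it returns 1: we wait here for
 * firmware discovery (port enable) to complete, failing the scan after a
 * 300 second timeout or a firmware fault.
 */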
11815static int
11816scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11817{
11818 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11819 u32 ioc_state;
11820 int issue_hard_reset = 0;
11821
11822 if (disable_discovery > 0) {
11823 ioc->is_driver_loading = 0;
11824 ioc->wait_for_discovery_to_complete = 0;
11825 return 1;
11826 }
11827
11828 if (time >= (300 * HZ)) {
11829 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11830 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11831 ioc->is_driver_loading = 0;
11832 return 1;
11833 }
11834
11835 if (ioc->start_scan) {
11836 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
11837 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
11838 mpt3sas_print_fault_code(ioc, ioc_state &
11839 MPI2_DOORBELL_DATA_MASK);
11840 issue_hard_reset = 1;
11841 goto out;
11842 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
11843 MPI2_IOC_STATE_COREDUMP) {
11844 mpt3sas_base_coredump_info(ioc, ioc_state &
11845 MPI2_DOORBELL_DATA_MASK);
11846 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
11847 issue_hard_reset = 1;
11848 goto out;
11849 }
11850 return 0;
11851 }
11852
11853 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
11854 ioc_info(ioc,
11855 "port enable: aborted due to diag reset\n");
11856 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11857 goto out;
11858 }
11859 if (ioc->start_scan_failed) {
11860 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11861 ioc->start_scan_failed);
11862 ioc->is_driver_loading = 0;
11863 ioc->wait_for_discovery_to_complete = 0;
11864 ioc->remove_host = 1;
11865 return 1;
11866 }
11867
11868 ioc_info(ioc, "port enable: SUCCESS\n");
11869 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11870 _scsih_complete_devices_scanning(ioc);
11871
11872out:
11873 if (issue_hard_reset) {
11874 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11875 if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
11876 ioc->is_driver_loading = 0;
11877 }
11878 return 1;
11879}
11880
11881
11882
11883
11884
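/**
 * scsih_map_queues - map reply queues with request queues
 * @shost: SCSI host pointer
 */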
11885static int scsih_map_queues(struct Scsi_Host *shost)
11886{
11887 struct MPT3SAS_ADAPTER *ioc =
11888 (struct MPT3SAS_ADAPTER *)shost->hostdata;
11889
11890 if (ioc->shost->nr_hw_queues == 1)
11891 return 0;
11892
11893 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
11894 ioc->pdev, ioc->high_iops_queues);
11895}
11896
11897
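/* shost template for SAS 2.0 HBA devices */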
11898static struct scsi_host_template mpt2sas_driver_template = {
11899 .module = THIS_MODULE,
11900 .name = "Fusion MPT SAS Host",
11901 .proc_name = MPT2SAS_DRIVER_NAME,
11902 .queuecommand = scsih_qcmd,
11903 .target_alloc = scsih_target_alloc,
11904 .slave_alloc = scsih_slave_alloc,
11905 .slave_configure = scsih_slave_configure,
11906 .target_destroy = scsih_target_destroy,
11907 .slave_destroy = scsih_slave_destroy,
11908 .scan_finished = scsih_scan_finished,
11909 .scan_start = scsih_scan_start,
11910 .change_queue_depth = scsih_change_queue_depth,
11911 .eh_abort_handler = scsih_abort,
11912 .eh_device_reset_handler = scsih_dev_reset,
11913 .eh_target_reset_handler = scsih_target_reset,
11914 .eh_host_reset_handler = scsih_host_reset,
11915 .bios_param = scsih_bios_param,
11916 .can_queue = 1,
11917 .this_id = -1,
11918 .sg_tablesize = MPT2SAS_SG_DEPTH,
11919 .max_sectors = 32767,
11920 .cmd_per_lun = 7,
11921 .use_clustering = ENABLE_CLUSTERING,
11922 .shost_attrs = mpt3sas_host_attrs,
11923 .sdev_attrs = mpt3sas_dev_attrs,
11924 .track_queue_depth = 1,
11925 .cmd_size = sizeof(struct scsiio_tracker),
11926};
11927
11928
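/* raid transport support for SAS 2.0 HBA devices */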
11929static struct raid_function_template mpt2sas_raid_functions = {
11930 .cookie = &mpt2sas_driver_template,
11931 .is_raid = scsih_is_raid,
11932 .get_resync = scsih_get_resync,
11933 .get_state = scsih_get_state,
11934};
11935
11936
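/* shost template for SAS 3.0 and above HBA devices */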
11937static struct scsi_host_template mpt3sas_driver_template = {
11938 .module = THIS_MODULE,
11939 .name = "Fusion MPT SAS Host",
11940 .proc_name = MPT3SAS_DRIVER_NAME,
11941 .queuecommand = scsih_qcmd,
11942 .target_alloc = scsih_target_alloc,
11943 .slave_alloc = scsih_slave_alloc,
11944 .slave_configure = scsih_slave_configure,
11945 .target_destroy = scsih_target_destroy,
11946 .slave_destroy = scsih_slave_destroy,
11947 .scan_finished = scsih_scan_finished,
11948 .scan_start = scsih_scan_start,
11949 .change_queue_depth = scsih_change_queue_depth,
11950 .eh_abort_handler = scsih_abort,
11951 .eh_device_reset_handler = scsih_dev_reset,
11952 .eh_target_reset_handler = scsih_target_reset,
11953 .eh_host_reset_handler = scsih_host_reset,
11954 .bios_param = scsih_bios_param,
11955 .can_queue = 1,
11956 .this_id = -1,
11957 .sg_tablesize = MPT3SAS_SG_DEPTH,
11958 .max_sectors = 32767,
11959 .cmd_per_lun = 7,
11960 .use_clustering = ENABLE_CLUSTERING,
11961 .shost_attrs = mpt3sas_host_attrs,
11962 .sdev_attrs = mpt3sas_dev_attrs,
11963 .track_queue_depth = 1,
11964 .cmd_size = sizeof(struct scsiio_tracker),
11965 .map_queues = scsih_map_queues,
11966};
11967
11968
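/* raid transport support for SAS 3.0 and above HBA devices */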
11969static struct raid_function_template mpt3sas_raid_functions = {
11970 .cookie = &mpt3sas_driver_template,
11971 .is_raid = scsih_is_raid,
11972 .get_resync = scsih_get_resync,
11973 .get_state = scsih_get_state,
11974};
11975
11976
11977
11978
11979
11980
11981
11982
11983
11984
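/**
 * _scsih_determine_hba_mpi_version - determine the MPI version class
 *	(MPI2.0, MPI2.5 or MPI2.6) that the PCI device belongs to
 * @pdev: PCI device struct
 *
 * Return: MPI2_VERSION for SAS 2.0 HBAs, MPI25_VERSION for SAS 3.0 HBAs,
 *	MPI26_VERSION for SAS 3.5 generation HBAs, or 0 for unrecognized
 *	devices.
 */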
11985static u16
11986_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
11987{
11988
11989 switch (pdev->device) {
11990 case MPI2_MFGPAGE_DEVID_SSS6200:
11991 case MPI2_MFGPAGE_DEVID_SAS2004:
11992 case MPI2_MFGPAGE_DEVID_SAS2008:
11993 case MPI2_MFGPAGE_DEVID_SAS2108_1:
11994 case MPI2_MFGPAGE_DEVID_SAS2108_2:
11995 case MPI2_MFGPAGE_DEVID_SAS2108_3:
11996 case MPI2_MFGPAGE_DEVID_SAS2116_1:
11997 case MPI2_MFGPAGE_DEVID_SAS2116_2:
11998 case MPI2_MFGPAGE_DEVID_SAS2208_1:
11999 case MPI2_MFGPAGE_DEVID_SAS2208_2:
12000 case MPI2_MFGPAGE_DEVID_SAS2208_3:
12001 case MPI2_MFGPAGE_DEVID_SAS2208_4:
12002 case MPI2_MFGPAGE_DEVID_SAS2208_5:
12003 case MPI2_MFGPAGE_DEVID_SAS2208_6:
12004 case MPI2_MFGPAGE_DEVID_SAS2308_1:
12005 case MPI2_MFGPAGE_DEVID_SAS2308_2:
12006 case MPI2_MFGPAGE_DEVID_SAS2308_3:
12007 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12008 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12009 return MPI2_VERSION;
12010 case MPI25_MFGPAGE_DEVID_SAS3004:
12011 case MPI25_MFGPAGE_DEVID_SAS3008:
12012 case MPI25_MFGPAGE_DEVID_SAS3108_1:
12013 case MPI25_MFGPAGE_DEVID_SAS3108_2:
12014 case MPI25_MFGPAGE_DEVID_SAS3108_5:
12015 case MPI25_MFGPAGE_DEVID_SAS3108_6:
12016 return MPI25_VERSION;
12017 case MPI26_MFGPAGE_DEVID_SAS3216:
12018 case MPI26_MFGPAGE_DEVID_SAS3224:
12019 case MPI26_MFGPAGE_DEVID_SAS3316_1:
12020 case MPI26_MFGPAGE_DEVID_SAS3316_2:
12021 case MPI26_MFGPAGE_DEVID_SAS3316_3:
12022 case MPI26_MFGPAGE_DEVID_SAS3316_4:
12023 case MPI26_MFGPAGE_DEVID_SAS3324_1:
12024 case MPI26_MFGPAGE_DEVID_SAS3324_2:
12025 case MPI26_MFGPAGE_DEVID_SAS3324_3:
12026 case MPI26_MFGPAGE_DEVID_SAS3324_4:
12027 case MPI26_MFGPAGE_DEVID_SAS3508:
12028 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12029 case MPI26_MFGPAGE_DEVID_SAS3408:
12030 case MPI26_MFGPAGE_DEVID_SAS3516:
12031 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12032 case MPI26_MFGPAGE_DEVID_SAS3416:
12033 case MPI26_MFGPAGE_DEVID_SAS3616:
12034 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12035 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12036 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12037 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12038 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12039 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12040 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12041 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12042 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12043 return MPI26_VERSION;
12044 }
12045 return 0;
12046}
12047
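/* controllers flagged as deprecated via pci_hw_deprecated() at probe time */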
12048static const struct pci_device_id rh_deprecated_pci_table[] = {
12049
12050 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12051 PCI_ANY_ID, PCI_ANY_ID },
12052 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12053 PCI_ANY_ID, PCI_ANY_ID },
12054 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12055 PCI_ANY_ID, PCI_ANY_ID },
12056 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12057 PCI_ANY_ID, PCI_ANY_ID },
12058 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12059 PCI_ANY_ID, PCI_ANY_ID },
12060 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12061 PCI_ANY_ID, PCI_ANY_ID },
12062
12063 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12064 PCI_ANY_ID, PCI_ANY_ID },
12065 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12066 PCI_ANY_ID, PCI_ANY_ID },
12067 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12068 PCI_ANY_ID, PCI_ANY_ID },
12069
12070 {0}
12071};
12072
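/* controllers flagged as unmaintained via pci_hw_unmaintained() at probe time */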
12073static const struct pci_device_id rh_unmaintained_pci_table[] = {
12074
12075 {0}
12076};
12077
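/* controllers rejected at probe time via pci_hw_disabled() */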
12078static const struct pci_device_id rh_disabled_pci_table[] = {
12079
12080 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12081 PCI_ANY_ID, PCI_ANY_ID },
12082
12083 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12084 PCI_ANY_ID, PCI_ANY_ID },
12085
12086 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12087 PCI_ANY_ID, PCI_ANY_ID },
12088 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12089 PCI_ANY_ID, PCI_ANY_ID },
12090 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12091 PCI_ANY_ID, PCI_ANY_ID },
12092
12093 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12094 PCI_ANY_ID, PCI_ANY_ID },
12095 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12096 PCI_ANY_ID, PCI_ANY_ID },
12097
12098
12099 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12100 PCI_ANY_ID, PCI_ANY_ID },
12101
12102 {0}
12103};
12104
12105
12106
12107
12108
12109
12110
12111
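/**
 * _scsih_probe - attach and add scsi host
 * @pdev: PCI device struct
 * @id: pci device id
 *
 * Return: 0 success, anything else error.
 */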
12112static int
12113_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12114{
12115 struct MPT3SAS_ADAPTER *ioc;
12116 struct Scsi_Host *shost = NULL;
12117 int rv;
12118 u16 hba_mpi_version;
12119
12120 if (pci_hw_disabled(rh_disabled_pci_table, pdev))
12121 return -ENODEV;
12122
12123 pci_hw_deprecated(rh_deprecated_pci_table, pdev);
12124 pci_hw_unmaintained(rh_unmaintained_pci_table, pdev);
12125
12126
12127 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12128 if (hba_mpi_version == 0)
12129 return -ENODEV;
12130
12131
12132
12133
12134 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
12135 return -ENODEV;
12136
12137
12138
12139
12140 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
12141 || hba_mpi_version == MPI26_VERSION)))
12142 return -ENODEV;
12143
12144 switch (hba_mpi_version) {
12145 case MPI2_VERSION:
12146 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12147 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
12148
12149 shost = scsi_host_alloc(&mpt2sas_driver_template,
12150 sizeof(struct MPT3SAS_ADAPTER));
12151 if (!shost)
12152 return -ENODEV;
12153 ioc = shost_priv(shost);
12154 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12155 ioc->hba_mpi_version_belonged = hba_mpi_version;
12156 ioc->id = mpt2_ids++;
12157 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
12158 switch (pdev->device) {
12159 case MPI2_MFGPAGE_DEVID_SSS6200:
12160 ioc->is_warpdrive = 1;
12161 ioc->hide_ir_msg = 1;
12162 break;
12163 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12164 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12165 ioc->is_mcpu_endpoint = 1;
12166 break;
12167 default:
12168 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
12169 break;
12170 }
12171
12172 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12173 ioc->multipath_on_hba = 0;
12174 else
12175 ioc->multipath_on_hba = 1;
12176
12177 break;
12178 case MPI25_VERSION:
12179 case MPI26_VERSION:
12180
12181 shost = scsi_host_alloc(&mpt3sas_driver_template,
12182 sizeof(struct MPT3SAS_ADAPTER));
12183 if (!shost)
12184 return -ENODEV;
12185 ioc = shost_priv(shost);
12186 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12187 ioc->hba_mpi_version_belonged = hba_mpi_version;
12188 ioc->id = mpt3_ids++;
12189 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12190 switch (pdev->device) {
12191 case MPI26_MFGPAGE_DEVID_SAS3508:
12192 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12193 case MPI26_MFGPAGE_DEVID_SAS3408:
12194 case MPI26_MFGPAGE_DEVID_SAS3516:
12195 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12196 case MPI26_MFGPAGE_DEVID_SAS3416:
12197 case MPI26_MFGPAGE_DEVID_SAS3616:
12198 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12199 ioc->is_gen35_ioc = 1;
12200 break;
12201 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12202 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12203 dev_err(&pdev->dev,
12204 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12205 pdev->device, pdev->subsystem_vendor,
12206 pdev->subsystem_device);
12207 return 1;
12208 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12209 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12210 dev_err(&pdev->dev,
12211 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12212 pdev->device, pdev->subsystem_vendor,
12213 pdev->subsystem_device);
12214 return 1;
12215 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12216 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12217 dev_info(&pdev->dev,
12218 "HBA is in Configurable Secure mode\n");
12219 fallthrough;
12220 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12221 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12222 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12223 break;
12224 default:
12225 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
12226 }
12227 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12228 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12229 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12230 ioc->combined_reply_queue = 1;
12231 if (ioc->is_gen35_ioc)
12232 ioc->combined_reply_index_count =
12233 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12234 else
12235 ioc->combined_reply_index_count =
12236 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
12237 }
12238
12239 switch (ioc->is_gen35_ioc) {
12240 case 0:
12241 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12242 ioc->multipath_on_hba = 0;
12243 else
12244 ioc->multipath_on_hba = 1;
12245 break;
12246 case 1:
12247 if (multipath_on_hba == -1 || multipath_on_hba > 0)
12248 ioc->multipath_on_hba = 1;
12249 else
12250 ioc->multipath_on_hba = 0;
12251 break;
12252 default:
12253 break;
12254 }
12255
12256 break;
12257 default:
12258 return -ENODEV;
12259 }
12260
12261 INIT_LIST_HEAD(&ioc->list);
12262 spin_lock(&gioc_lock);
12263 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12264 spin_unlock(&gioc_lock);
12265 ioc->shost = shost;
12266 ioc->pdev = pdev;
12267 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12268 ioc->tm_cb_idx = tm_cb_idx;
12269 ioc->ctl_cb_idx = ctl_cb_idx;
12270 ioc->base_cb_idx = base_cb_idx;
12271 ioc->port_enable_cb_idx = port_enable_cb_idx;
12272 ioc->transport_cb_idx = transport_cb_idx;
12273 ioc->scsih_cb_idx = scsih_cb_idx;
12274 ioc->config_cb_idx = config_cb_idx;
12275 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12276 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12277 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12278 ioc->logging_level = logging_level;
12279 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12280
12281 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12282
12283
12284
12285 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12286
12287 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12288
12289 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12290
12291
12292 mutex_init(&ioc->reset_in_progress_mutex);
12293
12294 mutex_init(&ioc->pci_access_mutex);
12295 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12296 spin_lock_init(&ioc->scsi_lookup_lock);
12297 spin_lock_init(&ioc->sas_device_lock);
12298 spin_lock_init(&ioc->sas_node_lock);
12299 spin_lock_init(&ioc->fw_event_lock);
12300 spin_lock_init(&ioc->raid_device_lock);
12301 spin_lock_init(&ioc->pcie_device_lock);
12302 spin_lock_init(&ioc->diag_trigger_lock);
12303
12304 INIT_LIST_HEAD(&ioc->sas_device_list);
12305 INIT_LIST_HEAD(&ioc->sas_device_init_list);
12306 INIT_LIST_HEAD(&ioc->sas_expander_list);
12307 INIT_LIST_HEAD(&ioc->enclosure_list);
12308 INIT_LIST_HEAD(&ioc->pcie_device_list);
12309 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12310 INIT_LIST_HEAD(&ioc->fw_event_list);
12311 INIT_LIST_HEAD(&ioc->raid_device_list);
12312 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12313 INIT_LIST_HEAD(&ioc->delayed_tr_list);
12314 INIT_LIST_HEAD(&ioc->delayed_sc_list);
12315 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12316 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12317 INIT_LIST_HEAD(&ioc->reply_queue_list);
12318 INIT_LIST_HEAD(&ioc->port_table_list);
12319
12320 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12321
12322
12323 shost->max_cmd_len = 32;
12324 shost->max_lun = max_lun;
12325 shost->transportt = mpt3sas_transport_template;
12326 shost->unique_id = ioc->id;
12327
12328 if (ioc->is_mcpu_endpoint) {
12329
12330 shost->max_sectors = 128;
12331 ioc_info(ioc, "The max_sectors value is set to %d\n",
12332 shost->max_sectors);
12333 } else {
12334 if (max_sectors != 0xFFFF) {
12335 if (max_sectors < 64) {
12336 shost->max_sectors = 64;
12337 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12338 max_sectors);
12339 } else if (max_sectors > 32767) {
12340 shost->max_sectors = 32767;
12341 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
12342 max_sectors);
12343 } else {
12344 shost->max_sectors = max_sectors & 0xFFFE;
12345 ioc_info(ioc, "The max_sectors value is set to %d\n",
12346 shost->max_sectors);
12347 }
12348 }
12349 }
12350
12351 if (prot_mask >= 0)
12352 scsi_host_set_prot(shost, (prot_mask & 0x07));
12353 else
12354 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12355 | SHOST_DIF_TYPE2_PROTECTION
12356 | SHOST_DIF_TYPE3_PROTECTION);
12357
12358 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12359
12360
12361 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12362 "fw_event_%s%d", ioc->driver_name, ioc->id);
12363 ioc->firmware_event_thread = alloc_ordered_workqueue(
12364 ioc->firmware_event_name, 0);
12365 if (!ioc->firmware_event_thread) {
12366 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12367 __FILE__, __LINE__, __func__);
12368 rv = -ENODEV;
12369 goto out_thread_fail;
12370 }
12371
12372 ioc->is_driver_loading = 1;
12373 if ((mpt3sas_base_attach(ioc))) {
12374 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12375 __FILE__, __LINE__, __func__);
12376 rv = -ENODEV;
12377 goto out_attach_fail;
12378 }
12379
12380 if (ioc->is_warpdrive) {
12381 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12382 ioc->hide_drives = 0;
12383 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12384 ioc->hide_drives = 1;
12385 else {
12386 if (mpt3sas_get_num_volumes(ioc))
12387 ioc->hide_drives = 1;
12388 else
12389 ioc->hide_drives = 0;
12390 }
12391 } else
12392 ioc->hide_drives = 0;
12393
12394 shost->host_tagset = 0;
12395 shost->nr_hw_queues = 1;
12396
12397 if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
12398 host_tagset_enable && ioc->smp_affinity_enable) {
12399
12400 shost->host_tagset = 1;
12401 shost->nr_hw_queues =
12402 ioc->reply_queue_count - ioc->high_iops_queues;
12403
12404 dev_info(&ioc->pdev->dev,
12405 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12406 shost->can_queue, shost->nr_hw_queues);
12407 }
12408
12409 rv = scsi_add_host(shost, &pdev->dev);
12410 if (rv) {
12411 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12412 __FILE__, __LINE__, __func__);
12413 goto out_add_shost_fail;
12414 }
12415
12416 scsi_scan_host(shost);
12417 mpt3sas_setup_debugfs(ioc);
12418 return 0;
12419out_add_shost_fail:
12420 mpt3sas_base_detach(ioc);
12421 out_attach_fail:
12422 destroy_workqueue(ioc->firmware_event_thread);
12423 out_thread_fail:
12424 spin_lock(&gioc_lock);
12425 list_del(&ioc->list);
12426 spin_unlock(&gioc_lock);
12427 scsi_host_put(shost);
12428 return rv;
12429}
12430
12431
12432
12433
12434
12435
12436
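/**
 * scsih_suspend - power management suspend main entry point
 * @dev: Device struct
 *
 * Return: 0 success, anything else error.
 */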
12437static int __maybe_unused
12438scsih_suspend(struct device *dev)
12439{
12440 struct pci_dev *pdev = to_pci_dev(dev);
12441 struct Scsi_Host *shost;
12442 struct MPT3SAS_ADAPTER *ioc;
12443 int rc;
12444
12445 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12446 if (rc)
12447 return rc;
12448
12449 mpt3sas_base_stop_watchdog(ioc);
12450 flush_scheduled_work();
12451 scsi_block_requests(shost);
12452 _scsih_nvme_shutdown(ioc);
12453 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12454 pdev, pci_name(pdev));
12455
12456 mpt3sas_base_free_resources(ioc);
12457 return 0;
12458}
12459
12460
12461
12462
12463
12464
12465
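/**
 * scsih_resume - power management resume main entry point
 * @dev: Device struct
 *
 * Return: 0 success, anything else error.
 */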
12466static int __maybe_unused
12467scsih_resume(struct device *dev)
12468{
12469 struct pci_dev *pdev = to_pci_dev(dev);
12470 struct Scsi_Host *shost;
12471 struct MPT3SAS_ADAPTER *ioc;
12472 pci_power_t device_state = pdev->current_state;
12473 int r;
12474
12475 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12476 if (r)
12477 return r;
12478
12479 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12480 pdev, pci_name(pdev), device_state);
12481
12482 ioc->pdev = pdev;
12483 r = mpt3sas_base_map_resources(ioc);
12484 if (r)
12485 return r;
12486 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12487 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12488 scsi_unblock_requests(shost);
12489 mpt3sas_base_start_watchdog(ioc);
12490 return 0;
12491}
12492
12493
12494
12495
12496
12497
12498
12499
12500
12501
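/**
 * scsih_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Return: the driver's response to the error, one of
 *	PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or
 *	PCI_ERS_RESULT_DISCONNECT.
 */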
12502static pci_ers_result_t
12503scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12504{
12505 struct Scsi_Host *shost;
12506 struct MPT3SAS_ADAPTER *ioc;
12507
12508 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12509 return PCI_ERS_RESULT_DISCONNECT;
12510
12511 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12512
12513 switch (state) {
12514 case pci_channel_io_normal:
12515 return PCI_ERS_RESULT_CAN_RECOVER;
12516 case pci_channel_io_frozen:
12517
12518 ioc->pci_error_recovery = 1;
12519 scsi_block_requests(ioc->shost);
12520 mpt3sas_base_stop_watchdog(ioc);
12521 mpt3sas_base_free_resources(ioc);
12522 return PCI_ERS_RESULT_NEED_RESET;
12523 case pci_channel_io_perm_failure:
 /* Permanent error, prepare for device removal */
12525 ioc->pci_error_recovery = 1;
12526 mpt3sas_base_stop_watchdog(ioc);
12527 _scsih_flush_running_cmds(ioc);
12528 return PCI_ERS_RESULT_DISCONNECT;
12529 }
12530 return PCI_ERS_RESULT_NEED_RESET;
12531}
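
/**
 * scsih_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */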
12541static pci_ers_result_t
12542scsih_pci_slot_reset(struct pci_dev *pdev)
12543{
12544 struct Scsi_Host *shost;
12545 struct MPT3SAS_ADAPTER *ioc;
12546 int rc;
12547
12548 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12549 return PCI_ERS_RESULT_DISCONNECT;
12550
12551 ioc_info(ioc, "PCI error: slot reset callback!!\n");
12552
12553 ioc->pci_error_recovery = 0;
12554 ioc->pdev = pdev;
12555 pci_restore_state(pdev);
12556 rc = mpt3sas_base_map_resources(ioc);
12557 if (rc)
12558 return PCI_ERS_RESULT_DISCONNECT;
12559
12560 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12561 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12562
12563 ioc_warn(ioc, "hard reset: %s\n",
12564 (rc == 0) ? "success" : "failed");
12565
12566 if (!rc)
12567 return PCI_ERS_RESULT_RECOVERED;
12568 else
12569 return PCI_ERS_RESULT_DISCONNECT;
12570}
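
/**
 * scsih_pci_resume - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it is OK to
 * resume normal operation; restarts the watchdog and unblocks SCSI
 * requests.
 */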
12580static void
12581scsih_pci_resume(struct pci_dev *pdev)
12582{
12583 struct Scsi_Host *shost;
12584 struct MPT3SAS_ADAPTER *ioc;
12585
12586 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12587 return;
12588
12589 ioc_info(ioc, "PCI error: resume callback!!\n");
12590
12591 mpt3sas_base_start_watchdog(ioc);
12592 scsi_unblock_requests(ioc->shost);
12593}
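
/**
 * scsih_pci_mmio_enabled - called when MMIO access is re-enabled
 * @pdev: pointer to PCI device
 */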
12599static pci_ers_result_t
12600scsih_pci_mmio_enabled(struct pci_dev *pdev)
12601{
12602 struct Scsi_Host *shost;
12603 struct MPT3SAS_ADAPTER *ioc;
12604
12605 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12606 return PCI_ERS_RESULT_DISCONNECT;
12607
12608 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");

 /*
  * This is called only when scsih_pci_error_detected() returned
  * PCI_ERS_RESULT_CAN_RECOVER: register access works again, so no
  * slot reset is required.
  */
12616 return PCI_ERS_RESULT_RECOVERED;
12617}
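
/**
 * scsih_ncq_prio_supp - Check for NCQ command priority support
 * @sdev: scsi device struct
 *
 * This is called when a user indicates they would like to enable
 * ncq command priorities. This works only on SATA devices.
 */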
12626bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12627{
12628 unsigned char *buf;
12629 bool ncq_prio_supp = false;
12630
12631 if (!scsi_device_supports_vpd(sdev))
12632 return ncq_prio_supp;
12633
12634 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12635 if (!buf)
12636 return ncq_prio_supp;
12637
12638 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12639 ncq_prio_supp = (buf[213] >> 4) & 1;
12640
12641 kfree(buf);
12642 return ncq_prio_supp;
12643}
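
/*
 * PCI device ids supported by this driver.
 */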
12647static const struct pci_device_id mpt3sas_pci_table[] = {
12648
12649 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12650 PCI_ANY_ID, PCI_ANY_ID },
12651
12652 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12653 PCI_ANY_ID, PCI_ANY_ID },
12654
12655 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12656 PCI_ANY_ID, PCI_ANY_ID },
12657 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12658 PCI_ANY_ID, PCI_ANY_ID },
12659 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12660 PCI_ANY_ID, PCI_ANY_ID },
12661
12662 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12663 PCI_ANY_ID, PCI_ANY_ID },
12664 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12665 PCI_ANY_ID, PCI_ANY_ID },
12666
12667 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12668 PCI_ANY_ID, PCI_ANY_ID },
12669 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12670 PCI_ANY_ID, PCI_ANY_ID },
12671 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12672 PCI_ANY_ID, PCI_ANY_ID },
12673 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12674 PCI_ANY_ID, PCI_ANY_ID },
12675 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12676 PCI_ANY_ID, PCI_ANY_ID },
12677 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12678 PCI_ANY_ID, PCI_ANY_ID },
12679
12680 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12681 PCI_ANY_ID, PCI_ANY_ID },
12682 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12683 PCI_ANY_ID, PCI_ANY_ID },
12684 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12685 PCI_ANY_ID, PCI_ANY_ID },
12686 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12687 PCI_ANY_ID, PCI_ANY_ID },
12688 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12689 PCI_ANY_ID, PCI_ANY_ID },
12690
12691 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12692 PCI_ANY_ID, PCI_ANY_ID },
12693
12694 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12695 PCI_ANY_ID, PCI_ANY_ID },
12696 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12697 PCI_ANY_ID, PCI_ANY_ID },
12698
12699 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12700 PCI_ANY_ID, PCI_ANY_ID },
12701 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12702 PCI_ANY_ID, PCI_ANY_ID },
12703 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12704 PCI_ANY_ID, PCI_ANY_ID },
12705 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12706 PCI_ANY_ID, PCI_ANY_ID },
12707
12708 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12709 PCI_ANY_ID, PCI_ANY_ID },
12710 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12711 PCI_ANY_ID, PCI_ANY_ID },
12712
12713 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12714 PCI_ANY_ID, PCI_ANY_ID },
12715 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12716 PCI_ANY_ID, PCI_ANY_ID },
12717 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12718 PCI_ANY_ID, PCI_ANY_ID },
12719 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12720 PCI_ANY_ID, PCI_ANY_ID },
12721 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12722 PCI_ANY_ID, PCI_ANY_ID },
12723 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12724 PCI_ANY_ID, PCI_ANY_ID },
12725 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12726 PCI_ANY_ID, PCI_ANY_ID },
12727 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12728 PCI_ANY_ID, PCI_ANY_ID },
12729
12730 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12731 PCI_ANY_ID, PCI_ANY_ID },
12732 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12733 PCI_ANY_ID, PCI_ANY_ID },
12734 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12735 PCI_ANY_ID, PCI_ANY_ID },
12736 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12737 PCI_ANY_ID, PCI_ANY_ID },
12738 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12739 PCI_ANY_ID, PCI_ANY_ID },
12740 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12741 PCI_ANY_ID, PCI_ANY_ID },
12742
12743 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12744 PCI_ANY_ID, PCI_ANY_ID },
12745
 /* SAS3916: configurable secure / hard secure device ids */
12749 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12750 PCI_ANY_ID, PCI_ANY_ID },
12751 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12752 PCI_ANY_ID, PCI_ANY_ID },
12753
 /* SAS3916: invalid device ids */
12757 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12758 PCI_ANY_ID, PCI_ANY_ID },
12759 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12760 PCI_ANY_ID, PCI_ANY_ID },
12761
 /* Atlas PCIe switch management port */
12763 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12764 PCI_ANY_ID, PCI_ANY_ID },
12765
 /* SAS3816: configurable secure / hard secure device ids */
12769 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12770 PCI_ANY_ID, PCI_ANY_ID },
12771 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12772 PCI_ANY_ID, PCI_ANY_ID },
12773
 /* SAS3816: invalid device ids */
12777 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12778 PCI_ANY_ID, PCI_ANY_ID },
12779 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12780 PCI_ANY_ID, PCI_ANY_ID },
12781
12782 {0}
12783};
12784MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12785
12786static struct pci_error_handlers _mpt3sas_err_handler = {
12787 .error_detected = scsih_pci_error_detected,
12788 .mmio_enabled = scsih_pci_mmio_enabled,
12789 .slot_reset = scsih_pci_slot_reset,
12790 .resume = scsih_pci_resume,
12791};
12792
12793static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12794
12795static struct pci_driver mpt3sas_driver = {
12796 .name = MPT3SAS_DRIVER_NAME,
12797 .id_table = mpt3sas_pci_table,
12798 .probe = _scsih_probe,
12799 .remove = scsih_remove,
12800 .shutdown = scsih_shutdown,
12801 .err_handler = &_mpt3sas_err_handler,
12802 .driver.pm = &scsih_pm_ops,
12803};
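
/**
 * scsih_init - register the MPT callback handlers used by this driver.
 *
 * Return: 0 success, anything else error.
 */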
12810static int
12811scsih_init(void)
12812{
12813 mpt2_ids = 0;
12814 mpt3_ids = 0;
12815
12816 mpt3sas_base_initialize_callback_handler();

 /* SCSI IO (queuecommand) completion callback */
12819 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

 /* task management completion callback */
12822 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

 /* base internal command and port enable completion callbacks */
12825 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12826 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12827 mpt3sas_port_enable_done);

 /* transport internal command completion callback */
12830 transport_cb_idx = mpt3sas_base_register_callback_handler(
12831 mpt3sas_transport_done);

 /* scsih internal command completion callback */
12834 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

 /* configuration page request completion callback */
12837 config_cb_idx = mpt3sas_base_register_callback_handler(
12838 mpt3sas_config_done);

 /* ctl module completion callback */
12841 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12842
12843 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12844 _scsih_tm_tr_complete);
12845
12846 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12847 _scsih_tm_volume_tr_complete);
12848
12849 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12850 _scsih_sas_control_complete);
12851
12852 mpt3sas_init_debugfs();
12853 return 0;
12854}
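
/**
 * scsih_exit - release the callback handlers and class templates
 * registered at load time.
 */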
12861static void
12862scsih_exit(void)
12863{
12864
12865 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12866 mpt3sas_base_release_callback_handler(tm_cb_idx);
12867 mpt3sas_base_release_callback_handler(base_cb_idx);
12868 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12869 mpt3sas_base_release_callback_handler(transport_cb_idx);
12870 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12871 mpt3sas_base_release_callback_handler(config_cb_idx);
12872 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12873
12874 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12875 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12876 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12877
 /* release the raid class templates attached at load time */
12879 if (hbas_to_enumerate != 1)
12880 raid_class_release(mpt3sas_raid_template);
12881 if (hbas_to_enumerate != 2)
12882 raid_class_release(mpt2sas_raid_template);
12883 sas_release_transport(mpt3sas_transport_template);
12884 mpt3sas_exit_debugfs();
12885}
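
/**
 * _mpt3sas_init - main entry point for this driver (when it is a module).
 *
 * Return: 0 success, anything else error.
 */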
12892static int __init
12893_mpt3sas_init(void)
12894{
12895 int error;
12896
12897 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12898 MPT3SAS_DRIVER_VERSION);
12899
12900 mpt3sas_transport_template =
12901 sas_attach_transport(&mpt3sas_transport_functions);
12902 if (!mpt3sas_transport_template)
12903 return -ENODEV;
12904
 /*
  * Attach the mpt3sas raid template unless only SAS 2.0 HBAs
  * (hbas_to_enumerate == 1) are being enumerated.
  */
12908 if (hbas_to_enumerate != 1) {
12909 mpt3sas_raid_template =
12910 raid_class_attach(&mpt3sas_raid_functions);
12911 if (!mpt3sas_raid_template) {
12912 sas_release_transport(mpt3sas_transport_template);
12913 return -ENODEV;
12914 }
12915 }
12916
 /*
  * Attach the mpt2sas raid template unless only SAS 3.0 HBAs
  * (hbas_to_enumerate == 2) are being enumerated.
  */
12920 if (hbas_to_enumerate != 2) {
12921 mpt2sas_raid_template =
12922 raid_class_attach(&mpt2sas_raid_functions);
12923 if (!mpt2sas_raid_template) {
12924 sas_release_transport(mpt3sas_transport_template);
12925 return -ENODEV;
12926 }
12927 }
12928
12929 error = scsih_init();
12930 if (error) {
12931 scsih_exit();
12932 return error;
12933 }
12934
12935 mpt3sas_ctl_init(hbas_to_enumerate);
12936
12937 error = pci_register_driver(&mpt3sas_driver);
12938 if (error)
12939 scsih_exit();
12940
12941 return error;
12942}
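
/**
 * _mpt3sas_exit - exit point for this driver (when it is a module).
 */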
12948static void __exit
12949_mpt3sas_exit(void)
12950{
12951 pr_info("mpt3sas version %s unloading\n",
12952 MPT3SAS_DRIVER_VERSION);
12953
12954 mpt3sas_ctl_exit(hbas_to_enumerate);
12955
12956 pci_unregister_driver(&mpt3sas_driver);
12957
12958 scsih_exit();
12959}
12960
12961module_init(_mpt3sas_init);
12962module_exit(_mpt3sas_exit);
12963