/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>
#include <linux/aer.h>

#include "mpt3sas_base.h"

static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH 30000
#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
    " max msix vectors");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
    " enable detection of firmware fault and halt firmware - (default=0)");

static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
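
/**
 * mpt3sas_base_check_cmd_timeout - check for command timeout or
 *	termination due to Host Reset.
 * @ioc: per adapter object.
 * @status: status of the issued command.
 * @mpi_request: mf request pointer.
 * @sz: size of buffer.
 *
 * Return: 1 if a reset should be issued, else 0.
 */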
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
    u8 status, void *mpi_request, int sz)
{
    u8 issue_reset = 0;

    if (!(status & MPT3_CMD_RESET))
        issue_reset = 1;

    ioc_err(ioc, "Command %s\n",
        issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
    _debug_dump_mf(mpi_request, sz);

    return issue_reset;
}
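
/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: value to set
 * @kp: kernel parameter descriptor
 *
 * Propagates the module parameter to every registered adapter.
 */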
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
    int ret = param_set_int(val, kp);
    struct MPT3SAS_ADAPTER *ioc;

    if (ret)
        return ret;

    /* global ioc spinlock to protect controller list on list operations */
    pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
    spin_lock(&gioc_lock);
    list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
        ioc->fwfault_debug = mpt3sas_fwfault_debug;
    spin_unlock(&gioc_lock);
    return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
    param_get_int, &mpt3sas_fwfault_debug, 0644);
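
/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() for max three times if it gets zero value
 * while reading the system interface register.
 */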
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
{
    u32 i = 0, ret_val;

    do {
        ret_val = readl(addr);
        i++;
    } while (ret_val == 0 && i < 3);

    return ret_val;
}

static inline u32
_base_readl(const volatile void __iomem *addr)
{
    return readl(addr);
}
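
/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *				  in BAR0 space.
 * @ioc: per adapter object
 * @reply: reply message frame (lower 32bit addr)
 * @index: System request message index.
 */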
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
    u32 index)
{
    /*
     * 256 is offset within sys register.
     * 256 offset MPI frame starts. Max MPI frame supported is 32.
     * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
     */
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
        MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) + (index * sizeof(u32));

    writel(reply, reply_free_iomem);
}
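
/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *				to system/BAR0 region.
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */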
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
    int i;
    u32 *src_virt_mem = (u32 *)src;

    for (i = 0; i < size/4; i++)
        writel((u32)src_virt_mem[i],
            (void __iomem *)dst_iomem + (i * 4));
}
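
/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */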
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
    int i;
    u32 *src_virt_mem = (u32 *)(src);

    for (i = 0; i < size/4; i++)
        writel((u32)src_virt_mem[i],
            (void __iomem *)dst_iomem + (i * 4));
}
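
/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *			for the provided smid in BAR0 space.
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */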
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    u8 sge_chain_count)
{
    void __iomem *base_chain, *chain_virt;
    u16 cmd_credit = ioc->facts.RequestCredit + 1;

    base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) +
        REPLY_FREE_POOL_SIZE;
    chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
    return chain_virt;
}
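
/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *			in BAR0 for scatter gather chains, for
 *			the provided smid.
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */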
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    u8 sge_chain_count)
{
    phys_addr_t base_chain_phys, chain_phys;
    u16 cmd_credit = ioc->facts.RequestCredit + 1;

    base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
        (cmd_credit * ioc->request_sz) +
        REPLY_FREE_POOL_SIZE;
    chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
    return chain_phys;
}
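
/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *			buffer address for the provided smid.
 *			(Each smid can have 64K starting from
 *			the end of the chain region.)
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */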
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    void __iomem *chain_end = _base_get_chain(ioc,
        cmd_credit + 1,
        ioc->facts.MaxChainDepth);

    return chain_end + (smid * 64 * 1024);
}
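
/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *			Host buffer Physical address for the
 *			provided smid.
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Physical address of buffer location in BAR0.
 */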
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    u16 cmd_credit = ioc->facts.RequestCredit + 1;
    phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
        cmd_credit + 1,
        ioc->facts.MaxChainDepth);

    return chain_end_phys + (smid * 64 * 1024);
}
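
/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
 *			lookup list and Provides chain_buffer
 *			address for the matching dma address.
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer, or NULL on failure.
 */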
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
    dma_addr_t chain_buffer_dma)
{
    u16 index, j;
    struct chain_tracker *ct;

    for (index = 0; index < ioc->scsiio_depth; index++) {
        for (j = 0; j < ioc->chains_needed_per_io; j++) {
            ct = &ioc->chain_lookup[index].chains_per_smid[j];
            if (ct && ct->chain_buffer_dma == chain_buffer_dma)
                return ct->chain_buffer;
        }
    }
    ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
    return NULL;
}
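
/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *			are handled here. Base function for
 *			double buffering, before submitting
 *			the requests.
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */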
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
    void *mpi_request, u16 smid)
{
    Mpi2SGESimple32_t *sgel, *sgel_next;
    u32 sgl_flags, sge_chain_count = 0;
    bool is_write = false;
    u16 i = 0;
    void __iomem *buffer_iomem;
    phys_addr_t buffer_iomem_phys;
    void __iomem *buff_ptr;
    phys_addr_t buff_ptr_phys;
    void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
    void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
    phys_addr_t dst_addr_phys;
    MPI2RequestHeader_t *request_hdr;
    struct scsi_cmnd *scmd;
    struct scatterlist *sg_scmd = NULL;
    int is_scsiio_req = 0;

    request_hdr = (MPI2RequestHeader_t *) mpi_request;

    if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
        Mpi25SCSIIORequest_t *scsiio_request =
            (Mpi25SCSIIORequest_t *)mpi_request;
        sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
        is_scsiio_req = 1;
    } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
        Mpi2ConfigRequest_t *config_req =
            (Mpi2ConfigRequest_t *)mpi_request;
        sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
    } else
        return;

    /* From smid we can get scsi_cmd, once we have sg_scmd,
     * we just need to get sg_virt and sg_next to get virtual
     * address associated with sgel->Address.
     */

    if (is_scsiio_req) {
        /* Get scsi_cmd using smid */
        scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
        if (scmd == NULL) {
            ioc_err(ioc, "scmd is NULL\n");
            return;
        }

        /* Get sg_scmd from scmd provided */
        sg_scmd = scsi_sglist(scmd);
    }

    /*
     * 0 - 255	System register
     * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
     * 4352 - 4864	Reply_free pool (512 byte is reserved
     *		considering maxCredit 32. Reply need extra
     *		room, for mCPU case kept four times of
     *		maxCredit).
     * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
     *		128 byte size = 12288)
     * 17152 - x	Host buffer mapped with smid.
     *		(Each smid can have 64K Max IO.)
     * BAR0+Last 1K MSIX Addr and Data
     */

    buffer_iomem = _base_get_buffer_bar0(ioc, smid);
    buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

    buff_ptr = buffer_iomem;
    buff_ptr_phys = buffer_iomem_phys;
    WARN_ON(buff_ptr_phys > U32_MAX);

    if (le32_to_cpu(sgel->FlagsLength) &
        (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
        is_write = true;

    for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

        sgl_flags =
            (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

        switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
        case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
            /*
             * Helper function which on passing
             * chain_buffer_dma returns chain_buffer. Get
             * the virtual address for sgel->Address
             */
            sgel_next =
                _base_get_chain_buffer_dma_to_chain_buffer(ioc,
                    le32_to_cpu(sgel->Address));
            if (sgel_next == NULL)
                return;
            /*
             * This is copying a 128 byte chain
             * frame (not a host buffer)
             */
            dst_chain_addr[sge_chain_count] =
                _base_get_chain(ioc,
                    smid, sge_chain_count);
            src_chain_addr[sge_chain_count] =
                (void *) sgel_next;
            dst_addr_phys = _base_get_chain_phys(ioc,
                    smid, sge_chain_count);
            WARN_ON(dst_addr_phys > U32_MAX);
            sgel->Address =
                cpu_to_le32(lower_32_bits(dst_addr_phys));
            sgel = sgel_next;
            sge_chain_count++;
            break;
        case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
            if (is_write) {
                if (is_scsiio_req) {
                    _base_clone_to_sys_mem(buff_ptr,
                        sg_virt(sg_scmd),
                        (le32_to_cpu(sgel->FlagsLength) &
                        0x00ffffff));
                    /*
                     * FIXME: this relies on a zero
                     * PCI mem_offset.
                     */
                    sgel->Address =
                        cpu_to_le32((u32)buff_ptr_phys);
                } else {
                    _base_clone_to_sys_mem(buff_ptr,
                        ioc->config_vaddr,
                        (le32_to_cpu(sgel->FlagsLength) &
                        0x00ffffff));
                    sgel->Address =
                        cpu_to_le32((u32)buff_ptr_phys);
                }
            }
            buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
                0x00ffffff);
            buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
                0x00ffffff);
            if ((le32_to_cpu(sgel->FlagsLength) &
                (MPI2_SGE_FLAGS_END_OF_BUFFER
                    << MPI2_SGE_FLAGS_SHIFT)))
                goto eob_clone_chain;
            else {
                /*
                 * Every single element in MPT will have
                 * an associated sg_next. Better to sanity
                 * check that sg_next is not NULL, but it
                 * will be a workaround for issue.
                 */
                if (is_scsiio_req) {
                    sg_scmd = sg_next(sg_scmd);
                    if (sg_scmd)
                        sgel++;
                    else
                        goto eob_clone_chain;
                }
            }
            break;
        }
    }

eob_clone_chain:
    for (i = 0; i < sge_chain_count; i++) {
        if (is_scsiio_req)
            _base_clone_to_sys_mem(dst_chain_addr[i],
                src_chain_addr[i], ioc->request_sz);
    }
}
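
/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */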
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
    struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
    struct pci_dev *pdev;

    if (!ioc)
        return -1;

    pdev = ioc->pdev;
    if (!pdev)
        return -1;
    pci_stop_and_remove_bus_device_locked(pdev);
    return 0;
}
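
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */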
static void
_base_fault_reset_work(struct work_struct *work)
{
    struct MPT3SAS_ADAPTER *ioc =
        container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
    unsigned long flags;
    u32 doorbell;
    int rc;
    struct task_struct *p;

    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    if (ioc->shost_recovery || ioc->pci_error_recovery)
        goto rearm_timer;
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

    doorbell = mpt3sas_base_get_iocstate(ioc, 0);
    if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
        ioc_err(ioc, "SAS host is non-operational !!!!\n");

        /* It may be possible that EEH recovery can resolve some of
         * pci bus failure issues rather removing the dead ioc function
         * might be causing application performance issue.
         * So not removing the host from avail list.
         */
        if (ioc->non_operational_loop++ < 5) {
            spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                flags);
            goto rearm_timer;
        }

        /*
         * Call _scsih_flush_pending_cmds callback so that we flush all
         * pending commands back to OS. This call is required to avoid
         * deadlock at block layer. Dead IOC will fail to do diag reset,
         * and this call is safe since dead ioc will never return any
         * command back from HW.
         */
        ioc->schedule_dead_ioc_flush_running_cmds(ioc);
        /*
         * Set remove_host flag early since kernel thread will
         * take some time to execute.
         */
        ioc->remove_host = 1;

        p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
            "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
        if (IS_ERR(p))
            ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                __func__);
        else
            ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                __func__);
        return; /* don't rearm timer */
    }

    ioc->non_operational_loop = 0;

    if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
        rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        ioc_warn(ioc, "%s: hard reset: %s\n",
            __func__, rc == 0 ? "success" : "failed");
        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
            mpt3sas_base_fault_info(ioc, doorbell &
                MPI2_DOORBELL_DATA_MASK);
        if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
            MPI2_IOC_STATE_OPERATIONAL)
            return; /* don't rearm timer */
    }

    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
rearm_timer:
    if (ioc->fault_reset_work_q)
        queue_delayed_work(ioc->fault_reset_work_q,
            &ioc->fault_reset_work,
            msecs_to_jiffies(FAULT_POLLING_INTERVAL));
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
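
/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */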
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
    unsigned long flags;

    if (ioc->fault_reset_work_q)
        return;

    /* initialize fault polling */

    INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
    snprintf(ioc->fault_reset_work_q_name,
        sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
        ioc->driver_name, ioc->id);
    ioc->fault_reset_work_q =
        create_singlethread_workqueue(ioc->fault_reset_work_q_name);
    if (!ioc->fault_reset_work_q) {
        ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
        return;
    }
    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    if (ioc->fault_reset_work_q)
        queue_delayed_work(ioc->fault_reset_work_q,
            &ioc->fault_reset_work,
            msecs_to_jiffies(FAULT_POLLING_INTERVAL));
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
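
/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */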
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
    unsigned long flags;
    struct workqueue_struct *wq;

    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    wq = ioc->fault_reset_work_q;
    ioc->fault_reset_work_q = NULL;
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
    if (wq) {
        if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
            flush_workqueue(wq);
        destroy_workqueue(wq);
    }
}
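
/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */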
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
    ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
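
/**
 * mpt3sas_halt_firmware - halt's mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the enduser can
 * obtain a ring buffer from controller UART.
 */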
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
    u32 doorbell;

    if (!ioc->fwfault_debug)
        return;

    dump_stack();

    doorbell = ioc->base_readl(&ioc->chip->Doorbell);
    if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
        mpt3sas_base_fault_info(ioc, doorbell);
    else {
        writel(0xC0FFEE00, &ioc->chip->Doorbell);
        ioc_err(ioc, "Firmware is halted due to command timeout\n");
    }

    if (ioc->fwfault_debug == 2)
        for (;;)
            ;
    else
        panic("panic in %s\n", __func__);
}
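
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */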
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
    MPI2RequestHeader_t *request_hdr)
{
    u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
        MPI2_IOCSTATUS_MASK;
    char *desc = NULL;
    u16 frame_sz;
    char *func_str = NULL;

    /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
    if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
        request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
        request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
        return;

    if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
        return;

    switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

    case MPI2_IOCSTATUS_INVALID_FUNCTION:
        desc = "invalid function";
        break;
    case MPI2_IOCSTATUS_BUSY:
        desc = "busy";
        break;
    case MPI2_IOCSTATUS_INVALID_SGL:
        desc = "invalid sgl";
        break;
    case MPI2_IOCSTATUS_INTERNAL_ERROR:
        desc = "internal error";
        break;
    case MPI2_IOCSTATUS_INVALID_VPID:
        desc = "invalid vpid";
        break;
    case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
        desc = "insufficient resources";
        break;
    case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
        desc = "insufficient power";
        break;
    case MPI2_IOCSTATUS_INVALID_FIELD:
        desc = "invalid field";
        break;
    case MPI2_IOCSTATUS_INVALID_STATE:
        desc = "invalid state";
        break;
    case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
        desc = "op state not supported";
        break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

    case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
        desc = "config invalid action";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
        desc = "config invalid type";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
        desc = "config invalid page";
        break;
    case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
        desc = "config invalid data";
        break;
    case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
        desc = "config no defaults";
        break;
    case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
        desc = "config cant commit";
        break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

    case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
    case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
    case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
    case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
    case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
    case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
    case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
    case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
    case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
    case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
    case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
    case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
        break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

    case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
        desc = "eedp guard error";
        break;
    case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
        desc = "eedp ref tag error";
        break;
    case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
        desc = "eedp app tag error";
        break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

    case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
        desc = "target invalid io index";
        break;
    case MPI2_IOCSTATUS_TARGET_ABORTED:
        desc = "target aborted";
        break;
    case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
        desc = "target no conn retryable";
        break;
    case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
        desc = "target no connection";
        break;
    case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
        desc = "target xfer count mismatch";
        break;
    case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
        desc = "target data offset error";
        break;
    case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
        desc = "target too much write data";
        break;
    case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
        desc = "target iu too short";
        break;
    case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
        desc = "target ack nak timeout";
        break;
    case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
        desc = "target nak received";
        break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

    case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
        desc = "smp request failed";
        break;
    case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
        desc = "smp data overrun";
        break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

    case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
        desc = "diagnostic released";
        break;
    default:
        break;
    }

    if (!desc)
        return;

    switch (request_hdr->Function) {
    case MPI2_FUNCTION_CONFIG:
        frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
        func_str = "config_page";
        break;
    case MPI2_FUNCTION_SCSI_TASK_MGMT:
        frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
        func_str = "task_mgmt";
        break;
    case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
        frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
        func_str = "sas_iounit_ctl";
        break;
    case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
        frame_sz = sizeof(Mpi2SepRequest_t);
        func_str = "enclosure";
        break;
    case MPI2_FUNCTION_IOC_INIT:
        frame_sz = sizeof(Mpi2IOCInitRequest_t);
        func_str = "ioc_init";
        break;
    case MPI2_FUNCTION_PORT_ENABLE:
        frame_sz = sizeof(Mpi2PortEnableRequest_t);
        func_str = "port_enable";
        break;
    case MPI2_FUNCTION_SMP_PASSTHROUGH:
        frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
        func_str = "smp_passthru";
        break;
    case MPI2_FUNCTION_NVME_ENCAPSULATED:
        frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
            ioc->sge_size;
        func_str = "nvme_encapsulated";
        break;
    default:
        frame_sz = 32;
        func_str = "unknown";
        break;
    }

    ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
        desc, ioc_status, request_hdr, func_str);

    _debug_dump_mf(request_hdr, frame_sz/4);
}
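
/**
 * _base_display_event_data - verbose translation of firmware asyn events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */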
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
    Mpi2EventNotificationReply_t *mpi_reply)
{
    char *desc = NULL;
    u16 event;

    if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
        return;

    event = le16_to_cpu(mpi_reply->Event);

    switch (event) {
    case MPI2_EVENT_LOG_DATA:
        desc = "Log Data";
        break;
    case MPI2_EVENT_STATE_CHANGE:
        desc = "Status Change";
        break;
    case MPI2_EVENT_HARD_RESET_RECEIVED:
        desc = "Hard Reset Received";
        break;
    case MPI2_EVENT_EVENT_CHANGE:
        desc = "Event Change";
        break;
    case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
        desc = "Device Status Change";
        break;
    case MPI2_EVENT_IR_OPERATION_STATUS:
        if (!ioc->hide_ir_msg)
            desc = "IR Operation Status";
        break;
    case MPI2_EVENT_SAS_DISCOVERY:
    {
        Mpi2EventDataSasDiscovery_t *event_data =
            (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
        ioc_info(ioc, "Discovery: (%s)",
            event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
            "start" : "stop");
        if (event_data->DiscoveryStatus)
            pr_cont(" discovery_status(0x%08x)",
                le32_to_cpu(event_data->DiscoveryStatus));
        pr_cont("\n");
        return;
    }
    case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
        desc = "SAS Broadcast Primitive";
        break;
    case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
        desc = "SAS Init Device Status Change";
        break;
    case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
        desc = "SAS Init Table Overflow";
        break;
    case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
        desc = "SAS Topology Change List";
        break;
    case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
        desc = "SAS Enclosure Device Status Change";
        break;
    case MPI2_EVENT_IR_VOLUME:
        if (!ioc->hide_ir_msg)
            desc = "IR Volume";
        break;
    case MPI2_EVENT_IR_PHYSICAL_DISK:
        if (!ioc->hide_ir_msg)
            desc = "IR Physical Disk";
        break;
    case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
        if (!ioc->hide_ir_msg)
            desc = "IR Configuration Change List";
        break;
    case MPI2_EVENT_LOG_ENTRY_ADDED:
        if (!ioc->hide_ir_msg)
            desc = "Log Entry Added";
        break;
    case MPI2_EVENT_TEMP_THRESHOLD:
        desc = "Temperature Threshold";
        break;
    case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
        desc = "Cable Event";
        break;
    case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
        desc = "SAS Device Discovery Error";
        break;
    case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
        desc = "PCIE Device Status Change";
        break;
    case MPI2_EVENT_PCIE_ENUMERATION:
    {
        Mpi26EventDataPCIeEnumeration_t *event_data =
            (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
        ioc_info(ioc, "PCIE Enumeration: (%s)",
            event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
            "start" : "stop");
        if (event_data->EnumerationStatus)
            pr_cont("enumeration_status(0x%08x)",
                le32_to_cpu(event_data->EnumerationStatus));
        pr_cont("\n");
        return;
    }
    case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
        desc = "PCIE Topology Change List";
        break;
    }

    if (!desc)
        return;

    ioc_info(ioc, "%s\n", desc);
}
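
/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */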
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
    union loginfo_type {
        u32 loginfo;
        struct {
            u32 subcode:16;
            u32 code:8;
            u32 originator:4;
            u32 bus_type:4;
        } dw;
    };
    union loginfo_type sas_loginfo;
    char *originator_str = NULL;

    sas_loginfo.loginfo = log_info;
    if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
        return;

    /* each nexus loss loginfo */
    if (log_info == 0x31170000)
        return;

    /* eat the loginfos associated with task aborts */
    if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
        0x31140000 || log_info == 0x31130000))
        return;

    switch (sas_loginfo.dw.originator) {
    case 0:
        originator_str = "IOP";
        break;
    case 1:
        originator_str = "PL";
        break;
    case 2:
        if (!ioc->hide_ir_msg)
            originator_str = "IR";
        else
            originator_str = "WarpDrive";
        break;
    }

    ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
        log_info,
        originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
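
/**
 * _base_display_reply_info - displays ioc status and log info for a reply
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 */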
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    MPI2DefaultReply_t *mpi_reply;
    u16 ioc_status;
    u32 loginfo = 0;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (unlikely(!mpi_reply)) {
        ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
            __FILE__, __LINE__, __func__);
        return;
    }
    ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

    if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
        (ioc->logging_level & MPT_DEBUG_REPLY)) {
        _base_sas_ioc_info(ioc, mpi_reply,
            mpt3sas_base_get_msg_frame(ioc, smid));
    }

    if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
        loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
        _base_sas_log_info(ioc, loginfo);
    }

    if (ioc_status || loginfo) {
        ioc_status &= MPI2_IOCSTATUS_MASK;
        mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
    }
}
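
/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */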
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    MPI2DefaultReply_t *mpi_reply;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
        return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

    if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
        return 1;

    ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
    if (mpi_reply) {
        ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
        memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
    }
    ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

    complete(&ioc->base_cmds.done);
    return 1;
}
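
/**
 * _base_async_event - main callback handler for firmware asyn events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */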
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
    Mpi2EventNotificationReply_t *mpi_reply;
    Mpi2EventAckRequest_t *ack_request;
    u16 smid;
    struct _event_ack_list *delayed_event_ack;

    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
    if (!mpi_reply)
        return 1;
    if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
        return 1;

    _base_display_event_data(ioc, mpi_reply);

    if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
        goto out;
    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
            GFP_ATOMIC);
        if (!delayed_event_ack)
            goto out;
        INIT_LIST_HEAD(&delayed_event_ack->list);
        delayed_event_ack->Event = mpi_reply->Event;
        delayed_event_ack->EventContext = mpi_reply->EventContext;
        list_add_tail(&delayed_event_ack->list,
            &ioc->delayed_event_ack_list);
        dewtprintk(ioc,
            ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
                le16_to_cpu(mpi_reply->Event)));
        goto out;
    }

    ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
    memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
    ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
    ack_request->Event = mpi_reply->Event;
    ack_request->EventContext = mpi_reply->EventContext;
    ack_request->VF_ID = 0;
    ack_request->VP_ID = 0;
    mpt3sas_base_put_smid_default(ioc, smid);

 out:

    /* scsih callback handler */
    mpt3sas_scsih_event_callback(ioc, msix_index, reply);

    /* ctl callback handler */
    mpt3sas_ctl_event_callback(ioc, msix_index, reply);

    return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    struct scsi_cmnd *cmd;

    if (WARN_ON(!smid) ||
        WARN_ON(smid >= ioc->hi_priority_smid))
        return NULL;

    cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
    if (cmd)
        return scsi_cmd_priv(cmd);

    return NULL;
}
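
/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
 */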
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    int i;
    u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
    u8 cb_idx = 0xFF;

    if (smid < ioc->hi_priority_smid) {
        struct scsiio_tracker *st;

        if (smid < ctl_smid) {
            st = _get_st_from_smid(ioc, smid);
            if (st)
                cb_idx = st->cb_idx;
        } else if (smid == ctl_smid)
            cb_idx = ioc->ctl_cb_idx;
    } else if (smid < ioc->internal_smid) {
        i = smid - ioc->hi_priority_smid;
        cb_idx = ioc->hpr_lookup[i].cb_idx;
    } else if (smid <= ioc->hba_queue_depth) {
        i = smid - ioc->internal_smid;
        cb_idx = ioc->internal_lookup[i].cb_idx;
    }
    return cb_idx;
}
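
/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 */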
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
    u32 him_register;

    ioc->mask_interrupts = 1;
    him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
    him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
    writel(him_register, &ioc->chip->HostInterruptMask);
    ioc->base_readl(&ioc->chip->HostInterruptMask);
}
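
/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 */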
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
    u32 him_register;

    him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
    him_register &= ~MPI2_HIM_RIM;
    writel(him_register, &ioc->chip->HostInterruptMask);
    ioc->mask_interrupts = 0;
}

union reply_descriptor {
    u64 word;
    struct {
        u32 low;
        u32 high;
    } u;
};
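
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
 */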
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
    struct adapter_reply_queue *reply_q = bus_id;
    union reply_descriptor rd;
    u32 completed_cmds;
    u8 request_desript_type;
    u16 smid;
    u8 cb_idx;
    u32 reply;
    u8 msix_index = reply_q->msix_index;
    struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
    Mpi2ReplyDescriptorsUnion_t *rpf;
    u8 rc;

    if (ioc->mask_interrupts)
        return IRQ_NONE;

    if (!atomic_add_unless(&reply_q->busy, 1, 1))
        return IRQ_NONE;

    rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
    request_desript_type = rpf->Default.ReplyFlags
        & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
    if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
        atomic_dec(&reply_q->busy);
        return IRQ_NONE;
    }

    completed_cmds = 0;
    cb_idx = 0xFF;
    do {
        rd.word = le64_to_cpu(rpf->Words);
        if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
            goto out;
        reply = 0;
        smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
        if (request_desript_type ==
            MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
            request_desript_type ==
            MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
            request_desript_type ==
            MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
            cb_idx = _base_get_cb_idx(ioc, smid);
            if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                (likely(mpt_callbacks[cb_idx] != NULL))) {
                rc = mpt_callbacks[cb_idx](ioc, smid,
                    msix_index, 0);
                if (rc)
                    mpt3sas_base_free_smid(ioc, smid);
            }
        } else if (request_desript_type ==
            MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
            reply = le32_to_cpu(
                rpf->AddressReply.ReplyFrameAddress);
            if (reply > ioc->reply_dma_max_address ||
                reply < ioc->reply_dma_min_address)
                reply = 0;
            if (smid) {
                cb_idx = _base_get_cb_idx(ioc, smid);
                if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                    (likely(mpt_callbacks[cb_idx] != NULL))) {
                    rc = mpt_callbacks[cb_idx](ioc, smid,
                        msix_index, reply);
                    if (reply)
                        _base_display_reply_info(ioc,
                            smid, msix_index, reply);
                    if (rc)
                        mpt3sas_base_free_smid(ioc,
                            smid);
                }
            } else {
                _base_async_event(ioc, msix_index, reply);
            }

            /* reply free queue handling */
            if (reply) {
                ioc->reply_free_host_index =
                    (ioc->reply_free_host_index ==
                    (ioc->reply_free_queue_depth - 1)) ?
                    0 : ioc->reply_free_host_index + 1;
                ioc->reply_free[ioc->reply_free_host_index] =
                    cpu_to_le32(reply);
                if (ioc->is_mcpu_endpoint)
                    _base_clone_reply_to_sys_mem(ioc,
                        reply,
                        ioc->reply_free_host_index);
                writel(ioc->reply_free_host_index,
                    &ioc->chip->ReplyFreeHostIndex);
            }
        }

        rpf->Words = cpu_to_le64(ULLONG_MAX);
        reply_q->reply_post_host_index =
            (reply_q->reply_post_host_index ==
            (ioc->reply_post_queue_depth - 1)) ? 0 :
            reply_q->reply_post_host_index + 1;
        request_desript_type =
            reply_q->reply_post_free[reply_q->reply_post_host_index].
            Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
        completed_cmds++;
        /* Update the reply post host index after continuously
         * processing the threshold number of Reply Descriptors.
         * So that FW can find enough entries to post the Reply
         * Descriptors in the reply descriptor post queue.
         */
        if (completed_cmds > ioc->hba_queue_depth/3) {
            if (ioc->combined_reply_queue) {
                writel(reply_q->reply_post_host_index |
                    ((msix_index & 7) <<
                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                    ioc->replyPostRegisterIndex[msix_index/8]);
            } else {
                writel(reply_q->reply_post_host_index |
                    (msix_index <<
                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                    &ioc->chip->ReplyPostHostIndex);
            }
            completed_cmds = 1;
        }
        if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
            goto out;
        if (!reply_q->reply_post_host_index)
            rpf = reply_q->reply_post_free;
        else
            rpf++;
    } while (1);

 out:

    if (!completed_cmds) {
        atomic_dec(&reply_q->busy);
        return IRQ_NONE;
    }

    if (ioc->is_warpdrive) {
        writel(reply_q->reply_post_host_index,
            ioc->reply_post_host_index[msix_index]);
        atomic_dec(&reply_q->busy);
        return IRQ_HANDLED;
    }

    /* Update Reply Post Host Index.
     * For those HBA's which support combined reply queue feature
     * 1. Get the correct Supplemental Reply Post Host Index Register.
     *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
     *    Index Register address bank i.e replyPostRegisterIndex[],
     * 2. Then update this register with new reply host index value
     *    in ReplyPostIndex field and the MSIxIndex field with
     *    msix_index value reduced to a value between 0 and 7,
     *    using a modulo 8 operation. Since each Supplemental Reply Post
     *    Host Index Register supports 8 MSI-X vectors.
     *
     * For other HBA's just update the Default Reply Post Host Index
     * Register with new reply host index value in ReplyPostIndex Field
     * and msix_index value reduced to a value between 0 and 7,
     * using a modulo 8 operation, since each ReplyPostHostIndex register
     * supports 8 MSI-X vectors.
     */
    if (ioc->combined_reply_queue)
        writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
            MPI2_RPHI_MSIX_INDEX_SHIFT),
            ioc->replyPostRegisterIndex[msix_index/8]);
    else
        writel(reply_q->reply_post_host_index | (msix_index <<
            MPI2_RPHI_MSIX_INDEX_SHIFT),
            &ioc->chip->ReplyPostHostIndex);
    atomic_dec(&reply_q->busy);
    return IRQ_HANDLED;
}
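
/**
 * _base_is_controller_msix_enabled - is controller support multi-reply queues
 * @ioc: per adapter object
 *
 * Return: 1 if controller is msix enabled else 0.
 */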
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
    return (ioc->facts.IOCCapabilities &
        MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}
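
/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 */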
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
    struct adapter_reply_queue *reply_q;

    /* If MSIX capability is turned off
     * then multi-queues are not enabled
     */
    if (!_base_is_controller_msix_enabled(ioc))
        return;

    list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
        if (ioc->shost_recovery || ioc->remove_host ||
            ioc->pci_error_recovery)
            return;
        /* TMs are on msix_index == 0 */
        if (reply_q->msix_index == 0)
            continue;
        synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
    }
}
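
/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 */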
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
    mpt_callbacks[cb_idx] = NULL;
}
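
/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt
 *	callback handler
 * @cb_func: callback function
 *
 * Return: Index of @cb_func.
 */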
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
    u8 cb_idx;

    for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
        if (mpt_callbacks[cb_idx] == NULL)
            break;

    mpt_callbacks[cb_idx] = cb_func;
    return cb_idx;
}
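
/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt
 *	callback handler
 */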
void
mpt3sas_base_initialize_callback_handler(void)
{
    u8 cb_idx;

    for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
        mpt3sas_base_release_callback_handler(cb_idx);
}
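
/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to insure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */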
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
    u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
        MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
        MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
        MPI2_SGE_FLAGS_SHIFT);

    ioc->base_add_sg_single(paddr, flags_length, -1);
}
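
/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pointed to
 *	by paddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */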
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
    Mpi2SGESimple32_t *sgel = paddr;

    flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
        MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
    sgel->FlagsLength = cpu_to_le32(flags_length);
    sgel->Address = cpu_to_le32(dma_addr);
}
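
/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pointed to
 *	by paddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */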
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
    Mpi2SGESimple64_t *sgel = paddr;

    flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
        MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
    sgel->FlagsLength = cpu_to_le32(flags_length);
    sgel->Address = cpu_to_le64(dma_addr);
}
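
/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Return: chain tracker from chain_lookup table using key as
 * smid and smid's chain_offset.
 */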
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
    struct scsi_cmnd *scmd)
{
    struct chain_tracker *chain_req;
    struct scsiio_tracker *st = scsi_cmd_priv(scmd);
    u16 smid = st->smid;
    u8 chain_offset =
        atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

    if (chain_offset == ioc->chains_needed_per_io)
        return NULL;

    chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
    atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
    return chain_req;
}
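
/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */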
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
    dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
    size_t data_in_sz)
{
    u32 sgl_flags;

    if (!data_out_sz && !data_in_sz) {
        _base_build_zero_len_sge(ioc, psge);
        return;
    }

    if (data_out_sz && data_in_sz) {
        /* WRITE sgel first */
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        ioc->base_add_sg_single(psge, sgl_flags |
            data_out_sz, data_out_dma);

        /* incr sgel */
        psge += ioc->sge_size;

        /* READ sgel last */
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        ioc->base_add_sg_single(psge, sgl_flags |
            data_in_sz, data_in_dma);
    } else if (data_out_sz) /* WRITE */ {
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        ioc->base_add_sg_single(psge, sgl_flags |
            data_out_sz, data_out_dma);
    } else if (data_in_sz) /* READ */ {
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        ioc->base_add_sg_single(psge, sgl_flags |
            data_in_sz, data_in_dma);
    }
}
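
/* IEEE format sgls */

/**
 * _base_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
 * used to describe a larger data buffer. If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a
 * PRP list located elsewhere in memory to describe the remaining data memory
 * segments. The PRP list will be contiguous.
 *
 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as a SGL does.
 * Note however, that this function is only used by the IOCTL call, so the
 * memory given will be guaranteed to be contiguous. There is no need to
 * translate non-contiguous SGL into a PRP in this case. All PRPs will
 * describe contiguous space that is one page size each.
 *
 * Each NVMe message contains two PRP entries. The first (PRP1) either
 * contains a PRP list pointer or a PRP element, depending upon the command.
 * PRP2 contains the second PRP element if the memory being described fits
 * within 2 PRP entries, or a PRP list pointer if the PRP spans more than two
 * entries.
 *
 * A PRP list pointer contains the address of a PRP list, structured as a
 * linear array of PRP entries. Each PRP entry in this list describes a
 * segment of physical memory.
 *
 * Each 64-bit PRP entry comprises an address and an offset field. The address
 * always points at the beginning of a PAGE_SIZE physical memory page, and the
 * offset describes where within that page the memory segment begins. Only the
 * first element in a PRP list may contain a non-zero offset, indicating where
 * the buffer begins. The rest of the PRP entries will have an offset of 0.
 *
 * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */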
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
    dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
    size_t data_in_sz)
{
    int prp_size = NVME_PRP_SIZE;
    __le64 *prp_entry, *prp1_entry, *prp2_entry;
    __le64 *prp_page;
    dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
    u32 offset, entry_len;
    u32 page_mask_result, page_mask;
    size_t length;
    struct mpt3sas_nvme_cmd *nvme_cmd =
        (void *)nvme_encap_request->NVMe_Command;

    /*
     * Not all commands require a data transfer. If no data, just return
     * without constructing any PRP.
     */
    if (!data_in_sz && !data_out_sz)
        return;
    prp1_entry = &nvme_cmd->prp1;
    prp2_entry = &nvme_cmd->prp2;
    prp_entry = prp1_entry;
    /*
     * For the PRP entries, use the specially allocated buffer of
     * contiguous memory.
     */
    prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
    prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

    /*
     * Check if we are within 1 entry of a page boundary we don't
     * want our first entry to be a PRP List entry.
     */
    page_mask = ioc->page_size - 1;
    page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
    if (!page_mask_result) {
        /* Bump up to next page boundary. */
        prp_page = (__le64 *)((u8 *)prp_page + prp_size);
        prp_page_dma = prp_page_dma + prp_size;
    }

    /*
     * Set PRP physical pointer, which initially points to the current PRP
     * DMA memory page.
     */
    prp_entry_dma = prp_page_dma;

    /* Get physical address and length of the data buffer. */
    if (data_in_sz) {
        dma_addr = data_in_dma;
        length = data_in_sz;
    } else {
        dma_addr = data_out_dma;
        length = data_out_sz;
    }

    /* Loop while the length is not zero. */
    while (length) {
        /*
         * Check if we need to put a list pointer here if we are at
         * page boundary - prp_size (8 bytes).
         */
        page_mask_result = (prp_entry_dma + prp_size) & page_mask;
        if (!page_mask_result) {
            /*
             * This is the last entry in a PRP List, so we need to
             * put a PRP list pointer here. What this does is:
             *   - bump the current memory pointer to the next
             *     address, which will be the next full page.
             *   - set the PRP Entry to point to that page. This
             *     is now the PRP List pointer.
             *   - bump the PRP Entry pointer the start of the
             *     next page. Since all of this PRP memory is
             *     contiguous, no need to get a new page - it's
             *     just the next address.
             */
            prp_entry_dma++;
            *prp_entry = cpu_to_le64(prp_entry_dma);
            prp_entry++;
        }

        /* Need to handle if entry will be part of a page. */
        offset = dma_addr & page_mask;
        entry_len = ioc->page_size - offset;

        if (prp_entry == prp1_entry) {
            /*
             * Must fill in the first PRP pointer (PRP1) before
             * moving on.
             */
            *prp1_entry = cpu_to_le64(dma_addr);

            /*
             * Now point to the second PRP entry within the
             * command (PRP2).
             */
            prp_entry = prp2_entry;
        } else if (prp_entry == prp2_entry) {
            /*
             * Should the PRP2 entry be a PRP List pointer or just
             * a regular PRP pointer? If there is more than one
             * more page of data, must use a PRP List pointer.
             */
            if (length > ioc->page_size) {
                /*
                 * PRP2 will contain a PRP List pointer because
                 * more PRP's are needed with this command. The
                 * list will start at the beginning of the
                 * contiguous buffer.
                 */
                *prp2_entry = cpu_to_le64(prp_entry_dma);

                /*
                 * The next PRP Entry will be the start of the
                 * first PRP List.
                 */
                prp_entry = prp_page;
            } else {
                /*
                 * After this, the PRP Entries are complete.
                 * This command uses 2 PRP's and no PRP list.
                 */
                *prp2_entry = cpu_to_le64(dma_addr);
            }
        } else {
            /*
             * Put entry in list and bump the addresses.
             *
             * After PRP1 and PRP2 are filled in, this will fill
             * in all remaining PRP entries in a PRP List, one per
             * each time through the loop.
             */
            *prp_entry = cpu_to_le64(dma_addr);
            prp_entry++;
            prp_entry_dma++;
        }

        /*
         * Bump the phys address of the command's data buffer by the
         * entry_len.
         */
        dma_addr += entry_len;

        /* decrement length accounting for last partial page. */
        if (entry_len > length)
            length = 0;
        else
            length -= entry_len;
    }
}
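
/**
 * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
 *			SGLs specific to NVMe drives only
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
 * @mpi_request: mpi request
 * @smid: msg Index
 * @sge_count: scatter gather element count.
 */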
static void
base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
    struct scsi_cmnd *scmd,
    Mpi25SCSIIORequest_t *mpi_request,
    u16 smid, int sge_count)
{
    int sge_len, num_prp_in_chain = 0;
    Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
    __le64 *curr_buff;
    dma_addr_t msg_dma, sge_addr, offset;
    u32 page_mask, page_mask_result;
    struct scatterlist *sg_scmd;
    u32 first_prp_len;
    int data_len = scsi_bufflen(scmd);
    u32 nvme_pg_size;

    nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
    /*
     * Nvme has a very convoluted prp format. One prp is required
     * for each page or partial page. Driver need to split up OS sg_list
     * entries if it is longer than one page or cross a page
     * boundary. Driver also have to insert a PRP list pointer entry as
     * the last entry in each physical page of the PRP list.
     *
     * NOTE: The first PRP "entry" is actually placed in the first
     * SGL entry in the main message as IEEE 64 format. The 2nd
     * entry in the main message is the chain element, and the rest
     * of the PRP entries are built in the contiguous pcie buffer.
     */
    page_mask = nvme_pg_size - 1;

    /*
     * Native SGL is needed.
     * Put a chain element in main message frame that points to the first
     * chain buffer.
     *
     * NOTE: The ChainOffset field must be 0 when using a chain pointer to
     *       a native SGL.
     */

    /* Set main message chain element pointer */
    main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
    /*
     * For NVMe the chain element needs to be the 2nd SG entry in the main
     * message.
     */
    main_chain_element = (Mpi25IeeeSgeChain64_t *)
        ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));

    /*
     * For the PRP entries, use the specially allocated buffer of
     * contiguous memory. Normal chain buffers can't be used
     * because each chain buffer would need to be the size of an OS
     * page (4k).
     */
    curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
    msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

    main_chain_element->Address = cpu_to_le64(msg_dma);
    main_chain_element->NextChainOffset = 0;
    main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
        MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
        MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

    /* Build first prp, sge need not to be page aligned */
    ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
    sg_scmd = scsi_sglist(scmd);
    sge_addr = sg_dma_address(sg_scmd);
    sge_len = sg_dma_len(sg_scmd);

    offset = sge_addr & page_mask;
    first_prp_len = nvme_pg_size - offset;

    ptr_first_sgl->Address = cpu_to_le64(sge_addr);
    ptr_first_sgl->Length = cpu_to_le32(first_prp_len);

    data_len -= first_prp_len;

    if (sge_len > first_prp_len) {
        sge_addr += first_prp_len;
        sge_len -= first_prp_len;
    } else if (data_len && (sge_len == first_prp_len)) {
        sg_scmd = sg_next(sg_scmd);
        sge_addr = sg_dma_address(sg_scmd);
        sge_len = sg_dma_len(sg_scmd);
    }

    for (;;) {
        offset = sge_addr & page_mask;

        /* Put PRP pointer due to page boundary */
        page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
        if (unlikely(!page_mask_result)) {
            scmd_printk(KERN_NOTICE,
                scmd, "page boundary curr_buff: 0x%p\n",
                curr_buff);
            msg_dma += 8;
            *curr_buff = cpu_to_le64(msg_dma);
            curr_buff++;
            num_prp_in_chain++;
        }

        *curr_buff = cpu_to_le64(sge_addr);
        curr_buff++;
        msg_dma += 8;
        num_prp_in_chain++;

        sge_addr += nvme_pg_size;
        sge_len -= nvme_pg_size;
        data_len -= nvme_pg_size;

        if (data_len <= 0)
            break;

        if (sge_len > 0)
            continue;

        sg_scmd = sg_next(sg_scmd);
        sge_addr = sg_dma_address(sg_scmd);
        sge_len = sg_dma_len(sg_scmd);
    }

    main_chain_element->Length =
        cpu_to_le32(num_prp_in_chain * sizeof(u64));
    return;
}
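
/**
 * base_is_prp_possible - Check if PRP can be built or not
 * @ioc: per adapter object
 * @pcie_device: points to the PCIe device's info
 * @scmd: scsi command
 * @sge_count: scatter gather element count.
 *
 * Return: true: PRPs can be built
 *	   false: IEEE SGLs needs to be built
 */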
static bool
base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
    struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
{
    u32 data_length = 0;
    bool build_prp = true;

    data_length = scsi_bufflen(scmd);

    /* If data length is <= 16K and number of SGE entries are <= 2,
     * we build IEEE SGL
     */
    if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
        build_prp = false;

    return build_prp;
}
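
/**
 * _base_check_pcie_native_sgl - This function is called for PCIe end devices
 * to determine if the driver needs to build a native SGL. If so, that native
 * SGL is built in the special contiguous buffers allocated especially for
 * PCIe SGL creation. If the driver will not build a native SGL, return
 * TRUE and a normal IEEE SGL will be built. Currently this routine
 * supports NVMe.
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 * @scmd: scsi command
 * @pcie_device: points to the PCIe device's info
 *
 * Return: 0 if native SGL was built, 1 if no SGL was built
 */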
static int
_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
    Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
    struct _pcie_device *pcie_device)
{
    int sges_left;

    /* Get the SG list pointer and info. */
    sges_left = scsi_dma_map(scmd);
    if (sges_left < 0) {
        sdev_printk(KERN_ERR, scmd->device,
            "scsi_dma_map failed: request for %d bytes!\n",
            scsi_bufflen(scmd));
        return 1;
    }

    /* Check if we need to build a native SG list. */
    if (base_is_prp_possible(ioc, pcie_device,
        scmd, sges_left) == 0) {
        /* PRP cannot be built; fall back to IEEE SGL */
        goto out;
    }

    /*
     * Build native NVMe PRP.
     */
    base_make_prp_nvme(ioc, scmd, mpi_request,
        smid, sges_left);

    return 0;
out:
    scsi_dma_unmap(scmd);
    return 1;
}
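
/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: data transfer length
 * @dma_addr: Physical address
 */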
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
    dma_addr_t dma_addr)
{
    Mpi25IeeeSgeChain64_t *sgel = paddr;

    sgel->Flags = flags;
    sgel->NextChainOffset = chain_offset;
    sgel->Length = cpu_to_le32(length);
    sgel->Address = cpu_to_le64(dma_addr);
}
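
/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to insure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */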
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
    u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
        MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
        MPI25_IEEE_SGE_FLAGS_END_OF_LIST);

    _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}
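
/**
 * _base_build_sg_scmd - main sg creation routine
 *		pcie_device is unused here!
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Return: 0 success, anything else error
 */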
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
    struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
{
    Mpi2SCSIIORequest_t *mpi_request;
    dma_addr_t chain_dma;
    struct scatterlist *sg_scmd;
    void *sg_local, *chain;
    u32 chain_offset;
    u32 chain_length;
    u32 chain_flags;
    int sges_left;
    u32 sges_in_segment;
    u32 sgl_flags;
    u32 sgl_flags_last_element;
    u32 sgl_flags_end_buffer;
    struct chain_tracker *chain_req;

    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

    /* init scatter gather flags */
    sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
    if (scmd->sc_data_direction == DMA_TO_DEVICE)
        sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
    sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
        << MPI2_SGE_FLAGS_SHIFT;
    sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
        MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
        << MPI2_SGE_FLAGS_SHIFT;
    sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;

    sg_scmd = scsi_sglist(scmd);
    sges_left = scsi_dma_map(scmd);
    if (sges_left < 0) {
        sdev_printk(KERN_ERR, scmd->device,
            "scsi_dma_map failed: request for %d bytes!\n",
            scsi_bufflen(scmd));
        return -ENOMEM;
    }

    sg_local = &mpi_request->SGL;
    sges_in_segment = ioc->max_sges_in_main_message;
    if (sges_left <= sges_in_segment)
        goto fill_in_last_segment;

    mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
        (sges_in_segment * ioc->sge_size))/4;

    /* fill in main message segment when there is a chain following */
    while (sges_in_segment) {
        if (sges_in_segment == 1)
            ioc->base_add_sg_single(sg_local,
                sgl_flags_last_element | sg_dma_len(sg_scmd),
                sg_dma_address(sg_scmd));
        else
            ioc->base_add_sg_single(sg_local, sgl_flags |
                sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
        sg_scmd = sg_next(sg_scmd);
        sg_local += ioc->sge_size;
        sges_left--;
        sges_in_segment--;
    }

    /* initializing the chain flags and pointers */
    chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
    chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
    if (!chain_req)
        return -1;
    chain = chain_req->chain_buffer;
    chain_dma = chain_req->chain_buffer_dma;
    do {
        sges_in_segment = (sges_left <=
            ioc->max_sges_in_chain_message) ? sges_left :
            ioc->max_sges_in_chain_message;
        chain_offset = (sges_left == sges_in_segment) ?
            0 : (sges_in_segment * ioc->sge_size)/4;
        chain_length = sges_in_segment * ioc->sge_size;
        if (chain_offset) {
            chain_offset = chain_offset <<
                MPI2_SGE_CHAIN_OFFSET_SHIFT;
            chain_length += ioc->sge_size;
        }
        ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
            chain_length, chain_dma);
        sg_local = chain;
        if (!chain_offset)
            goto fill_in_last_segment;

        /* fill in chain segments */
        while (sges_in_segment) {
            if (sges_in_segment == 1)
                ioc->base_add_sg_single(sg_local,
                    sgl_flags_last_element |
                    sg_dma_len(sg_scmd),
                    sg_dma_address(sg_scmd));
            else
                ioc->base_add_sg_single(sg_local, sgl_flags |
                    sg_dma_len(sg_scmd),
                    sg_dma_address(sg_scmd));
            sg_scmd = sg_next(sg_scmd);
            sg_local += ioc->sge_size;
            sges_left--;
            sges_in_segment--;
        }

        chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
        if (!chain_req)
            return -1;
        chain = chain_req->chain_buffer;
        chain_dma = chain_req->chain_buffer_dma;
    } while (1);


 fill_in_last_segment:

    /* fill the last segment */
    while (sges_left) {
        if (sges_left == 1)
            ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
                sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
        else
            ioc->base_add_sg_single(sg_local, sgl_flags |
                sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
        sg_scmd = sg_next(sg_scmd);
        sg_local += ioc->sge_size;
        sges_left--;
    }

    return 0;
}
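
/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
 * constructed on need.
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Return: 0 success, anything else error
 */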
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
    struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
{
    Mpi25SCSIIORequest_t *mpi_request;
    dma_addr_t chain_dma;
    struct scatterlist *sg_scmd;
    void *sg_local, *chain;
    u32 chain_offset;
    u32 chain_length;
    int sges_left;
    u32 sges_in_segment;
    u8 simple_sgl_flags;
    u8 simple_sgl_flags_last;
    u8 chain_sgl_flags;
    struct chain_tracker *chain_req;

    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

    /* init scatter gather flags */
    simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
        MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
    simple_sgl_flags_last = simple_sgl_flags |
        MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
    chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
        MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;

    /* Check if we need to build a native SG list. */
    if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
        smid, scmd, pcie_device) == 0)) {
        /* We built a native SG list, just return. */
        return 0;
    }

    sg_scmd = scsi_sglist(scmd);
    sges_left = scsi_dma_map(scmd);
    if (sges_left < 0) {
        sdev_printk(KERN_ERR, scmd->device,
            "scsi_dma_map failed: request for %d bytes!\n",
            scsi_bufflen(scmd));
        return -ENOMEM;
    }

    sg_local = &mpi_request->SGL;
    sges_in_segment = (ioc->request_sz -
        offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
    if (sges_left <= sges_in_segment)
        goto fill_in_last_segment;

    mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
        (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);

    /* fill in main message segment when there is a chain following */
    while (sges_in_segment > 1) {
        _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
            sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
        sg_scmd = sg_next(sg_scmd);
        sg_local += ioc->sge_size_ieee;
        sges_left--;
        sges_in_segment--;
    }

    /* initializing the chain flags and pointers */
    chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
    if (!chain_req)
        return -1;
    chain = chain_req->chain_buffer;
    chain_dma = chain_req->chain_buffer_dma;
    do {
        sges_in_segment = (sges_left <=
            ioc->max_sges_in_chain_message) ? sges_left :
            ioc->max_sges_in_chain_message;
        chain_offset = (sges_left == sges_in_segment) ?
            0 : sges_in_segment;
        chain_length = sges_in_segment * ioc->sge_size_ieee;
        if (chain_offset)
            chain_length += ioc->sge_size_ieee;
        _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
            chain_offset, chain_length, chain_dma);

        sg_local = chain;
        if (!chain_offset)
            goto fill_in_last_segment;

        /* fill in chain segments */
        while (sges_in_segment) {
            _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
                sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
            sg_scmd = sg_next(sg_scmd);
            sg_local += ioc->sge_size_ieee;
            sges_left--;
            sges_in_segment--;
        }

        chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
        if (!chain_req)
            return -1;
        chain = chain_req->chain_buffer;
        chain_dma = chain_req->chain_buffer_dma;
    } while (1);


 fill_in_last_segment:

    /* fill the last segment */
    while (sges_left > 0) {
        if (sges_left == 1)
            _base_add_sg_single_ieee(sg_local,
                simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
                sg_dma_address(sg_scmd));
        else
            _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
                sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
        sg_scmd = sg_next(sg_scmd);
        sg_local += ioc->sge_size_ieee;
        sges_left--;
    }

    return 0;
}
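
/**
 * _base_build_sg_ieee - build generic sg for IEEE format
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */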
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
    dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
    size_t data_in_sz)
{
    u8 sgl_flags;

    if (!data_out_sz && !data_in_sz) {
        _base_build_zero_len_sge_ieee(ioc, psge);
        return;
    }

    if (data_out_sz && data_in_sz) {
        /* WRITE sgel first */
        sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
        _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
            data_out_dma);

        /* incr sgel */
        psge += ioc->sge_size_ieee;

        /* READ sgel last */
        sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
        _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
            data_in_dma);
    } else if (data_out_sz) /* WRITE */ {
        sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
        _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
            data_out_dma);
    } else if (data_in_sz) /* READ */ {
        sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
        _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
            data_in_dma);
    }
}

#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2570
2571
2572
2573
2574
2575
2576
2577
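/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Tries a 64-bit DMA mask first, falling back to 32-bit.
 *
 * Return: 0 for success, non-zero for failure.
 */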
2578static int
2579_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2580{
2581 struct sysinfo s;
2582 u64 consistent_dma_mask;
2583
2584 if (ioc->is_mcpu_endpoint)
2585 goto try_32bit;
2586
2587 if (ioc->dma_mask)
2588 consistent_dma_mask = DMA_BIT_MASK(64);
2589 else
2590 consistent_dma_mask = DMA_BIT_MASK(32);
2591
2592 if (sizeof(dma_addr_t) > 4) {
2593 const uint64_t required_mask =
2594 dma_get_required_mask(&pdev->dev);
2595 if ((required_mask > DMA_BIT_MASK(32)) &&
2596 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
2597 !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
2598 ioc->base_add_sg_single = &_base_add_sg_single_64;
2599 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2600 ioc->dma_mask = 64;
2601 goto out;
2602 }
2603 }
2604
2605 try_32bit:
2606 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2607 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2608 ioc->base_add_sg_single = &_base_add_sg_single_32;
2609 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2610 ioc->dma_mask = 32;
2611 } else
2612 return -ENODEV;
2613
2614 out:
2615 si_meminfo(&s);
2616 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2617 ioc->dma_mask, convert_to_kb(s.totalram));
2618
2619 return 0;
2620}
2621
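/**
 * _base_change_consistent_dma_mask - try to widen the consistent DMA mask
 * to 64 bit, falling back to 32 bit on failure
 * @ioc: per adapter object
 * @pdev: PCI device struct
 */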
2622static int
2623_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
2624 struct pci_dev *pdev)
2625{
2626 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2627 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2628 return -ENODEV;
2629 }
2630 return 0;
2631}
2632
2633
2634
2635
2636
2637
2638
2639
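/**
 * _base_check_enable_msix - check whether MSI-X is supported
 * @ioc: per adapter object
 *
 * Reads the MSI-X capability to set the number of available msix
 * vectors; the listed SAS2 controllers are limited to a single vector.
 */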
2640static int
2641_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2642{
2643 int base;
2644 u16 message_control;
2645
2646
2647
2648
2649 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2650 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2651 return -EINVAL;
2652 }
2653
2654 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2655 if (!base) {
2656 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2657 return -EINVAL;
2658 }
2659
2660
2661
2662 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2663 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2664 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2665 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2666 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2667 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2668 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2669 ioc->msix_vector_count = 1;
2670 else {
2671 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2672 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2673 }
2674 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2675 ioc->msix_vector_count));
2676 return 0;
2677}
2678
2679
2680
2681
2682
2683
2684
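/**
 * _base_free_irq - free irq
 * @ioc: per adapter object
 *
 * Frees the interrupt of each reply queue and removes it from the list.
 */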
2685static void
2686_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2687{
2688 struct adapter_reply_queue *reply_q, *next;
2689
2690 if (list_empty(&ioc->reply_queue_list))
2691 return;
2692
2693 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2694 list_del(&reply_q->list);
2695 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2696 reply_q);
2697 kfree(reply_q);
2698 }
2699}
2700
2701
2702
2703
2704
2705
2706
2707
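/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Inserts the allocated reply_queue into the reply_queue_list.
 */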
2708static int
2709_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2710{
2711 struct pci_dev *pdev = ioc->pdev;
2712 struct adapter_reply_queue *reply_q;
2713 int r;
2714
2715 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2716 if (!reply_q) {
2717 ioc_err(ioc, "unable to allocate memory %zu!\n",
2718 sizeof(struct adapter_reply_queue));
2719 return -ENOMEM;
2720 }
2721 reply_q->ioc = ioc;
2722 reply_q->msix_index = index;
2723
2724 atomic_set(&reply_q->busy, 0);
2725 if (ioc->msix_enable)
2726 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2727 ioc->driver_name, ioc->id, index);
2728 else
2729 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2730 ioc->driver_name, ioc->id);
2731 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2732 IRQF_SHARED, reply_q->name, reply_q);
2733 if (r) {
2734 pr_err("%s: unable to allocate interrupt %d!\n",
2735 reply_q->name, pci_irq_vector(pdev, index));
2736 kfree(reply_q);
2737 return -EBUSY;
2738 }
2739
2740 INIT_LIST_HEAD(&reply_q->list);
2741 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2742 return 0;
2743}
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
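/**
 * _base_assign_reply_queues - assign an msix index to each cpu
 * @ioc: per adapter object
 *
 * When smp_affinity_enable is set, the mapping comes from
 * pci_irq_get_affinity(); otherwise cpus are distributed round-robin
 * across the reply queues.
 */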
2754static void
2755_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2756{
2757 unsigned int cpu, nr_cpus, nr_msix, index = 0;
2758 struct adapter_reply_queue *reply_q;
2759
2760 if (!_base_is_controller_msix_enabled(ioc))
2761 return;
2762
2763 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2764
2765 nr_cpus = num_online_cpus();
2766 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2767 ioc->facts.MaxMSIxVectors);
2768 if (!nr_msix)
2769 return;
2770
2771 if (smp_affinity_enable) {
2772 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2773 const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
2774 reply_q->msix_index);
2775 if (!mask) {
2776 ioc_warn(ioc, "no affinity for msi %x\n",
2777 reply_q->msix_index);
2778 continue;
2779 }
2780
2781 for_each_cpu_and(cpu, mask, cpu_online_mask) {
2782 if (cpu >= ioc->cpu_msix_table_sz)
2783 break;
2784 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2785 }
2786 }
2787 return;
2788 }
2789 cpu = cpumask_first(cpu_online_mask);
2790
2791 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2792
2793 unsigned int i, group = nr_cpus / nr_msix;
2794
2795 if (cpu >= nr_cpus)
2796 break;
2797
2798 if (index < nr_cpus % nr_msix)
2799 group++;
2800
2801 for (i = 0 ; i < group ; i++) {
2802 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2803 cpu = cpumask_next(cpu, cpu_online_mask);
2804 }
2805 index++;
2806 }
2807}
2808
2809
2810
2811
2812
2813
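/**
 * _base_disable_msix - disable msix
 * @ioc: per adapter object
 */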
2814static void
2815_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
2816{
2817 if (!ioc->msix_enable)
2818 return;
2819 pci_disable_msix(ioc->pdev);
2820 ioc->msix_enable = 0;
2821}
2822
2823
2824
2825
2826
2827
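/**
 * _base_enable_msix - enable msix, failing back to io-apic
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */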
2828static int
2829_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2830{
2831 int r;
2832 int i, local_max_msix_vectors;
2833 u8 try_msix = 0;
2834 unsigned int irq_flags = PCI_IRQ_MSIX;
2835
2836 if (msix_disable == -1 || msix_disable == 0)
2837 try_msix = 1;
2838
2839 if (!try_msix)
2840 goto try_ioapic;
2841
2842 if (_base_check_enable_msix(ioc) != 0)
2843 goto try_ioapic;
2844
2845 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
2846 ioc->msix_vector_count);
2847
2848 ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
2849 ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
2850
2851 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
2852 local_max_msix_vectors = (reset_devices) ? 1 : 8;
2853 else
2854 local_max_msix_vectors = max_msix_vectors;
2855
2856 if (local_max_msix_vectors > 0)
2857 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
2858 ioc->reply_queue_count);
2859 else if (local_max_msix_vectors == 0)
2860 goto try_ioapic;
2861
2862 if (ioc->msix_vector_count < ioc->cpu_count)
2863 smp_affinity_enable = 0;
2864
2865 if (smp_affinity_enable)
2866 irq_flags |= PCI_IRQ_AFFINITY;
2867
2868 r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
2869 irq_flags);
2870 if (r < 0) {
2871 dfailprintk(ioc,
2872 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
2873 r));
2874 goto try_ioapic;
2875 }
2876
2877 ioc->msix_enable = 1;
2878 ioc->reply_queue_count = r;
2879 for (i = 0; i < ioc->reply_queue_count; i++) {
2880 r = _base_request_irq(ioc, i);
2881 if (r) {
2882 _base_free_irq(ioc);
2883 _base_disable_msix(ioc);
2884 goto try_ioapic;
2885 }
2886 }
2887
2888 return 0;
2889
2890
2891 try_ioapic:
2892
2893 ioc->reply_queue_count = 1;
2894 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
2895 if (r < 0) {
2896 dfailprintk(ioc,
2897 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
2898 r));
2899 } else
2900 r = _base_request_irq(ioc, 0);
2901
2902 return r;
2903}
2904
2905
2906
2907
2908
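/**
 * mpt3sas_base_unmap_resources - free controller resources
 * @ioc: per adapter object
 */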
2909static void
2910mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2911{
2912 struct pci_dev *pdev = ioc->pdev;
2913
2914 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
2915
2916 _base_free_irq(ioc);
2917 _base_disable_msix(ioc);
2918
2919 kfree(ioc->replyPostRegisterIndex);
2920 ioc->replyPostRegisterIndex = NULL;
2921
2922
2923 if (ioc->chip_phys) {
2924 iounmap(ioc->chip);
2925 ioc->chip_phys = 0;
2926 }
2927
2928 if (pci_is_enabled(pdev)) {
2929 pci_release_selected_regions(ioc->pdev, ioc->bars);
2930 pci_disable_pcie_error_reporting(pdev);
2931 pci_disable_device(pdev);
2932 }
2933}
2934
2935
2936
2937
2938
2939
2940
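/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */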
2941int
2942mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2943{
2944 struct pci_dev *pdev = ioc->pdev;
2945 u32 memap_sz;
2946 u32 pio_sz;
2947 int i, r = 0;
2948 u64 pio_chip = 0;
2949 phys_addr_t chip_phys = 0;
2950 struct adapter_reply_queue *reply_q;
2951
2952 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
2953
2954 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2955 if (pci_enable_device_mem(pdev)) {
2956 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
2957 ioc->bars = 0;
2958 return -ENODEV;
2959 }
2960
2961
2962 if (pci_request_selected_regions(pdev, ioc->bars,
2963 ioc->driver_name)) {
2964 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
2965 ioc->bars = 0;
2966 r = -ENODEV;
2967 goto out_fail;
2968 }
2969
2970
2971 pci_enable_pcie_error_reporting(pdev);
2972
2973 pci_set_master(pdev);
2974
2975
2976 if (_base_config_dma_addressing(ioc, pdev) != 0) {
2977 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
2978 r = -ENODEV;
2979 goto out_fail;
2980 }
2981
2982 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2983 (!memap_sz || !pio_sz); i++) {
2984 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2985 if (pio_sz)
2986 continue;
2987 pio_chip = (u64)pci_resource_start(pdev, i);
2988 pio_sz = pci_resource_len(pdev, i);
2989 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2990 if (memap_sz)
2991 continue;
2992 ioc->chip_phys = pci_resource_start(pdev, i);
2993 chip_phys = ioc->chip_phys;
2994 memap_sz = pci_resource_len(pdev, i);
2995 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
2996 }
2997 }
2998
2999 if (ioc->chip == NULL) {
3000 ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
3001 r = -EINVAL;
3002 goto out_fail;
3003 }
3004
3005 _base_mask_interrupts(ioc);
3006
3007 r = _base_get_ioc_facts(ioc);
3008 if (r)
3009 goto out_fail;
3010
3011 if (!ioc->rdpq_array_enable_assigned) {
3012 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3013 ioc->rdpq_array_enable_assigned = 1;
3014 }
3015
3016 r = _base_enable_msix(ioc);
3017 if (r)
3018 goto out_fail;
3019
3020
3021
3022
3023 if (ioc->combined_reply_queue) {
3024
3025
3026
3027
3028
3029
3030 ioc->replyPostRegisterIndex = kcalloc(
3031 ioc->combined_reply_index_count,
3032 sizeof(resource_size_t *), GFP_KERNEL);
3033 if (!ioc->replyPostRegisterIndex) {
3034 dfailprintk(ioc,
3035 ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
3036 r = -ENOMEM;
3037 goto out_fail;
3038 }
3039
3040 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3041 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3042 ((u8 __force *)&ioc->chip->Doorbell +
3043 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3044 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3045 }
3046 }
3047
3048 if (ioc->is_warpdrive) {
3049 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3050 &ioc->chip->ReplyPostHostIndex;
3051
3052 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3053 ioc->reply_post_host_index[i] =
3054 (resource_size_t __iomem *)
3055 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3056 * 4)));
3057 }
3058
3059 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3060 pr_info("%s: %s enabled: IRQ %d\n",
3061 reply_q->name,
3062 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3063 pci_irq_vector(ioc->pdev, reply_q->msix_index));
3064
3065 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3066 &chip_phys, ioc->chip, memap_sz);
3067 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3068 (unsigned long long)pio_chip, pio_sz);
3069
3070
3071 pci_save_state(pdev);
3072 return 0;
3073
3074 out_fail:
3075 mpt3sas_base_unmap_resources(ioc);
3076 return r;
3077}
3078
3079
3080
3081
3082
3083
3084
3085
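/**
 * mpt3sas_base_get_msg_frame - obtain request message frame pointer
 * @ioc: per adapter object
 * @smid: system request message index (smid zero is reserved)
 *
 * Return: virt pointer to message frame.
 */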
3086void *
3087mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3088{
3089 return (void *)(ioc->request + (smid * ioc->request_sz));
3090}
3091
3092
3093
3094
3095
3096
3097
3098
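/**
 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: virt pointer to sense buffer.
 */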
3099void *
3100mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3101{
3102 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3103}
3104
3105
3106
3107
3108
3109
3110
3111
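/**
 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: phys pointer to the low 32bit address of the sense buffer.
 */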
3112__le32
3113mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3114{
3115 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3116 SCSI_SENSE_BUFFERSIZE));
3117}
3118
3119
3120
3121
3122
3123
3124
3125
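/**
 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: virt pointer to a PCIe SGL.
 */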
3126void *
3127mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3128{
3129 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3130}
3131
3132
3133
3134
3135
3136
3137
3138
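/**
 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: phys pointer to the PCIe SGL.
 */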
3139dma_addr_t
3140mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3141{
3142 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3143}
3144
3145
3146
3147
3148
3149
3150
3151
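/**
 * mpt3sas_base_get_reply_virt_addr - obtain reply frame virt address
 * @ioc: per adapter object
 * @phys_addr: lower 32bit physical addr of the reply
 *
 * Converts the 32bit lower physical addr into a virt address.
 */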
3152void *
3153mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3154{
3155 if (!phys_addr)
3156 return NULL;
3157 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3158}
3159
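/* map the calling cpu to its assigned msix index */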
3160static inline u8
3161_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
3162{
3163 return ioc->cpu_msix_table[raw_smp_processor_id()];
3164}
3165
3166
3167
3168
3169
3170
3171
3172
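/**
 * mpt3sas_base_get_smid - obtain a free smid from internal queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Return: smid (zero is invalid)
 *
 * A typical caller (see _base_display_fwpkg_version below for one)
 * checks for a zero return and then fetches the message frame:
 *
 *	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 *	if (!smid)
 *		return -EAGAIN;
 *	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 */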
3173u16
3174mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3175{
3176 unsigned long flags;
3177 struct request_tracker *request;
3178 u16 smid;
3179
3180 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3181 if (list_empty(&ioc->internal_free_list)) {
3182 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3183 ioc_err(ioc, "%s: smid not available\n", __func__);
3184 return 0;
3185 }
3186
3187 request = list_entry(ioc->internal_free_list.next,
3188 struct request_tracker, tracker_list);
3189 request->cb_idx = cb_idx;
3190 smid = request->smid;
3191 list_del(&request->tracker_list);
3192 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3193 return smid;
3194}
3195
3196
3197
3198
3199
3200
3201
3202
3203
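/**
 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 * @scmd: pointer to scsi command object
 *
 * Return: smid (zero is invalid); derived from the block layer tag,
 * so it never fails for a queued command.
 */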
3204u16
3205mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3206 struct scsi_cmnd *scmd)
3207{
3208 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3209 unsigned int tag = scmd->request->tag;
3210 u16 smid;
3211
3212 smid = tag + 1;
3213 request->cb_idx = cb_idx;
3214 request->msix_io = _base_get_msix_index(ioc);
3215 request->smid = smid;
3216 INIT_LIST_HEAD(&request->chain_list);
3217 return smid;
3218}
3219
3220
3221
3222
3223
3224
3225
3226
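/**
 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Return: smid (zero is invalid)
 */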
3227u16
3228mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3229{
3230 unsigned long flags;
3231 struct request_tracker *request;
3232 u16 smid;
3233
3234 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3235 if (list_empty(&ioc->hpr_free_list)) {
3236 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3237 return 0;
3238 }
3239
3240 request = list_entry(ioc->hpr_free_list.next,
3241 struct request_tracker, tracker_list);
3242 request->cb_idx = cb_idx;
3243 smid = request->smid;
3244 list_del(&request->tracker_list);
3245 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3246 return smid;
3247}
3248
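/* wake the reset thread once all pending I/O has drained during recovery */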
3249static void
3250_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3251{
3252
3253
3254
3255 if (ioc->shost_recovery && ioc->pending_io_count) {
3256 ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
3257 if (ioc->pending_io_count == 0)
3258 wake_up(&ioc->reset_wq);
3259 }
3260}
3261
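/* invalidate a scsiio tracker and release its chain buffers */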
3262void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3263 struct scsiio_tracker *st)
3264{
3265 if (WARN_ON(st->smid == 0))
3266 return;
3267 st->cb_idx = 0xFF;
3268 st->direct_io = 0;
3269 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3270 st->smid = 0;
3271}
3272
3273
3274
3275
3276
3277
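/**
 * mpt3sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * SMID ranges: scsiio trackers come first, then hi-priority
 * (starting at hi_priority_smid), then internal (starting at
 * internal_smid).
 */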
3278void
3279mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3280{
3281 unsigned long flags;
3282 int i;
3283
3284 if (smid < ioc->hi_priority_smid) {
3285 struct scsiio_tracker *st;
3286
3287 st = _get_st_from_smid(ioc, smid);
3288 if (!st) {
3289 _base_recovery_check(ioc);
3290 return;
3291 }
3292 mpt3sas_base_clear_st(ioc, st);
3293 _base_recovery_check(ioc);
3294 return;
3295 }
3296
3297 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3298 if (smid < ioc->internal_smid) {
3299
3300 i = smid - ioc->hi_priority_smid;
3301 ioc->hpr_lookup[i].cb_idx = 0xFF;
3302 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3303 } else if (smid <= ioc->hba_queue_depth) {
3304
3305 i = smid - ioc->internal_smid;
3306 ioc->internal_lookup[i].cb_idx = 0xFF;
3307 list_add(&ioc->internal_lookup[i].tracker_list,
3308 &ioc->internal_free_list);
3309 }
3310 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3311}
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
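/**
 * _base_mpi_ep_writeq - 32 bit write to MMIO
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * For the MPI endpoint the 64 bit descriptor is posted as two 32 bit
 * writes under a lock, since a single atomic 64 bit write is not
 * guaranteed on 32 bit platforms.
 */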
3323static inline void
3324_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3325 spinlock_t *writeq_lock)
3326{
3327 unsigned long flags;
3328
3329 spin_lock_irqsave(writeq_lock, flags);
3330 __raw_writel((u32)(b), addr);
3331 __raw_writel((u32)(b >> 32), (addr + 4));
3332 mmiowb();
3333 spin_unlock_irqrestore(writeq_lock, flags);
3334}
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
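/**
 * _base_writeq - 64 bit write to MMIO
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * Glue for handling an atomic 64 bit word to MMIO; falls back to the
 * locked 32 bit pair when writeq() is unavailable.
 */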
3346#if defined(writeq) && defined(CONFIG_64BIT)
3347static inline void
3348_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3349{
3350 __raw_writeq(b, addr);
3351 mmiowb();
3352}
3353#else
3354static inline void
3355_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3356{
3357 _base_mpi_ep_writeq(b, addr, writeq_lock);
3358}
3359#endif
3360
3361
3362
3363
3364
3365
3366
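/**
 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * MPI endpoint variant: clones the request frame into the system
 * interface region before posting the descriptor.
 */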
3367static void
3368_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3369{
3370 Mpi2RequestDescriptorUnion_t descriptor;
3371 u64 *request = (u64 *)&descriptor;
3372 void *mpi_req_iomem;
3373 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3374
3375 _clone_sg_entries(ioc, (void *) mfp, smid);
3376 mpi_req_iomem = (void __force *)ioc->chip +
3377 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3378 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3379 ioc->request_sz);
3380 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3381 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3382 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3383 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3384 descriptor.SCSIIO.LMID = 0;
3385 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3386 &ioc->scsi_lookup_lock);
3387}
3388
3389
3390
3391
3392
3393
3394
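/**
 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */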
3395static void
3396_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3397{
3398 Mpi2RequestDescriptorUnion_t descriptor;
3399 u64 *request = (u64 *)&descriptor;
3400
3401
3402 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3403 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3404 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3405 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3406 descriptor.SCSIIO.LMID = 0;
3407 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3408 &ioc->scsi_lookup_lock);
3409}
3410
3411
3412
3413
3414
3415
3416
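/**
 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */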
3417void
3418mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3419 u16 handle)
3420{
3421 Mpi2RequestDescriptorUnion_t descriptor;
3422 u64 *request = (u64 *)&descriptor;
3423
3424 descriptor.SCSIIO.RequestFlags =
3425 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3426 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3427 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3428 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3429 descriptor.SCSIIO.LMID = 0;
3430 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3431 &ioc->scsi_lookup_lock);
3432}
3433
3434
3435
3436
3437
3438
3439
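/**
 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix of the IO in case of task abort, else 0
 */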
3440void
3441mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3442 u16 msix_task)
3443{
3444 Mpi2RequestDescriptorUnion_t descriptor;
3445 void *mpi_req_iomem;
3446 u64 *request;
3447
3448 if (ioc->is_mcpu_endpoint) {
3449 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3450
3451
3452 mpi_req_iomem = (void __force *)ioc->chip
3453 + MPI_FRAME_START_OFFSET
3454 + (smid * ioc->request_sz);
3455 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3456 ioc->request_sz);
3457 }
3458
3459 request = (u64 *)&descriptor;
3460
3461 descriptor.HighPriority.RequestFlags =
3462 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3463 descriptor.HighPriority.MSIxIndex = msix_task;
3464 descriptor.HighPriority.SMID = cpu_to_le16(smid);
3465 descriptor.HighPriority.LMID = 0;
3466 descriptor.HighPriority.Reserved1 = 0;
3467 if (ioc->is_mcpu_endpoint)
3468 _base_mpi_ep_writeq(*request,
3469 &ioc->chip->RequestDescriptorPostLow,
3470 &ioc->scsi_lookup_lock);
3471 else
3472 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3473 &ioc->scsi_lookup_lock);
3474}
3475
3476
3477
3478
3479
3480
3481
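/**
 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 */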
3482void
3483mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3484{
3485 Mpi2RequestDescriptorUnion_t descriptor;
3486 u64 *request = (u64 *)&descriptor;
3487
3488 descriptor.Default.RequestFlags =
3489 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3490 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
3491 descriptor.Default.SMID = cpu_to_le16(smid);
3492 descriptor.Default.LMID = 0;
3493 descriptor.Default.DescriptorTypeDependent = 0;
3494 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3495 &ioc->scsi_lookup_lock);
3496}
3497
3498
3499
3500
3501
3502
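/**
 * mpt3sas_base_put_smid_default - default descriptor, primarily used
 * for config pages
 * @ioc: per adapter object
 * @smid: system request message index
 */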
3503void
3504mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3505{
3506 Mpi2RequestDescriptorUnion_t descriptor;
3507 void *mpi_req_iomem;
3508 u64 *request;
3509
3510 if (ioc->is_mcpu_endpoint) {
3511 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3512
3513 _clone_sg_entries(ioc, (void *) mfp, smid);
3514
3515 mpi_req_iomem = (void __force *)ioc->chip +
3516 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3517 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3518 ioc->request_sz);
3519 }
3520 request = (u64 *)&descriptor;
3521 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3522 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
3523 descriptor.Default.SMID = cpu_to_le16(smid);
3524 descriptor.Default.LMID = 0;
3525 descriptor.Default.DescriptorTypeDependent = 0;
3526 if (ioc->is_mcpu_endpoint)
3527 _base_mpi_ep_writeq(*request,
3528 &ioc->chip->RequestDescriptorPostLow,
3529 &ioc->scsi_lookup_lock);
3530 else
3531 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3532 &ioc->scsi_lookup_lock);
3533}
3534
3535
3536
3537
3538
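/**
 * _base_display_OEMs_branding - display branding string
 * @ioc: per adapter object
 */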
3539static void
3540_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
3541{
3544
3545 switch (ioc->pdev->subsystem_vendor) {
3546 case PCI_VENDOR_ID_INTEL:
3547 switch (ioc->pdev->device) {
3548 case MPI2_MFGPAGE_DEVID_SAS2008:
3549 switch (ioc->pdev->subsystem_device) {
3550 case MPT2SAS_INTEL_RMS2LL080_SSDID:
3551 ioc_info(ioc, "%s\n",
3552 MPT2SAS_INTEL_RMS2LL080_BRANDING);
3553 break;
3554 case MPT2SAS_INTEL_RMS2LL040_SSDID:
3555 ioc_info(ioc, "%s\n",
3556 MPT2SAS_INTEL_RMS2LL040_BRANDING);
3557 break;
3558 case MPT2SAS_INTEL_SSD910_SSDID:
3559 ioc_info(ioc, "%s\n",
3560 MPT2SAS_INTEL_SSD910_BRANDING);
3561 break;
3562 default:
3563 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3564 ioc->pdev->subsystem_device);
3565 break;
3566 }
 break;
3567 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3568 switch (ioc->pdev->subsystem_device) {
3569 case MPT2SAS_INTEL_RS25GB008_SSDID:
3570 ioc_info(ioc, "%s\n",
3571 MPT2SAS_INTEL_RS25GB008_BRANDING);
3572 break;
3573 case MPT2SAS_INTEL_RMS25JB080_SSDID:
3574 ioc_info(ioc, "%s\n",
3575 MPT2SAS_INTEL_RMS25JB080_BRANDING);
3576 break;
3577 case MPT2SAS_INTEL_RMS25JB040_SSDID:
3578 ioc_info(ioc, "%s\n",
3579 MPT2SAS_INTEL_RMS25JB040_BRANDING);
3580 break;
3581 case MPT2SAS_INTEL_RMS25KB080_SSDID:
3582 ioc_info(ioc, "%s\n",
3583 MPT2SAS_INTEL_RMS25KB080_BRANDING);
3584 break;
3585 case MPT2SAS_INTEL_RMS25KB040_SSDID:
3586 ioc_info(ioc, "%s\n",
3587 MPT2SAS_INTEL_RMS25KB040_BRANDING);
3588 break;
3589 case MPT2SAS_INTEL_RMS25LB040_SSDID:
3590 ioc_info(ioc, "%s\n",
3591 MPT2SAS_INTEL_RMS25LB040_BRANDING);
3592 break;
3593 case MPT2SAS_INTEL_RMS25LB080_SSDID:
3594 ioc_info(ioc, "%s\n",
3595 MPT2SAS_INTEL_RMS25LB080_BRANDING);
3596 break;
3597 default:
3598 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3599 ioc->pdev->subsystem_device);
3600 break;
3601 }
 break;
3602 case MPI25_MFGPAGE_DEVID_SAS3008:
3603 switch (ioc->pdev->subsystem_device) {
3604 case MPT3SAS_INTEL_RMS3JC080_SSDID:
3605 ioc_info(ioc, "%s\n",
3606 MPT3SAS_INTEL_RMS3JC080_BRANDING);
3607 break;
3608
3609 case MPT3SAS_INTEL_RS3GC008_SSDID:
3610 ioc_info(ioc, "%s\n",
3611 MPT3SAS_INTEL_RS3GC008_BRANDING);
3612 break;
3613 case MPT3SAS_INTEL_RS3FC044_SSDID:
3614 ioc_info(ioc, "%s\n",
3615 MPT3SAS_INTEL_RS3FC044_BRANDING);
3616 break;
3617 case MPT3SAS_INTEL_RS3UC080_SSDID:
3618 ioc_info(ioc, "%s\n",
3619 MPT3SAS_INTEL_RS3UC080_BRANDING);
3620 break;
3621 default:
3622 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3623 ioc->pdev->subsystem_device);
3624 break;
3625 }
3626 break;
3627 default:
3628 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3629 ioc->pdev->subsystem_device);
3630 break;
3631 }
3632 break;
3633 case PCI_VENDOR_ID_DELL:
3634 switch (ioc->pdev->device) {
3635 case MPI2_MFGPAGE_DEVID_SAS2008:
3636 switch (ioc->pdev->subsystem_device) {
3637 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
3638 ioc_info(ioc, "%s\n",
3639 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
3640 break;
3641 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
3642 ioc_info(ioc, "%s\n",
3643 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
3644 break;
3645 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
3646 ioc_info(ioc, "%s\n",
3647 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
3648 break;
3649 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
3650 ioc_info(ioc, "%s\n",
3651 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
3652 break;
3653 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
3654 ioc_info(ioc, "%s\n",
3655 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
3656 break;
3657 case MPT2SAS_DELL_PERC_H200_SSDID:
3658 ioc_info(ioc, "%s\n",
3659 MPT2SAS_DELL_PERC_H200_BRANDING);
3660 break;
3661 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
3662 ioc_info(ioc, "%s\n",
3663 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
3664 break;
3665 default:
3666 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
3667 ioc->pdev->subsystem_device);
3668 break;
3669 }
3670 break;
3671 case MPI25_MFGPAGE_DEVID_SAS3008:
3672 switch (ioc->pdev->subsystem_device) {
3673 case MPT3SAS_DELL_12G_HBA_SSDID:
3674 ioc_info(ioc, "%s\n",
3675 MPT3SAS_DELL_12G_HBA_BRANDING);
3676 break;
3677 default:
3678 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
3679 ioc->pdev->subsystem_device);
3680 break;
3681 }
3682 break;
3683 default:
3684 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
3685 ioc->pdev->subsystem_device);
3686 break;
3687 }
3688 break;
3689 case PCI_VENDOR_ID_CISCO:
3690 switch (ioc->pdev->device) {
3691 case MPI25_MFGPAGE_DEVID_SAS3008:
3692 switch (ioc->pdev->subsystem_device) {
3693 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
3694 ioc_info(ioc, "%s\n",
3695 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
3696 break;
3697 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
3698 ioc_info(ioc, "%s\n",
3699 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
3700 break;
3701 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3702 ioc_info(ioc, "%s\n",
3703 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3704 break;
3705 default:
3706 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3707 ioc->pdev->subsystem_device);
3708 break;
3709 }
3710 break;
3711 case MPI25_MFGPAGE_DEVID_SAS3108_1:
3712 switch (ioc->pdev->subsystem_device) {
3713 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3714 ioc_info(ioc, "%s\n",
3715 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3716 break;
3717 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
3718 ioc_info(ioc, "%s\n",
3719 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
3720 break;
3721 default:
3722 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3723 ioc->pdev->subsystem_device);
3724 break;
3725 }
3726 break;
3727 default:
3728 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
3729 ioc->pdev->subsystem_device);
3730 break;
3731 }
3732 break;
3733 case MPT2SAS_HP_3PAR_SSVID:
3734 switch (ioc->pdev->device) {
3735 case MPI2_MFGPAGE_DEVID_SAS2004:
3736 switch (ioc->pdev->subsystem_device) {
3737 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
3738 ioc_info(ioc, "%s\n",
3739 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
3740 break;
3741 default:
3742 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3743 ioc->pdev->subsystem_device);
3744 break;
3745 }
 break;
3746 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3747 switch (ioc->pdev->subsystem_device) {
3748 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
3749 ioc_info(ioc, "%s\n",
3750 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
3751 break;
3752 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
3753 ioc_info(ioc, "%s\n",
3754 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
3755 break;
3756 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
3757 ioc_info(ioc, "%s\n",
3758 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
3759 break;
3760 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
3761 ioc_info(ioc, "%s\n",
3762 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
3763 break;
3764 default:
3765 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3766 ioc->pdev->subsystem_device);
3767 break;
3768 }
 break;
3769 default:
3770 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
3771 ioc->pdev->subsystem_device);
3772 break;
3773 }
 break;
3774 default:
3775 break;
3776 }
3777}
3778
3779
3780
3781
3782
3783
3784
3785
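/**
 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
 *	version from FW Image Header
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */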
3786static int
3787_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
3788{
3789 Mpi2FWImageHeader_t *FWImgHdr;
3790 Mpi25FWUploadRequest_t *mpi_request;
3791 Mpi2FWUploadReply_t mpi_reply;
3792 int r = 0;
3793 void *fwpkg_data = NULL;
3794 dma_addr_t fwpkg_data_dma;
3795 u16 smid, ioc_status;
3796 size_t data_length;
3797
3798 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3799
3800 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
3801 ioc_err(ioc, "%s: internal command already in use\n", __func__);
3802 return -EAGAIN;
3803 }
3804
3805 data_length = sizeof(Mpi2FWImageHeader_t);
3806 fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
3807 &fwpkg_data_dma);
3808 if (!fwpkg_data) {
3809 ioc_err(ioc, "failure at %s:%d/%s()!\n",
3810 __FILE__, __LINE__, __func__);
3811 return -ENOMEM;
3812 }
3813
3814 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3815 if (!smid) {
3816 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3817 r = -EAGAIN;
3818 goto out;
3819 }
3820
3821 ioc->base_cmds.status = MPT3_CMD_PENDING;
3822 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3823 ioc->base_cmds.smid = smid;
3824 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
3825 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
3826 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
3827 mpi_request->ImageSize = cpu_to_le32(data_length);
3828 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
3829 data_length);
3830 init_completion(&ioc->base_cmds.done);
3831 mpt3sas_base_put_smid_default(ioc, smid);
3832
3833 wait_for_completion_timeout(&ioc->base_cmds.done,
3834 FW_IMG_HDR_READ_TIMEOUT*HZ);
3835 ioc_info(ioc, "%s: complete\n", __func__);
3836 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3837 ioc_err(ioc, "%s: timeout\n", __func__);
3838 _debug_dump_mf(mpi_request,
3839 sizeof(Mpi25FWUploadRequest_t)/4);
3840 r = -ETIME;
3841 } else {
3842 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
3843 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
3844 memcpy(&mpi_reply, ioc->base_cmds.reply,
3845 sizeof(Mpi2FWUploadReply_t));
3846 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3847 MPI2_IOCSTATUS_MASK;
3848 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3849 FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
3850 if (FWImgHdr->PackageVersion.Word) {
3851 ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
3852 FWImgHdr->PackageVersion.Struct.Major,
3853 FWImgHdr->PackageVersion.Struct.Minor,
3854 FWImgHdr->PackageVersion.Struct.Unit,
3855 FWImgHdr->PackageVersion.Struct.Dev);
3856 }
3857 } else {
3858 _debug_dump_mf(&mpi_reply,
3859 sizeof(Mpi2FWUploadReply_t)/4);
3860 }
3861 }
3862 }
3863 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3864out:
3865 if (fwpkg_data)
3866 pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
3867 fwpkg_data_dma);
3868 return r;
3869}
3870
3871
3872
3873
3874
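/**
 * _base_display_ioc_capabilities - display IOC's capabilities
 * @ioc: per adapter object
 */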
3875static void
3876_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
3877{
3878 int i = 0;
3879 char desc[17];
3880 u32 iounit_pg1_flags;
3881 u32 bios_version;
3882
3883 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
3884 strncpy(desc, ioc->manu_pg0.ChipName, 16);
 desc[16] = '\0';
3885 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
3886 desc,
3887 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
3888 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
3889 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
3890 ioc->facts.FWVersion.Word & 0x000000FF,
3891 ioc->pdev->revision,
3892 (bios_version & 0xFF000000) >> 24,
3893 (bios_version & 0x00FF0000) >> 16,
3894 (bios_version & 0x0000FF00) >> 8,
3895 bios_version & 0x000000FF);
3896
3897 _base_display_OEMs_branding(ioc);
3898
3904 ioc_info(ioc, "Protocol=(");
3905
3906 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
3907 pr_cont("Initiator");
3908 i++;
3909 }
3910
3911 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
3912 pr_cont("%sTarget", i ? "," : "");
3913 i++;
3914 }

 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
 pr_cont("%sNVMe", i ? "," : "");
 i++;
 }
3915
3916 i = 0;
3917 pr_cont("), Capabilities=(");
3918
3919 if (!ioc->hide_ir_msg) {
3920 if (ioc->facts.IOCCapabilities &
3921 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
3922 pr_cont("Raid");
3923 i++;
3924 }
3925 }
3926
3927 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
3928 pr_cont("%sTLR", i ? "," : "");
3929 i++;
3930 }
3931
3932 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
3933 pr_cont("%sMulticast", i ? "," : "");
3934 i++;
3935 }
3936
3937 if (ioc->facts.IOCCapabilities &
3938 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
3939 pr_cont("%sBIDI Target", i ? "," : "");
3940 i++;
3941 }
3942
3943 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
3944 pr_cont("%sEEDP", i ? "," : "");
3945 i++;
3946 }
3947
3948 if (ioc->facts.IOCCapabilities &
3949 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
3950 pr_cont("%sSnapshot Buffer", i ? "," : "");
3951 i++;
3952 }
3953
3954 if (ioc->facts.IOCCapabilities &
3955 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
3956 pr_cont("%sDiag Trace Buffer", i ? "," : "");
3957 i++;
3958 }
3959
3960 if (ioc->facts.IOCCapabilities &
3961 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
3962 pr_cont("%sDiag Extended Buffer", i ? "," : "");
3963 i++;
3964 }
3965
3966 if (ioc->facts.IOCCapabilities &
3967 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
3968 pr_cont("%sTask Set Full", i ? "," : "");
3969 i++;
3970 }
3971
3972 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
3973 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
3974 pr_cont("%sNCQ", i ? "," : "");
3975 i++;
3976 }
3977
3978 pr_cont(")\n");
3979}
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
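/**
 * mpt3sas_base_update_missing_delay - change the missing delay timers
 * @ioc: per adapter object
 * @device_missing_delay: amount of time till device is reported missing
 * @io_missing_delay: interval IO is returned when there is a missing device
 *
 * Passed on the command line, this function will modify the device missing
 * delay, as well as the io missing delay. This should be called at driver
 * load time.
 */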
3991void
3992mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
3993 u16 device_missing_delay, u8 io_missing_delay)
3994{
3995 u16 dmd, dmd_new, dmd_original;
3996 u8 io_missing_delay_original;
3997 u16 sz;
3998 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
3999 Mpi2ConfigReply_t mpi_reply;
4000 u8 num_phys = 0;
4001 u16 ioc_status;
4002
4003 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4004 if (!num_phys)
4005 return;
4006
4007 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4008 sizeof(Mpi2SasIOUnit1PhyData_t));
4009 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4010 if (!sas_iounit_pg1) {
4011 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4012 __FILE__, __LINE__, __func__);
4013 goto out;
4014 }
4015 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4016 sas_iounit_pg1, sz))) {
4017 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4018 __FILE__, __LINE__, __func__);
4019 goto out;
4020 }
4021 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4022 MPI2_IOCSTATUS_MASK;
4023 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4024 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4025 __FILE__, __LINE__, __func__);
4026 goto out;
4027 }
4028
4029
4030 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4031 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4032 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4033 else
4034 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4035 dmd_original = dmd;
4036 if (device_missing_delay > 0x7F) {
4037 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4038 device_missing_delay;
4039 dmd = dmd / 16;
4040 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4041 } else
4042 dmd = device_missing_delay;
4043 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4044
4045
4046 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4047 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4048
4049 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4050 sz)) {
4051 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4052 dmd_new = (dmd &
4053 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4054 else
4055 dmd_new =
4056 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4057 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4058 dmd_original, dmd_new);
4059 ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4060 io_missing_delay_original,
4061 io_missing_delay);
4062 ioc->device_missing_delay = dmd_new;
4063 ioc->io_missing_delay = io_missing_delay;
4064 }
4065
4066out:
4067 kfree(sas_iounit_pg1);
4068}
4069
4070
4071
4072
4073
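/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 */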
4074static void
4075_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4076{
4077 Mpi2ConfigReply_t mpi_reply;
4078 u32 iounit_pg1_flags;
4079
4080 ioc->nvme_abort_timeout = 30;
4081 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4082 if (ioc->ir_firmware)
4083 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4084 &ioc->manu_pg10);
4085
4086
4087
4088
4089
4090 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4091 if (ioc->manu_pg11.EEDPTagMode == 0) {
4092 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4093 ioc->name);
4094 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4095 ioc->manu_pg11.EEDPTagMode |= 0x1;
4096 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4097 &ioc->manu_pg11);
4098 }
4099 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4100 ioc->tm_custom_handling = 1;
4101 else {
4102 ioc->tm_custom_handling = 0;
4103 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4104 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4105 else if (ioc->manu_pg11.NVMeAbortTO >
4106 NVME_TASK_ABORT_MAX_TIMEOUT)
4107 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4108 else
4109 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4110 }
4111
4112 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4113 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4114 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4115 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4116 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4117 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4118 _base_display_ioc_capabilities(ioc);
4119
4120
4121
4122
4123
4124 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4125 if ((ioc->facts.IOCCapabilities &
4126 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4127 iounit_pg1_flags &=
4128 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4129 else
4130 iounit_pg1_flags |=
4131 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4132 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4133 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4134
4135 if (ioc->iounit_pg8.NumSensors)
4136 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4137}
4138
4139
4140
4141
4142
4143
4144
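/**
 * mpt3sas_free_enclosure_list - release the enclosure list
 * @ioc: per adapter object
 *
 * Frees memory allocated during enclosure device add.
 */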
4145void
4146mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4147{
4148 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4149
4150
4151 list_for_each_entry_safe(enclosure_dev,
4152 enclosure_dev_next, &ioc->enclosure_list, list) {
4153 list_del(&enclosure_dev->list);
4154 kfree(enclosure_dev);
4155 }
4156}
4157
4158
4159
4160
4161
4162
4163
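/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Frees memory allocated from _base_allocate_memory_pools.
 */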
4164static void
4165_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4166{
4167 int i = 0;
4168 int j = 0;
4169 struct chain_tracker *ct;
4170 struct reply_post_struct *rps;
4171
4172 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4173
4174 if (ioc->request) {
4175 pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
4176 ioc->request, ioc->request_dma);
4177 dexitprintk(ioc,
4178 ioc_info(ioc, "request_pool(0x%p): free\n",
4179 ioc->request));
4180 ioc->request = NULL;
4181 }
4182
4183 if (ioc->sense) {
4184 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4185 dma_pool_destroy(ioc->sense_dma_pool);
4186 dexitprintk(ioc,
4187 ioc_info(ioc, "sense_pool(0x%p): free\n",
4188 ioc->sense));
4189 ioc->sense = NULL;
4190 }
4191
4192 if (ioc->reply) {
4193 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4194 dma_pool_destroy(ioc->reply_dma_pool);
4195 dexitprintk(ioc,
4196 ioc_info(ioc, "reply_pool(0x%p): free\n",
4197 ioc->reply));
4198 ioc->reply = NULL;
4199 }
4200
4201 if (ioc->reply_free) {
4202 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4203 ioc->reply_free_dma);
4204 dma_pool_destroy(ioc->reply_free_dma_pool);
4205 dexitprintk(ioc,
4206 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4207 ioc->reply_free));
4208 ioc->reply_free = NULL;
4209 }
4210
4211 if (ioc->reply_post) {
4212 do {
4213 rps = &ioc->reply_post[i];
4214 if (rps->reply_post_free) {
4215 dma_pool_free(
4216 ioc->reply_post_free_dma_pool,
4217 rps->reply_post_free,
4218 rps->reply_post_free_dma);
4219 dexitprintk(ioc,
4220 ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
4221 rps->reply_post_free));
4222 rps->reply_post_free = NULL;
4223 }
4224 } while (ioc->rdpq_array_enable &&
4225 (++i < ioc->reply_queue_count));
4226 if (ioc->reply_post_free_array &&
4227 ioc->rdpq_array_enable) {
4228 dma_pool_free(ioc->reply_post_free_array_dma_pool,
4229 ioc->reply_post_free_array,
4230 ioc->reply_post_free_array_dma);
4231 ioc->reply_post_free_array = NULL;
4232 }
4233 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4234 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4235 kfree(ioc->reply_post);
4236 }
4237
4238 if (ioc->pcie_sgl_dma_pool) {
4239 for (i = 0; i < ioc->scsiio_depth; i++) {
4240 dma_pool_free(ioc->pcie_sgl_dma_pool,
4241 ioc->pcie_sg_lookup[i].pcie_sgl,
4242 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4243 }
4245 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4246 }
4247
4248 if (ioc->config_page) {
4249 dexitprintk(ioc,
4250 ioc_info(ioc, "config_page(0x%p): free\n",
4251 ioc->config_page));
4252 pci_free_consistent(ioc->pdev, ioc->config_page_sz,
4253 ioc->config_page, ioc->config_page_dma);
4254 }
4255
4256 kfree(ioc->hpr_lookup);
4257 kfree(ioc->internal_lookup);
4258 if (ioc->chain_lookup) {
4259 for (i = 0; i < ioc->scsiio_depth; i++) {
4260 for (j = ioc->chains_per_prp_buffer;
4261 j < ioc->chains_needed_per_io; j++) {
4262 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4263 if (ct && ct->chain_buffer)
4264 dma_pool_free(ioc->chain_dma_pool,
4265 ct->chain_buffer,
4266 ct->chain_buffer_dma);
4267 }
4268 kfree(ioc->chain_lookup[i].chains_per_smid);
4269 }
4270 dma_pool_destroy(ioc->chain_dma_pool);
4271 kfree(ioc->chain_lookup);
4272 ioc->chain_lookup = NULL;
4273 }
4274}
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
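/**
 * is_MSB_are_same - check whether a pool stays within one 4GB window
 * @reply_pool_start_address: dma base address of the pool
 * @pool_sz: pool size
 *
 * Return: 1 if the upper 32 bits of the start and end addresses match,
 * else 0. Used because some pools must not cross a 4GB boundary in
 * DMA address space.
 */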
4286static int
4287is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
4288{
4289 long reply_pool_end_address;
4290
4291 reply_pool_end_address = reply_pool_start_address + pool_sz;
4292
4293 if (upper_32_bits(reply_pool_start_address) ==
4294 upper_32_bits(reply_pool_end_address))
4295 return 1;
4296 else
4297 return 0;
4298}
4299
4300
4301
4302
4303
4304
4305
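/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */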
4306static int
4307_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4308{
4309 struct mpt3sas_facts *facts;
4310 u16 max_sge_elements;
4311 u16 chains_needed_per_io;
4312 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
4313 u32 retry_sz;
4314 u16 max_request_credit, nvme_blocks_needed;
4315 unsigned short sg_tablesize;
4316 u16 sge_size;
4317 int i, j;
4318 struct chain_tracker *ct;
4319
4320 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4321
4322
4323 retry_sz = 0;
4324 facts = &ioc->facts;
4325
4326
4327 if (max_sgl_entries != -1)
4328 sg_tablesize = max_sgl_entries;
4329 else {
4330 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
4331 sg_tablesize = MPT2SAS_SG_DEPTH;
4332 else
4333 sg_tablesize = MPT3SAS_SG_DEPTH;
4334 }
4335
4336
4337 if (reset_devices)
4338 sg_tablesize = min_t(unsigned short, sg_tablesize,
4339 MPT_KDUMP_MIN_PHYS_SEGMENTS);
4340
4341 if (ioc->is_mcpu_endpoint)
4342 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4343 else {
4344 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
4345 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4346 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
4347 sg_tablesize = min_t(unsigned short, sg_tablesize,
4348 SG_MAX_SEGMENTS);
4349 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
4350 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
4351 }
4352 ioc->shost->sg_tablesize = sg_tablesize;
4353 }
4354
4355 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
4356 (facts->RequestCredit / 4));
4357 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
4358 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
4359 INTERNAL_SCSIIO_CMDS_COUNT)) {
4360 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
4361 facts->RequestCredit);
4362 return -ENOMEM;
4363 }
4364 ioc->internal_depth = 10;
4365 }
4366
4367 ioc->hi_priority_depth = ioc->internal_depth - (5);
4368
4369 if (max_queue_depth != -1 && max_queue_depth != 0) {
4370 max_request_credit = min_t(u16, max_queue_depth +
4371 ioc->internal_depth, facts->RequestCredit);
4372 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
4373 max_request_credit = MAX_HBA_QUEUE_DEPTH;
4374 } else if (reset_devices)
4375 max_request_credit = min_t(u16, facts->RequestCredit,
4376 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
4377 else
4378 max_request_credit = min_t(u16, facts->RequestCredit,
4379 MAX_HBA_QUEUE_DEPTH);
4380
4381
4382
4383
4384
4385 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
4386
4387
4388 ioc->request_sz = facts->IOCRequestFrameSize * 4;
4389
4390
4391 ioc->reply_sz = facts->ReplyFrameSize * 4;
4392
4393
4394 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4395 if (facts->IOCMaxChainSegmentSize)
4396 ioc->chain_segment_sz =
4397 facts->IOCMaxChainSegmentSize *
4398 MAX_CHAIN_ELEMT_SZ;
4399 else
4400
4401 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
4402 MAX_CHAIN_ELEMT_SZ;
4403 } else
4404 ioc->chain_segment_sz = ioc->request_sz;
4405
4406
4407 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
4408
4409 retry_allocation:
4410 total_sz = 0;
4411
4412 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
4413 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
4414 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
4415
4416
4417 max_sge_elements = ioc->chain_segment_sz - sge_size;
4418 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
4419
4420
4421
4422
4423 chains_needed_per_io = ((ioc->shost->sg_tablesize -
4424 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
4425 + 1;
4426 if (chains_needed_per_io > facts->MaxChainDepth) {
4427 chains_needed_per_io = facts->MaxChainDepth;
4428 ioc->shost->sg_tablesize = min_t(u16,
4429 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
4430 * chains_needed_per_io), ioc->shost->sg_tablesize);
4431 }
4432 ioc->chains_needed_per_io = chains_needed_per_io;
4433
4434
4435 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4436
4437
4438 if (ioc->is_mcpu_endpoint)
4439 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
4440 else {
4441
4442 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
4443 ioc->reply_free_queue_depth + 1;
4444
4445 if (ioc->reply_post_queue_depth % 16)
4446 ioc->reply_post_queue_depth += 16 -
4447 (ioc->reply_post_queue_depth % 16);
4448 }
4449
4450 if (ioc->reply_post_queue_depth >
4451 facts->MaxReplyDescriptorPostQueueDepth) {
4452 ioc->reply_post_queue_depth =
4453 facts->MaxReplyDescriptorPostQueueDepth -
4454 (facts->MaxReplyDescriptorPostQueueDepth % 16);
4455 ioc->hba_queue_depth =
4456 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
4457 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4458 }
4459
4460 dinitprintk(ioc,
4461 ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
4462 ioc->max_sges_in_main_message,
4463 ioc->max_sges_in_chain_message,
4464 ioc->shost->sg_tablesize,
4465 ioc->chains_needed_per_io));
4466
4467
4468 reply_post_free_sz = ioc->reply_post_queue_depth *
4469 sizeof(Mpi2DefaultReplyDescriptor_t);
4470
4471 sz = reply_post_free_sz;
4472 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
4473 sz *= ioc->reply_queue_count;
4474
4475 ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
4476 (ioc->reply_queue_count):1,
4477 sizeof(struct reply_post_struct), GFP_KERNEL);
4478
4479 if (!ioc->reply_post) {
4480 ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
4481 goto out;
4482 }
4483 ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
4484 &ioc->pdev->dev, sz, 16, 0);
4485 if (!ioc->reply_post_free_dma_pool) {
4486 ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
4487 goto out;
4488 }
4489 i = 0;
4490 do {
4491 ioc->reply_post[i].reply_post_free =
4492 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
4493 GFP_KERNEL,
4494 &ioc->reply_post[i].reply_post_free_dma);
4495 if (!ioc->reply_post[i].reply_post_free) {
4496 ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
4497 goto out;
4498 }
4499 dinitprintk(ioc,
4500 ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4501 ioc->reply_post[i].reply_post_free,
4502 ioc->reply_post_queue_depth,
4503 8, sz / 1024));
4504 dinitprintk(ioc,
4505 ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
4506 (u64)ioc->reply_post[i].reply_post_free_dma));
4507 total_sz += sz;
4508 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
4509
4510 if (ioc->dma_mask == 64) {
4511 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
4512 ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
4513 pci_name(ioc->pdev));
4514 goto out;
4515 }
4516 }
4517
4518 ioc->scsiio_depth = ioc->hba_queue_depth -
4519 ioc->hi_priority_depth - ioc->internal_depth;
4520
4521
4522
4523
4524 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
4525 dinitprintk(ioc,
4526 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
4527 ioc->shost->can_queue));
4528
4529
4530
4531
4532
4533 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
4534 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
4535
4536
4537 sz += (ioc->hi_priority_depth * ioc->request_sz);
4538
4539
4540 sz += (ioc->internal_depth * ioc->request_sz);
4541
4542 ioc->request_dma_sz = sz;
4543 ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
4544 if (!ioc->request) {
4545 ioc_err(ioc, "request pool: pci_alloc_consistent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
4546 ioc->hba_queue_depth, ioc->chains_needed_per_io,
4547 ioc->request_sz, sz / 1024);
4548 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
4549 goto out;
4550 retry_sz = 64;
4551 ioc->hba_queue_depth -= retry_sz;
4552 _base_release_memory_pools(ioc);
4553 goto retry_allocation;
4554 }
4555
4556 if (retry_sz)
4557 ioc_err(ioc, "request pool: pci_alloc_consistent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
4558 ioc->hba_queue_depth, ioc->chains_needed_per_io,
4559 ioc->request_sz, sz / 1024);
4560
4561
4562 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
4563 ioc->request_sz);
4564 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
4565 ioc->request_sz);
4566
4567
4568 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
4569 ioc->request_sz);
4570 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
4571 ioc->request_sz);
4572
4573 dinitprintk(ioc,
4574 ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4575 ioc->request, ioc->hba_queue_depth,
4576 ioc->request_sz,
4577 (ioc->hba_queue_depth * ioc->request_sz) / 1024));
4578
4579 dinitprintk(ioc,
4580 ioc_info(ioc, "request pool: dma(0x%llx)\n",
4581 (unsigned long long)ioc->request_dma));
4582 total_sz += sz;
4583
4584 dinitprintk(ioc,
4585 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
4586 ioc->request, ioc->scsiio_depth));
4587
4588 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
4589 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
4590 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
4591 if (!ioc->chain_lookup) {
4592 ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
4593 goto out;
4594 }
4595
4596 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
4597 for (i = 0; i < ioc->scsiio_depth; i++) {
4598 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
4599 if (!ioc->chain_lookup[i].chains_per_smid) {
4600 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
4601 goto out;
4602 }
4603 }
4604
4605
4606 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
4607 sizeof(struct request_tracker), GFP_KERNEL);
4608 if (!ioc->hpr_lookup) {
4609 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
4610 goto out;
4611 }
4612 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
4613 dinitprintk(ioc,
4614 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
4615 ioc->hi_priority,
4616 ioc->hi_priority_depth, ioc->hi_priority_smid));
4617
4618
4619 ioc->internal_lookup = kcalloc(ioc->internal_depth,
4620 sizeof(struct request_tracker), GFP_KERNEL);
4621 if (!ioc->internal_lookup) {
4622 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
4623 goto out;
4624 }
4625 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
4626 dinitprintk(ioc,
4627 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
4628 ioc->internal,
4629 ioc->internal_depth, ioc->internal_smid));
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643 ioc->chains_per_prp_buffer = 0;
4644 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4645 nvme_blocks_needed =
4646 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
4647 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
4648 nvme_blocks_needed++;
4649
4650 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
4651 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
4652 if (!ioc->pcie_sg_lookup) {
4653 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
4654 goto out;
4655 }
4656 sz = nvme_blocks_needed * ioc->page_size;
4657 ioc->pcie_sgl_dma_pool =
4658 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
4659 if (!ioc->pcie_sgl_dma_pool) {
4660 ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
4661 goto out;
4662 }
4663
4664 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
4665 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
4666 ioc->chains_needed_per_io);
4667
4668 for (i = 0; i < ioc->scsiio_depth; i++) {
4669 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
4670 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
4671 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4672 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
				ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
4674 goto out;
4675 }
4676 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
4677 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4678 ct->chain_buffer =
4679 ioc->pcie_sg_lookup[i].pcie_sgl +
4680 (j * ioc->chain_segment_sz);
4681 ct->chain_buffer_dma =
4682 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
4683 (j * ioc->chain_segment_sz);
4684 }
4685 }
4686
4687 dinitprintk(ioc,
4688 ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
4689 ioc->scsiio_depth, sz,
4690 (sz * ioc->scsiio_depth) / 1024));
4691 dinitprintk(ioc,
4692 ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
4693 ioc->chains_per_prp_buffer));
4694 total_sz += sz * ioc->scsiio_depth;
4695 }
4696
4697 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
4698 ioc->chain_segment_sz, 16, 0);
4699 if (!ioc->chain_dma_pool) {
4700 ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
4701 goto out;
4702 }
4703 for (i = 0; i < ioc->scsiio_depth; i++) {
4704 for (j = ioc->chains_per_prp_buffer;
4705 j < ioc->chains_needed_per_io; j++) {
4706 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4707 ct->chain_buffer = dma_pool_alloc(
4708 ioc->chain_dma_pool, GFP_KERNEL,
4709 &ct->chain_buffer_dma);
4710 if (!ct->chain_buffer) {
				ioc_err(ioc, "chain_lookup: dma_pool_alloc failed\n");
4712 _base_release_memory_pools(ioc);
4713 goto out;
4714 }
4715 }
4716 total_sz += ioc->chain_segment_sz;
4717 }
4718
4719 dinitprintk(ioc,
4720 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
4721 ioc->chain_depth, ioc->chain_segment_sz,
4722 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
4723
4724
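	/* sense buffers, 4 byte align */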
4725 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
4726 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4727 4, 0);
4728 if (!ioc->sense_dma_pool) {
4729 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
4730 goto out;
4731 }
4732 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4733 &ioc->sense_dma);
4734 if (!ioc->sense) {
4735 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
4736 goto out;
4737 }
4738
4739
4740
4741
4742
4743
4744
4745
4746
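	/*
	 * The sense buffer pool must reside within a single 4 GB region:
	 * the start and end DMA addresses must share the same upper 32 bits.
	 * If the first allocation violates this, destroy the pool and
	 * recreate it with the pool size rounded up to a power of two used
	 * as the alignment, which guarantees the next allocation cannot
	 * cross a 4 GB boundary.
	 */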
4747 if (!is_MSB_are_same((long)ioc->sense, sz)) {
4748
4749 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4750 dma_pool_destroy(ioc->sense_dma_pool);
4751 ioc->sense = NULL;
4752
4753 ioc->sense_dma_pool =
4754 dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4755 roundup_pow_of_two(sz), 0);
4756 if (!ioc->sense_dma_pool) {
			ioc_err(ioc, "sense pool: dma_pool_create failed\n");
4758 goto out;
4759 }
4760 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4761 &ioc->sense_dma);
4762 if (!ioc->sense) {
			ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
4764 goto out;
4765 }
4766 }
4767 dinitprintk(ioc,
4768 ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4769 ioc->sense, ioc->scsiio_depth,
4770 SCSI_SENSE_BUFFERSIZE, sz / 1024));
4771 dinitprintk(ioc,
4772 ioc_info(ioc, "sense_dma(0x%llx)\n",
4773 (unsigned long long)ioc->sense_dma));
4774 total_sz += sz;
4775
4776
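	/* reply pool, 4 byte align */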
4777 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
4778 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
4779 4, 0);
4780 if (!ioc->reply_dma_pool) {
4781 ioc_err(ioc, "reply pool: dma_pool_create failed\n");
4782 goto out;
4783 }
4784 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
4785 &ioc->reply_dma);
4786 if (!ioc->reply) {
4787 ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
4788 goto out;
4789 }
4790 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
4791 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
4792 dinitprintk(ioc,
4793 ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4794 ioc->reply, ioc->reply_free_queue_depth,
4795 ioc->reply_sz, sz / 1024));
4796 dinitprintk(ioc,
4797 ioc_info(ioc, "reply_dma(0x%llx)\n",
4798 (unsigned long long)ioc->reply_dma));
4799 total_sz += sz;
4800
4801
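	/* reply free queue, 16 byte align */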
4802 sz = ioc->reply_free_queue_depth * 4;
4803 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
4804 &ioc->pdev->dev, sz, 16, 0);
4805 if (!ioc->reply_free_dma_pool) {
4806 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
4807 goto out;
4808 }
4809 ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
4810 &ioc->reply_free_dma);
4811 if (!ioc->reply_free) {
4812 ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
4813 goto out;
4814 }
4815 dinitprintk(ioc,
4816 ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4817 ioc->reply_free, ioc->reply_free_queue_depth,
4818 4, sz / 1024));
4819 dinitprintk(ioc,
4820 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
4821 (unsigned long long)ioc->reply_free_dma));
4822 total_sz += sz;
4823
4824 if (ioc->rdpq_array_enable) {
4825 reply_post_free_array_sz = ioc->reply_queue_count *
4826 sizeof(Mpi2IOCInitRDPQArrayEntry);
4827 ioc->reply_post_free_array_dma_pool =
4828 dma_pool_create("reply_post_free_array pool",
4829 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
4830 if (!ioc->reply_post_free_array_dma_pool) {
4831 dinitprintk(ioc,
4832 ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
4833 goto out;
4834 }
4835 ioc->reply_post_free_array =
4836 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
4837 GFP_KERNEL, &ioc->reply_post_free_array_dma);
4838 if (!ioc->reply_post_free_array) {
4839 dinitprintk(ioc,
4840 ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
4841 goto out;
4842 }
4843 }
4844 ioc->config_page_sz = 512;
4845 ioc->config_page = pci_alloc_consistent(ioc->pdev,
4846 ioc->config_page_sz, &ioc->config_page_dma);
4847 if (!ioc->config_page) {
		ioc_err(ioc, "config page: pci_alloc_consistent failed\n");
4849 goto out;
4850 }
4851 dinitprintk(ioc,
4852 ioc_info(ioc, "config page(0x%p): size(%d)\n",
4853 ioc->config_page, ioc->config_page_sz));
4854 dinitprintk(ioc,
4855 ioc_info(ioc, "config_page_dma(0x%llx)\n",
4856 (unsigned long long)ioc->config_page_dma));
4857 total_sz += ioc->config_page_sz;
4858
4859 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
4860 total_sz / 1024);
	ioc_info(ioc, "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
4862 ioc->shost->can_queue, facts->RequestCredit);
4863 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
4864 ioc->shost->sg_tablesize);
4865 return 0;
4866
4867 out:
4868 return -ENOMEM;
4869}
4870
4871
4872
4873
4874
4875
4876
4877
4878
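/**
 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
 * @ioc: per adapter object
 * @cooked: Request raw or cooked IOC state
 *
 * Return: all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI2_IOC_STATE_MASK.
 */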
4879u32
4880mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
4881{
4882 u32 s, sc;
4883
4884 s = ioc->base_readl(&ioc->chip->Doorbell);
4885 sc = s & MPI2_IOC_STATE_MASK;
4886 return cooked ? sc : s;
4887}
4888
4889
4890
4891
4892
4893
4894
4895
4896
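/**
 * _base_wait_on_iocstate - waiting on ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */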
4897static int
4898_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
4899{
4900 u32 count, cntdn;
4901 u32 current_state;
4902
4903 count = 0;
4904 cntdn = 1000 * timeout;
4905 do {
4906 current_state = mpt3sas_base_get_iocstate(ioc, 1);
4907 if (current_state == ioc_state)
4908 return 0;
4909 if (count && current_state == MPI2_IOC_STATE_FAULT)
4910 break;
4911
4912 usleep_range(1000, 1500);
4913 count++;
4914 } while (--cntdn);
4915
4916 return current_state;
4917}
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928static int
4929_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
4930
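/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
 * by a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 *
 * Note: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
 */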
4931static int
4932_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
4933{
4934 u32 cntdn, count;
4935 u32 int_status;
4936
4937 count = 0;
4938 cntdn = 1000 * timeout;
4939 do {
4940 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
4941 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
4942 dhsprintk(ioc,
4943 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
4944 __func__, count, timeout));
4945 return 0;
4946 }
4947
4948 usleep_range(1000, 1500);
4949 count++;
4950 } while (--cntdn);
4951
4952 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
4953 __func__, count, int_status);
4954 return -EFAULT;
4955}
4956
4957static int
4958_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
4959{
4960 u32 cntdn, count;
4961 u32 int_status;
4962
4963 count = 0;
4964 cntdn = 2000 * timeout;
4965 do {
4966 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
4967 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
4968 dhsprintk(ioc,
4969 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
4970 __func__, count, timeout));
4971 return 0;
4972 }
4973
4974 udelay(500);
4975 count++;
4976 } while (--cntdn);
4977
4978 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
4979 __func__, count, int_status);
4980 return -EFAULT;
4981
4982}
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
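/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 *
 * Note: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to doorbell.
 */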
4994static int
4995_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
4996{
4997 u32 cntdn, count;
4998 u32 int_status;
4999 u32 doorbell;
5000
5001 count = 0;
5002 cntdn = 1000 * timeout;
5003 do {
5004 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5005 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5006 dhsprintk(ioc,
5007 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5008 __func__, count, timeout));
5009 return 0;
5010 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5011 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
5012 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5013 MPI2_IOC_STATE_FAULT) {
				mpt3sas_base_fault_info(ioc, doorbell);
5015 return -EFAULT;
5016 }
5017 } else if (int_status == 0xFFFFFFFF)
5018 goto out;
5019
5020 usleep_range(1000, 1500);
5021 count++;
5022 } while (--cntdn);
5023
5024 out:
5025 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5026 __func__, count, int_status);
5027 return -EFAULT;
5028}
5029
5030
5031
5032
5033
5034
5035
5036
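/**
 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */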
5037static int
5038_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5039{
5040 u32 cntdn, count;
5041 u32 doorbell_reg;
5042
5043 count = 0;
5044 cntdn = 1000 * timeout;
5045 do {
5046 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5047 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5048 dhsprintk(ioc,
5049 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5050 __func__, count, timeout));
5051 return 0;
5052 }
5053
5054 usleep_range(1000, 1500);
5055 count++;
5056 } while (--cntdn);
5057
5058 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5059 __func__, count, doorbell_reg);
5060 return -EFAULT;
5061}
5062
5063
5064
5065
5066
5067
5068
5069
5070
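/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */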
5071static int
5072_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5073{
5074 u32 ioc_state;
5075 int r = 0;
5076
5077 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5078 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
5079 return -EFAULT;
5080 }
5081
5082 if (!(ioc->facts.IOCCapabilities &
5083 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5084 return -EFAULT;
5085
5086 ioc_info(ioc, "sending message unit reset !!\n");
5087
5088 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5089 &ioc->chip->Doorbell);
5090 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5091 r = -EFAULT;
5092 goto out;
5093 }
5094 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5095 if (ioc_state) {
5096 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5097 __func__, ioc_state);
5098 r = -EFAULT;
5099 goto out;
5100 }
5101 out:
5102 ioc_info(ioc, "message unit reset: %s\n",
5103 r == 0 ? "SUCCESS" : "FAILED");
5104 return r;
5105}
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
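/**
 * _base_handshake_req_reply_wait - send request through doorbell interface
 * @ioc: per adapter object
 * @request_bytes: request length
 * @request: pointer having request payload
 * @reply_bytes: reply length
 * @reply: pointer to reply payload
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */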
5118static int
5119_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5120 u32 *request, int reply_bytes, u16 *reply, int timeout)
5121{
5122 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5123 int i;
5124 u8 failed;
5125 __le32 *mfp;
5126
5127
5128 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5129 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5130 return -EFAULT;
5131 }
5132
5133
5134 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5135 MPI2_HIS_IOC2SYS_DB_STATUS)
5136 writel(0, &ioc->chip->HostInterruptStatus);
5137
5138
5139 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5140 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5141 &ioc->chip->Doorbell);
5142
5143 if ((_base_spin_on_doorbell_int(ioc, 5))) {
5144 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5145 __LINE__);
5146 return -EFAULT;
5147 }
5148 writel(0, &ioc->chip->HostInterruptStatus);
5149
5150 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5151 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5152 __LINE__);
5153 return -EFAULT;
5154 }
5155
5156
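	/* send message 32-bits at a time */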
5157 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5158 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
5159 if ((_base_wait_for_doorbell_ack(ioc, 5)))
5160 failed = 1;
5161 }
5162
5163 if (failed) {
5164 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5165 __LINE__);
5166 return -EFAULT;
5167 }
5168
5169
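	/* now wait for the reply */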
5170 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5171 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5172 __LINE__);
5173 return -EFAULT;
5174 }
5175
5176
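	/* read the first two 16-bits, it gives the total length of the reply */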
5177 reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5178 & MPI2_DOORBELL_DATA_MASK);
5179 writel(0, &ioc->chip->HostInterruptStatus);
5180 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5181 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5182 __LINE__);
5183 return -EFAULT;
5184 }
5185 reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5186 & MPI2_DOORBELL_DATA_MASK);
5187 writel(0, &ioc->chip->HostInterruptStatus);
5188
5189 for (i = 2; i < default_reply->MsgLength * 2; i++) {
5190 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5191 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5192 __LINE__);
5193 return -EFAULT;
5194 }
5195 if (i >= reply_bytes/2)
5196 ioc->base_readl(&ioc->chip->Doorbell);
5197 else
5198 reply[i] = le16_to_cpu(
5199 ioc->base_readl(&ioc->chip->Doorbell)
5200 & MPI2_DOORBELL_DATA_MASK);
5201 writel(0, &ioc->chip->HostInterruptStatus);
5202 }
5203
5204 _base_wait_for_doorbell_int(ioc, 5);
5205 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5206 dhsprintk(ioc,
5207 ioc_info(ioc, "doorbell is in use (line=%d)\n",
5208 __LINE__));
5209 }
5210 writel(0, &ioc->chip->HostInterruptStatus);
5211
5212 if (ioc->logging_level & MPT_DEBUG_INIT) {
5213 mfp = (__le32 *)reply;
5214 pr_info("\toffset:data\n");
5215 for (i = 0; i < reply_bytes/4; i++)
5216 pr_info("\t[0x%02x]:%08x\n", i*4,
5217 le32_to_cpu(mfp[i]));
5218 }
5219 return 0;
5220}
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
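/**
 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SAS IO Unit Control Request message allows the host to perform
 * low-level operations, such as resets on the phys of the IO Unit. It also
 * allows the host to obtain the IOC-assigned device handle for a device if
 * it has other identifying information about the device, and to remove IOC
 * resources associated with the device.
 *
 * Return: 0 for success, non-zero for failure.
 */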
5236int
5237mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5238 Mpi2SasIoUnitControlReply_t *mpi_reply,
5239 Mpi2SasIoUnitControlRequest_t *mpi_request)
5240{
5241 u16 smid;
5242 u32 ioc_state;
5243 u8 issue_reset = 0;
5244 int rc;
5245 void *request;
5246 u16 wait_state_count;
5247
5248 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5249
5250 mutex_lock(&ioc->base_cmds.mutex);
5251
5252 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
5253 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
5254 rc = -EAGAIN;
5255 goto out;
5256 }
5257
5258 wait_state_count = 0;
5259 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5260 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
5261 if (wait_state_count++ == 10) {
5262 ioc_err(ioc, "%s: failed due to ioc not operational\n",
5263 __func__);
5264 rc = -EFAULT;
5265 goto out;
5266 }
5267 ssleep(1);
5268 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5269 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5270 __func__, wait_state_count);
5271 }
5272
5273 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5274 if (!smid) {
5275 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5276 rc = -EAGAIN;
5277 goto out;
5278 }
5279
5280 rc = 0;
5281 ioc->base_cmds.status = MPT3_CMD_PENDING;
5282 request = mpt3sas_base_get_msg_frame(ioc, smid);
5283 ioc->base_cmds.smid = smid;
5284 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
5285 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5286 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
5287 ioc->ioc_link_reset_in_progress = 1;
5288 init_completion(&ioc->base_cmds.done);
5289 mpt3sas_base_put_smid_default(ioc, smid);
5290 wait_for_completion_timeout(&ioc->base_cmds.done,
5291 msecs_to_jiffies(10000));
5292 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5293 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
5294 ioc->ioc_link_reset_in_progress)
5295 ioc->ioc_link_reset_in_progress = 0;
5296 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5297 issue_reset =
5298 mpt3sas_base_check_cmd_timeout(ioc,
5299 ioc->base_cmds.status, mpi_request,
5300 sizeof(Mpi2SasIoUnitControlRequest_t)/4);
5301 goto issue_host_reset;
5302 }
5303 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5304 memcpy(mpi_reply, ioc->base_cmds.reply,
5305 sizeof(Mpi2SasIoUnitControlReply_t));
5306 else
5307 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
5308 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5309 goto out;
5310
5311 issue_host_reset:
5312 if (issue_reset)
5313 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
5314 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5315 rc = -EFAULT;
5316 out:
5317 mutex_unlock(&ioc->base_cmds.mutex);
5318 return rc;
5319}
5320
5321
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
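/**
 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SCSI Enclosure Processor request message causes the IOC to
 * communicate with SES devices to control LED status signals.
 *
 * Return: 0 for success, non-zero for failure.
 */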
5332int
5333mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
5334 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
5335{
5336 u16 smid;
5337 u32 ioc_state;
5338 u8 issue_reset = 0;
5339 int rc;
5340 void *request;
5341 u16 wait_state_count;
5342
5343 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5344
5345 mutex_lock(&ioc->base_cmds.mutex);
5346
5347 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
5348 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
5349 rc = -EAGAIN;
5350 goto out;
5351 }
5352
5353 wait_state_count = 0;
5354 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5355 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
5356 if (wait_state_count++ == 10) {
5357 ioc_err(ioc, "%s: failed due to ioc not operational\n",
5358 __func__);
5359 rc = -EFAULT;
5360 goto out;
5361 }
5362 ssleep(1);
5363 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5364 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5365 __func__, wait_state_count);
5366 }
5367
5368 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5369 if (!smid) {
5370 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5371 rc = -EAGAIN;
5372 goto out;
5373 }
5374
5375 rc = 0;
5376 ioc->base_cmds.status = MPT3_CMD_PENDING;
5377 request = mpt3sas_base_get_msg_frame(ioc, smid);
5378 ioc->base_cmds.smid = smid;
	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
5380 init_completion(&ioc->base_cmds.done);
5381 mpt3sas_base_put_smid_default(ioc, smid);
5382 wait_for_completion_timeout(&ioc->base_cmds.done,
5383 msecs_to_jiffies(10000));
5384 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5385 issue_reset =
5386 mpt3sas_base_check_cmd_timeout(ioc,
5387 ioc->base_cmds.status, mpi_request,
5388 sizeof(Mpi2SepRequest_t)/4);
5389 goto issue_host_reset;
5390 }
5391 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5392 memcpy(mpi_reply, ioc->base_cmds.reply,
5393 sizeof(Mpi2SepReply_t));
5394 else
5395 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
5396 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5397 goto out;
5398
5399 issue_host_reset:
5400 if (issue_reset)
5401 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
5402 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5403 rc = -EFAULT;
5404 out:
5405 mutex_unlock(&ioc->base_cmds.mutex);
5406 return rc;
5407}
5408
5409
5410
5411
5412
5413
5414
5415
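/**
 * _base_get_port_facts - obtain port facts reply and save in ioc
 * @ioc: per adapter object
 * @port: port number
 *
 * Return: 0 for success, non-zero for failure.
 */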
5416static int
5417_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
5418{
5419 Mpi2PortFactsRequest_t mpi_request;
5420 Mpi2PortFactsReply_t mpi_reply;
5421 struct mpt3sas_port_facts *pfacts;
5422 int mpi_reply_sz, mpi_request_sz, r;
5423
5424 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5425
5426 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
5427 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
5428 memset(&mpi_request, 0, mpi_request_sz);
5429 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
5430 mpi_request.PortNumber = port;
5431 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
5432 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
5433
5434 if (r != 0) {
5435 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
5436 return r;
5437 }
5438
5439 pfacts = &ioc->pfacts[port];
5440 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
5441 pfacts->PortNumber = mpi_reply.PortNumber;
5442 pfacts->VP_ID = mpi_reply.VP_ID;
5443 pfacts->VF_ID = mpi_reply.VF_ID;
5444 pfacts->MaxPostedCmdBuffers =
5445 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
5446
5447 return 0;
5448}
5449
5450
5451
5452
5453
5454
5455
5456
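/**
 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */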
5457static int
5458_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
5459{
5460 u32 ioc_state;
5461 int rc;
5462
5463 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5464
5465 if (ioc->pci_error_recovery) {
5466 dfailprintk(ioc,
5467 ioc_info(ioc, "%s: host in pci error recovery\n",
5468 __func__));
5469 return -EFAULT;
5470 }
5471
5472 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5473 dhsprintk(ioc,
5474 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
5475 __func__, ioc_state));
5476
5477 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
5478 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
5479 return 0;
5480
5481 if (ioc_state & MPI2_DOORBELL_USED) {
5482 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
5483 goto issue_diag_reset;
5484 }
5485
5486 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
5487 mpt3sas_base_fault_info(ioc, ioc_state &
5488 MPI2_DOORBELL_DATA_MASK);
5489 goto issue_diag_reset;
5490 }
5491
5492 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5493 if (ioc_state) {
5494 dfailprintk(ioc,
5495 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5496 __func__, ioc_state));
5497 return -EFAULT;
5498 }
5499
5500 issue_diag_reset:
5501 rc = _base_diag_reset(ioc);
5502 return rc;
5503}
5504
5505
5506
5507
5508
5509
5510
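/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */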
5511static int
5512_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
5513{
5514 Mpi2IOCFactsRequest_t mpi_request;
5515 Mpi2IOCFactsReply_t mpi_reply;
5516 struct mpt3sas_facts *facts;
5517 int mpi_reply_sz, mpi_request_sz, r;
5518
5519 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5520
5521 r = _base_wait_for_iocstate(ioc, 10);
5522 if (r) {
5523 dfailprintk(ioc,
5524 ioc_info(ioc, "%s: failed getting to correct state\n",
5525 __func__));
5526 return r;
5527 }
5528 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
5529 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
5530 memset(&mpi_request, 0, mpi_request_sz);
5531 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
5532 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
5533 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
5534
5535 if (r != 0) {
5536 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
5537 return r;
5538 }
5539
5540 facts = &ioc->facts;
5541 memset(facts, 0, sizeof(struct mpt3sas_facts));
5542 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
5543 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
5544 facts->VP_ID = mpi_reply.VP_ID;
5545 facts->VF_ID = mpi_reply.VF_ID;
5546 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
5547 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
5548 facts->WhoInit = mpi_reply.WhoInit;
5549 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
5550 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
5551 if (ioc->msix_enable && (facts->MaxMSIxVectors <=
5552 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
5553 ioc->combined_reply_queue = 0;
5554 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
5555 facts->MaxReplyDescriptorPostQueueDepth =
5556 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
5557 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
5558 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
5559 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
5560 ioc->ir_firmware = 1;
5561 if ((facts->IOCCapabilities &
5562 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
5563 ioc->rdpq_array_capable = 1;
5564 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
5565 facts->IOCRequestFrameSize =
5566 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
5567 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5568 facts->IOCMaxChainSegmentSize =
5569 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
5570 }
5571 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
5572 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
5573 ioc->shost->max_id = -1;
5574 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
5575 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
5576 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
5577 facts->HighPriorityCredit =
5578 le16_to_cpu(mpi_reply.HighPriorityCredit);
5579 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
5580 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
5581 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
5582
5583
5584
5585
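	/* get the host page size from IOC Facts; if it's 0, default to 4k */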
5586 ioc->page_size = 1 << facts->CurrentHostPageSize;
5587 if (ioc->page_size == 1) {
5588 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
5589 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
5590 }
5591 dinitprintk(ioc,
5592 ioc_info(ioc, "CurrentHostPageSize(%d)\n",
5593 facts->CurrentHostPageSize));
5594
5595 dinitprintk(ioc,
5596 ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
5597 facts->RequestCredit, facts->MaxChainDepth));
5598 dinitprintk(ioc,
5599 ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
5600 facts->IOCRequestFrameSize * 4,
5601 facts->ReplyFrameSize * 4));
5602 return 0;
5603}
5604
5605
5606
5607
5608
5609
5610
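/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */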
5611static int
5612_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
5613{
5614 Mpi2IOCInitRequest_t mpi_request;
5615 Mpi2IOCInitReply_t mpi_reply;
5616 int i, r = 0;
5617 ktime_t current_time;
5618 u16 ioc_status;
5619 u32 reply_post_free_array_sz = 0;
5620
5621 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5622
5623 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
5624 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
5625 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
5626 mpi_request.VF_ID = 0;
5627 mpi_request.VP_ID = 0;
5628 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
5629 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
5630 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
5631
5632 if (_base_is_controller_msix_enabled(ioc))
5633 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
5634 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
5635 mpi_request.ReplyDescriptorPostQueueDepth =
5636 cpu_to_le16(ioc->reply_post_queue_depth);
5637 mpi_request.ReplyFreeQueueDepth =
5638 cpu_to_le16(ioc->reply_free_queue_depth);
5639
5640 mpi_request.SenseBufferAddressHigh =
5641 cpu_to_le32((u64)ioc->sense_dma >> 32);
5642 mpi_request.SystemReplyAddressHigh =
5643 cpu_to_le32((u64)ioc->reply_dma >> 32);
5644 mpi_request.SystemRequestFrameBaseAddress =
5645 cpu_to_le64((u64)ioc->request_dma);
5646 mpi_request.ReplyFreeQueueAddress =
5647 cpu_to_le64((u64)ioc->reply_free_dma);
5648
5649 if (ioc->rdpq_array_enable) {
5650 reply_post_free_array_sz = ioc->reply_queue_count *
5651 sizeof(Mpi2IOCInitRDPQArrayEntry);
5652 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
5653 for (i = 0; i < ioc->reply_queue_count; i++)
5654 ioc->reply_post_free_array[i].RDPQBaseAddress =
5655 cpu_to_le64(
5656 (u64)ioc->reply_post[i].reply_post_free_dma);
5657 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
5658 mpi_request.ReplyDescriptorPostQueueAddress =
5659 cpu_to_le64((u64)ioc->reply_post_free_array_dma);
5660 } else {
5661 mpi_request.ReplyDescriptorPostQueueAddress =
5662 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
5663 }
5664
5665
5666
5667
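	/*
	 * This time stamp specifies number of milliseconds
	 * since epoch ~ midnight January 1, 1970.
	 */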
5668 current_time = ktime_get_real();
5669 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
5670
5671 if (ioc->logging_level & MPT_DEBUG_INIT) {
5672 __le32 *mfp;
5673 int i;
5674
5675 mfp = (__le32 *)&mpi_request;
5676 pr_info("\toffset:data\n");
5677 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
5678 pr_info("\t[0x%02x]:%08x\n", i*4,
5679 le32_to_cpu(mfp[i]));
5680 }
5681
5682 r = _base_handshake_req_reply_wait(ioc,
5683 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
5684 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
5685
5686 if (r != 0) {
5687 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
5688 return r;
5689 }
5690
5691 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5692 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
5693 mpi_reply.IOCLogInfo) {
5694 ioc_err(ioc, "%s: failed\n", __func__);
5695 r = -EIO;
5696 }
5697
5698 return r;
5699}
5700
5701
5702
5703
5704
5705
5706
5707
5708
5709
5710
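/**
 * mpt3sas_port_enable_done - command completion routine for port enable
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 * 0 means the mf is freed from this function.
 */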
5711u8
5712mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
5713 u32 reply)
5714{
5715 MPI2DefaultReply_t *mpi_reply;
5716 u16 ioc_status;
5717
5718 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
5719 return 1;
5720
5721 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5722 if (!mpi_reply)
5723 return 1;
5724
5725 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
5726 return 1;
5727
5728 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
5729 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
5730 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
5731 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
5732 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5733 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5734 ioc->port_enable_failed = 1;
5735
5736 if (ioc->is_driver_loading) {
5737 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
5738 mpt3sas_port_enable_complete(ioc);
5739 return 1;
5740 } else {
5741 ioc->start_scan_failed = ioc_status;
5742 ioc->start_scan = 0;
5743 return 1;
5744 }
5745 }
5746 complete(&ioc->port_enable_cmds.done);
5747 return 1;
5748}
5749
5750
5751
5752
5753
5754
5755
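/**
 * _base_send_port_enable - send port_enable (discovery stuff) to firmware
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */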
5756static int
5757_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
5758{
5759 Mpi2PortEnableRequest_t *mpi_request;
5760 Mpi2PortEnableReply_t *mpi_reply;
5761 int r = 0;
5762 u16 smid;
5763 u16 ioc_status;
5764
5765 ioc_info(ioc, "sending port enable !!\n");
5766
5767 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5768 ioc_err(ioc, "%s: internal command already in use\n", __func__);
5769 return -EAGAIN;
5770 }
5771
5772 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5773 if (!smid) {
5774 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5775 return -EAGAIN;
5776 }
5777
5778 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5779 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5780 ioc->port_enable_cmds.smid = smid;
5781 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5782 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5783
5784 init_completion(&ioc->port_enable_cmds.done);
5785 mpt3sas_base_put_smid_default(ioc, smid);
5786 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
5787 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
5788 ioc_err(ioc, "%s: timeout\n", __func__);
5789 _debug_dump_mf(mpi_request,
5790 sizeof(Mpi2PortEnableRequest_t)/4);
5791 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
5792 r = -EFAULT;
5793 else
5794 r = -ETIME;
5795 goto out;
5796 }
5797
5798 mpi_reply = ioc->port_enable_cmds.reply;
5799 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5800 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5801 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
5802 __func__, ioc_status);
5803 r = -EFAULT;
5804 goto out;
5805 }
5806
5807 out:
5808 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5809 ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
5810 return r;
5811}
5812
5813
5814
5815
5816
5817
5818
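/**
 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */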
5819int
5820mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
5821{
5822 Mpi2PortEnableRequest_t *mpi_request;
5823 u16 smid;
5824
5825 ioc_info(ioc, "sending port enable !!\n");
5826
5827 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5828 ioc_err(ioc, "%s: internal command already in use\n", __func__);
5829 return -EAGAIN;
5830 }
5831
5832 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5833 if (!smid) {
5834 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5835 return -EAGAIN;
5836 }
5837
5838 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5839 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5840 ioc->port_enable_cmds.smid = smid;
5841 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5842 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5843
5844 mpt3sas_base_put_smid_default(ioc, smid);
5845 return 0;
5846}
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
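/**
 * _base_determine_wait_on_discovery - decide whether to wait on discovery
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Return: 1 for wait, 0 for don't wait.
 */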
5857static int
5858_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
5859{
5860
5861
5862
5863
5864
5865
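	/*
	 * Wait for discovery to complete when IR firmware is loaded: SAS
	 * topology events arrive before physical disk events, so the
	 * coalescing of RAID volumes must occur before port enable completes.
	 */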
5866 if (ioc->ir_firmware)
5867 return 1;
5868
5869
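	/* if there is no BIOS, then we don't need to wait */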
5870 if (!ioc->bios_pg3.BiosVersion)
5871 return 0;
5872
5873
5874
5875
5876
5877
5878
5879
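	/*
	 * The BIOS is present: wait for discovery only when a current,
	 * requested, or requested-alternate boot device is specified in
	 * BIOS page 2.
	 */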
5880 if ((ioc->bios_pg2.CurrentBootDeviceForm &
5881 MPI2_BIOSPAGE2_FORM_MASK) ==
5882 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
5883
5884 (ioc->bios_pg2.ReqBootDeviceForm &
5885 MPI2_BIOSPAGE2_FORM_MASK) ==
5886 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
5887
5888 (ioc->bios_pg2.ReqAltBootDeviceForm &
5889 MPI2_BIOSPAGE2_FORM_MASK) ==
5890 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
5891 return 0;
5892
5893 return 1;
5894}
5895
5896
5897
5898
5899
5900
5901
5902
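/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */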
5903static void
5904_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
5905{
5906 u32 desired_event;
5907
5908 if (event >= 128)
5909 return;
5910
5911 desired_event = (1 << (event % 32));
5912
5913 if (event < 32)
5914 ioc->event_masks[0] &= ~desired_event;
5915 else if (event < 64)
5916 ioc->event_masks[1] &= ~desired_event;
5917 else if (event < 96)
5918 ioc->event_masks[2] &= ~desired_event;
5919 else if (event < 128)
5920 ioc->event_masks[3] &= ~desired_event;
5921}
5922
5923
5924
5925
5926
5927
5928
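/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */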
5929static int
5930_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
5931{
5932 Mpi2EventNotificationRequest_t *mpi_request;
5933 u16 smid;
5934 int r = 0;
5935 int i;
5936
5937 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5938
5939 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
5940 ioc_err(ioc, "%s: internal command already in use\n", __func__);
5941 return -EAGAIN;
5942 }
5943
5944 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5945 if (!smid) {
5946 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5947 return -EAGAIN;
5948 }
5949 ioc->base_cmds.status = MPT3_CMD_PENDING;
5950 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5951 ioc->base_cmds.smid = smid;
5952 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
5953 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5954 mpi_request->VF_ID = 0;
5955 mpi_request->VP_ID = 0;
5956 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5957 mpi_request->EventMasks[i] =
5958 cpu_to_le32(ioc->event_masks[i]);
5959 init_completion(&ioc->base_cmds.done);
5960 mpt3sas_base_put_smid_default(ioc, smid);
5961 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
5962 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5963 ioc_err(ioc, "%s: timeout\n", __func__);
5964 _debug_dump_mf(mpi_request,
5965 sizeof(Mpi2EventNotificationRequest_t)/4);
5966 if (ioc->base_cmds.status & MPT3_CMD_RESET)
5967 r = -EFAULT;
5968 else
5969 r = -ETIME;
5970 } else
5971 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
5972 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5973 return r;
5974}
5975
5976
5977
5978
5979
5980
5981
5982
5983
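/**
 * mpt3sas_base_validate_event_type - validating event types
 * @ioc: per adapter object
 * @event_type: firmware event type
 *
 * This will turn on firmware event notification when an application
 * asks for that event. Events that are already enabled are not masked.
 */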
5984void
5985mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
5986{
5987 int i, j;
5988 u32 event_mask, desired_event;
5989 u8 send_update_to_fw;
5990
5991 for (i = 0, send_update_to_fw = 0; i <
5992 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
5993 event_mask = ~event_type[i];
5994 desired_event = 1;
5995 for (j = 0; j < 32; j++) {
5996 if (!(event_mask & desired_event) &&
5997 (ioc->event_masks[i] & desired_event)) {
5998 ioc->event_masks[i] &= ~desired_event;
5999 send_update_to_fw = 1;
6000 }
6001 desired_event = (desired_event << 1);
6002 }
6003 }
6004
6005 if (!send_update_to_fw)
6006 return;
6007
6008 mutex_lock(&ioc->base_cmds.mutex);
6009 _base_event_notification(ioc);
6010 mutex_unlock(&ioc->base_cmds.mutex);
6011}
6012
6013
6014
6015
6016
6017
6018
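/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */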
6019static int
6020_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6021{
6022 u32 host_diagnostic;
6023 u32 ioc_state;
6024 u32 count;
6025 u32 hcb_size;
6026
6027 ioc_info(ioc, "sending diag reset !!\n");
6028
6029 drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
6030
6031 count = 0;
6032 do {
6033
6034
6035
6036 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
6037 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6038 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
6039 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
6040 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
6041 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
6042 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
6043 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
6044
6045
6046 msleep(100);
6047
6048 if (count++ > 20)
6049 goto out;
6050
6051 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6052 drsprintk(ioc,
6053 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6054 count, host_diagnostic));
6055
6056 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
6057
6058 hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
6059
6060 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
6061 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
6062 &ioc->chip->HostDiagnostic);
6063
6064
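	/* this delay allows the chip PCIe hardware time to finish the reset */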
6065 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
6066
6067
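	/* approximately 300 second max wait */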
6068 for (count = 0; count < (300000000 /
6069 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
6070
6071 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6072
6073 if (host_diagnostic == 0xFFFFFFFF)
6074 goto out;
6075 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
6076 break;
6077
6078 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
6079 }
6080
6081 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
6082
6083 drsprintk(ioc,
6084 ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
6085 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
6086 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
6087 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
6088
6089 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
6090 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
6091 &ioc->chip->HCBSize);
6092 }
6093
6094 drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
6095 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
6096 &ioc->chip->HostDiagnostic);
6097
6098 drsprintk(ioc,
6099 ioc_info(ioc, "disable writes to the diagnostic register\n"));
6100 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6101
6102 drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
6103 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
6104 if (ioc_state) {
6105 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6106 __func__, ioc_state);
6107 goto out;
6108 }
6109
6110 ioc_info(ioc, "diag reset: SUCCESS\n");
6111 return 0;
6112
6113 out:
6114 ioc_err(ioc, "diag reset: FAILED\n");
6115 return -EFAULT;
6116}
6117
6118
6119
6120
6121
6122
6123
6124
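/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */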
6125static int
6126_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
6127{
6128 u32 ioc_state;
6129 int rc;
6130 int count;
6131
6132 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6133
6134 if (ioc->pci_error_recovery)
6135 return 0;
6136
6137 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6138 dhsprintk(ioc,
6139 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6140 __func__, ioc_state));
6141
6142
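	/* if in RESET state, it should move to READY state shortly */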
6143 count = 0;
6144 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
6145 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
6146 MPI2_IOC_STATE_READY) {
6147 if (count++ == 10) {
6148 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6149 __func__, ioc_state);
6150 return -EFAULT;
6151 }
6152 ssleep(1);
6153 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6154 }
6155 }
6156
6157 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
6158 return 0;
6159
6160 if (ioc_state & MPI2_DOORBELL_USED) {
6161 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6162 goto issue_diag_reset;
6163 }
6164
6165 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6166 mpt3sas_base_fault_info(ioc, ioc_state &
6167 MPI2_DOORBELL_DATA_MASK);
6168 goto issue_diag_reset;
6169 }
6170
6171 if (type == FORCE_BIG_HAMMER)
6172 goto issue_diag_reset;
6173
6174 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6175 if (!(_base_send_ioc_reset(ioc,
6176 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
6177 return 0;
6178 }
6179
6180 issue_diag_reset:
6181 rc = _base_diag_reset(ioc);
6182 return rc;
6183}
6184
6185
6186
6187
6188
6189
6190
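/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */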
6191static int
6192_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6193{
6194 int r, i, index;
6195 unsigned long flags;
6196 u32 reply_address;
6197 u16 smid;
6198 struct _tr_list *delayed_tr, *delayed_tr_next;
6199 struct _sc_list *delayed_sc, *delayed_sc_next;
6200 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
6201 u8 hide_flag;
6202 struct adapter_reply_queue *reply_q;
6203 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
6204
6205 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6206
6207
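	/* clean the delayed target reset list */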
6208 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6209 &ioc->delayed_tr_list, list) {
6210 list_del(&delayed_tr->list);
6211 kfree(delayed_tr);
6212 }
6213
6214
6215 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6216 &ioc->delayed_tr_volume_list, list) {
6217 list_del(&delayed_tr->list);
6218 kfree(delayed_tr);
6219 }
6220
6221 list_for_each_entry_safe(delayed_sc, delayed_sc_next,
6222 &ioc->delayed_sc_list, list) {
6223 list_del(&delayed_sc->list);
6224 kfree(delayed_sc);
6225 }
6226
6227 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
6228 &ioc->delayed_event_ack_list, list) {
6229 list_del(&delayed_event_ack->list);
6230 kfree(delayed_event_ack);
6231 }
6232
6233 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
6234
6235
6236 INIT_LIST_HEAD(&ioc->hpr_free_list);
6237 smid = ioc->hi_priority_smid;
6238 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
6239 ioc->hpr_lookup[i].cb_idx = 0xFF;
6240 ioc->hpr_lookup[i].smid = smid;
6241 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
6242 &ioc->hpr_free_list);
6243 }
6244
6245
6246 INIT_LIST_HEAD(&ioc->internal_free_list);
6247 smid = ioc->internal_smid;
6248 for (i = 0; i < ioc->internal_depth; i++, smid++) {
6249 ioc->internal_lookup[i].cb_idx = 0xFF;
6250 ioc->internal_lookup[i].smid = smid;
6251 list_add_tail(&ioc->internal_lookup[i].tracker_list,
6252 &ioc->internal_free_list);
6253 }
6254
6255 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
6256
6257
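	/* initialize Reply Free Queue */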
6258 for (i = 0, reply_address = (u32)ioc->reply_dma ;
6259 i < ioc->reply_free_queue_depth ; i++, reply_address +=
6260 ioc->reply_sz) {
6261 ioc->reply_free[i] = cpu_to_le32(reply_address);
6262 if (ioc->is_mcpu_endpoint)
6263 _base_clone_reply_to_sys_mem(ioc,
6264 reply_address, i);
6265 }
6266
6267
6268 if (ioc->is_driver_loading)
6269 _base_assign_reply_queues(ioc);
6270
6271
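	/* initialize Reply Post Free Queue */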
6272 index = 0;
6273 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
6274 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
6275
6276
6277
6278
6279 if (ioc->rdpq_array_enable) {
6280 reply_q->reply_post_free =
6281 ioc->reply_post[index++].reply_post_free;
6282 } else {
6283 reply_q->reply_post_free = reply_post_free_contig;
6284 reply_post_free_contig += ioc->reply_post_queue_depth;
6285 }
6286
6287 reply_q->reply_post_host_index = 0;
6288 for (i = 0; i < ioc->reply_post_queue_depth; i++)
6289 reply_q->reply_post_free[i].Words =
6290 cpu_to_le64(ULLONG_MAX);
6291 if (!_base_is_controller_msix_enabled(ioc))
6292 goto skip_init_reply_post_free_queue;
6293 }
6294 skip_init_reply_post_free_queue:
6295
6296 r = _base_send_ioc_init(ioc);
6297 if (r)
6298 return r;
6299
6300
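	/* initialize reply free host index */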
6301 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
6302 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
6303
6304
6305 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
6306 if (ioc->combined_reply_queue)
6307 writel((reply_q->msix_index & 7)<<
6308 MPI2_RPHI_MSIX_INDEX_SHIFT,
6309 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
6310 else
6311 writel(reply_q->msix_index <<
6312 MPI2_RPHI_MSIX_INDEX_SHIFT,
6313 &ioc->chip->ReplyPostHostIndex);
6314
6315 if (!_base_is_controller_msix_enabled(ioc))
6316 goto skip_init_reply_post_host_index;
6317 }
6318
6319 skip_init_reply_post_host_index:
6320
6321 _base_unmask_interrupts(ioc);
6322
6323 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6324 r = _base_display_fwpkg_version(ioc);
6325 if (r)
6326 return r;
6327 }
6328
6329 _base_static_config_pages(ioc);
6330 r = _base_event_notification(ioc);
6331 if (r)
6332 return r;
6333
6334 if (ioc->is_driver_loading) {
6335
6336 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
6337 == 0x80) {
6338 hide_flag = (u8) (
6339 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
6340 MFG_PAGE10_HIDE_SSDS_MASK);
6341 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
6342 ioc->mfg_pg10_hide_flag = hide_flag;
6343 }
6344
6345 ioc->wait_for_discovery_to_complete =
6346 _base_determine_wait_on_discovery(ioc);
6347
6348 return r;
6349 }
6350
6351 r = _base_send_port_enable(ioc);
6352 if (r)
6353 return r;
6354
6355 return r;
6356}
6357
6358
6359
6360
6361
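/**
 * mpt3sas_base_free_resources - free pci resources
 * @ioc: per adapter object
 */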
6362void
6363mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
6364{
6365 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6366
6367
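	/* synchronize freeing resources with pci_access_mutex lock */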
6368 mutex_lock(&ioc->pci_access_mutex);
6369 if (ioc->chip_phys && ioc->chip) {
6370 _base_mask_interrupts(ioc);
6371 ioc->shost_recovery = 1;
6372 _base_make_ioc_ready(ioc, SOFT_RESET);
6373 ioc->shost_recovery = 0;
6374 }
6375
6376 mpt3sas_base_unmap_resources(ioc);
6377 mutex_unlock(&ioc->pci_access_mutex);
6378 return;
6379}
6380
6381
6382
6383
6384
6385
6386
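/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */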
6387int
6388mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6389{
6390 int r, i;
6391 int cpu_id, last_cpu_id = 0;
6392
6393 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6394
6395
6396 ioc->cpu_count = num_online_cpus();
6397 for_each_online_cpu(cpu_id)
6398 last_cpu_id = cpu_id;
6399 ioc->cpu_msix_table_sz = last_cpu_id + 1;
6400 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
6401 ioc->reply_queue_count = 1;
6402 if (!ioc->cpu_msix_table) {
6403 dfailprintk(ioc,
6404 ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
6405 r = -ENOMEM;
6406 goto out_free_resources;
6407 }
6408
6409 if (ioc->is_warpdrive) {
6410 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
6411 sizeof(resource_size_t *), GFP_KERNEL);
6412 if (!ioc->reply_post_host_index) {
6413 dfailprintk(ioc,
6414 ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
6415 r = -ENOMEM;
6416 goto out_free_resources;
6417 }
6418 }
6419
6420 ioc->rdpq_array_enable_assigned = 0;
6421 ioc->dma_mask = 0;
6422 if (ioc->is_aero_ioc)
6423 ioc->base_readl = &_base_readl_aero;
6424 else
6425 ioc->base_readl = &_base_readl;
6426 r = mpt3sas_base_map_resources(ioc);
6427 if (r)
6428 goto out_free_resources;
6429
6430 pci_set_drvdata(ioc->pdev, ioc->shost);
6431 r = _base_get_ioc_facts(ioc);
6432 if (r)
6433 goto out_free_resources;
6434
6435 switch (ioc->hba_mpi_version_belonged) {
6436 case MPI2_VERSION:
6437 ioc->build_sg_scmd = &_base_build_sg_scmd;
6438 ioc->build_sg = &_base_build_sg;
6439 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
6440 break;
6441 case MPI25_VERSION:
6442 case MPI26_VERSION:
6443
6444
6445
6446
6447
6448
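		/*
		 * In SAS3.0 and above, SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU,
		 * Target Assist, and Target Status all require the IEEE
		 * formatted scatter gather elements.
		 */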
6449 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
6450 ioc->build_sg = &_base_build_sg_ieee;
6451 ioc->build_nvme_prp = &_base_build_nvme_prp;
6452 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
6453 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
6454
6455 break;
6456 }
6457
6458 if (ioc->is_mcpu_endpoint)
6459 ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
6460 else
6461 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
6462
6463
6464
6465
6466
6467
6468
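	/*
	 * These function pointers are for requests that don't require the
	 * IEEE scatter gather elements, e.g. Configuration Pages and SAS
	 * IO Unit Control.
	 */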
6469 ioc->build_sg_mpi = &_base_build_sg;
6470 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
6471
6472 r = _base_make_ioc_ready(ioc, SOFT_RESET);
6473 if (r)
6474 goto out_free_resources;
6475
6476 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
6477 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
6478 if (!ioc->pfacts) {
6479 r = -ENOMEM;
6480 goto out_free_resources;
6481 }
6482
6483 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
6484 r = _base_get_port_facts(ioc, i);
6485 if (r)
6486 goto out_free_resources;
6487 }
6488
6489 r = _base_allocate_memory_pools(ioc);
6490 if (r)
6491 goto out_free_resources;
6492
6493 init_waitqueue_head(&ioc->reset_wq);
6494
6495
6496 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
6497 if (ioc->facts.MaxDevHandle % 8)
6498 ioc->pd_handles_sz++;
6499 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
6500 GFP_KERNEL);
6501 if (!ioc->pd_handles) {
6502 r = -ENOMEM;
6503 goto out_free_resources;
6504 }
6505 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
6506 GFP_KERNEL);
6507 if (!ioc->blocking_handles) {
6508 r = -ENOMEM;
6509 goto out_free_resources;
6510 }
6511
6512
6513 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
6514 if (ioc->facts.MaxDevHandle % 8)
6515 ioc->pend_os_device_add_sz++;
6516 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
6517 GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}
6520
6521 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
6522 ioc->device_remove_in_progress =
6523 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}
6526
6527 ioc->fwfault_debug = mpt3sas_fwfault_debug;
6528
6529
6530 mutex_init(&ioc->base_cmds.mutex);
6531 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6532 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6533
6534
6535 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6536 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6537
6538
6539 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6540 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
6541 mutex_init(&ioc->transport_cmds.mutex);
6542
6543
6544 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6545 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
6546 mutex_init(&ioc->scsih_cmds.mutex);
6547
6548
6549 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6550 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
6551 mutex_init(&ioc->tm_cmds.mutex);
6552
6553
6554 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6555 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
6556 mutex_init(&ioc->config_cmds.mutex);
6557
6558
6559 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6560 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
6561 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
6562 mutex_init(&ioc->ctl_cmds.mutex);
6563
6564 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
6565 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
6566 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
6567 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
6568 r = -ENOMEM;
6569 goto out_free_resources;
6570 }
6571
6572 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6573 ioc->event_masks[i] = -1;
6574
6575
6576 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
6577 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
6578 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
6579 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
6580 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
6581 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
6582 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
6583 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
6584 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
6585 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
6586 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
6587 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
6588 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
6589 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
6590 if (ioc->is_gen35_ioc) {
6591 _base_unmask_events(ioc,
6592 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
6593 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
6594 _base_unmask_events(ioc,
6595 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
6596 }
6597 }
6598 r = _base_make_ioc_operational(ioc);
6599 if (r)
6600 goto out_free_resources;
6601
6602 ioc->non_operational_loop = 0;
6603 ioc->got_task_abort_from_ioctl = 0;
6604 return 0;
6605
6606 out_free_resources:
6607
6608 ioc->remove_host = 1;
6609
6610 mpt3sas_base_free_resources(ioc);
6611 _base_release_memory_pools(ioc);
6612 pci_set_drvdata(ioc->pdev, NULL);
6613 kfree(ioc->cpu_msix_table);
6614 if (ioc->is_warpdrive)
6615 kfree(ioc->reply_post_host_index);
6616 kfree(ioc->pd_handles);
6617 kfree(ioc->blocking_handles);
6618 kfree(ioc->device_remove_in_progress);
6619 kfree(ioc->pend_os_device_add);
6620 kfree(ioc->tm_cmds.reply);
6621 kfree(ioc->transport_cmds.reply);
6622 kfree(ioc->scsih_cmds.reply);
6623 kfree(ioc->config_cmds.reply);
6624 kfree(ioc->base_cmds.reply);
6625 kfree(ioc->port_enable_cmds.reply);
6626 kfree(ioc->ctl_cmds.reply);
6627 kfree(ioc->ctl_cmds.sense);
6628 kfree(ioc->pfacts);
6629 ioc->ctl_cmds.reply = NULL;
6630 ioc->base_cmds.reply = NULL;
6631 ioc->tm_cmds.reply = NULL;
6632 ioc->scsih_cmds.reply = NULL;
6633 ioc->transport_cmds.reply = NULL;
6634 ioc->config_cmds.reply = NULL;
6635 ioc->pfacts = NULL;
6636 return r;
6637}
6638
6639
6640
6641
6642
6643
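/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 */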
6644void
6645mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
6646{
6647 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6648
6649 mpt3sas_base_stop_watchdog(ioc);
6650 mpt3sas_base_free_resources(ioc);
6651 _base_release_memory_pools(ioc);
6652 mpt3sas_free_enclosure_list(ioc);
6653 pci_set_drvdata(ioc->pdev, NULL);
6654 kfree(ioc->cpu_msix_table);
6655 if (ioc->is_warpdrive)
6656 kfree(ioc->reply_post_host_index);
6657 kfree(ioc->pd_handles);
6658 kfree(ioc->blocking_handles);
6659 kfree(ioc->device_remove_in_progress);
6660 kfree(ioc->pend_os_device_add);
6661 kfree(ioc->pfacts);
6662 kfree(ioc->ctl_cmds.reply);
6663 kfree(ioc->ctl_cmds.sense);
6664 kfree(ioc->base_cmds.reply);
6665 kfree(ioc->port_enable_cmds.reply);
6666 kfree(ioc->tm_cmds.reply);
6667 kfree(ioc->transport_cmds.reply);
6668 kfree(ioc->scsih_cmds.reply);
6669 kfree(ioc->config_cmds.reply);
6670}
6671
6672
6673
6674
6675
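/**
 * _base_pre_reset_handler - pre reset handler
 * @ioc: per adapter object
 */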
6676static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
6677{
6678 mpt3sas_scsih_pre_reset_handler(ioc);
6679 mpt3sas_ctl_pre_reset_handler(ioc);
6680 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
6681}
6682
6683
6684
6685
6686
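/**
 * _base_after_reset_handler - after reset handler
 * @ioc: per adapter object
 */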
6687static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
6688{
6689 mpt3sas_scsih_after_reset_handler(ioc);
6690 mpt3sas_ctl_after_reset_handler(ioc);
6691 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			ioc->start_scan_failed =
			    MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
			ioc->port_enable_cmds.status =
			    MPT3_CMD_NOT_USED;
		} else {
			complete(&ioc->port_enable_cmds.done);
		}
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}

/**
 * _base_reset_done_handler - notify subsystems that the reset has completed
 * @ioc: per adapter object
 */
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}

/**
 * mpt3sas_wait_for_commands_to_complete - wait for outstanding I/O to drain
 * @ioc: per adapter object
 *
 * Waits up to 10 seconds for all pending commands to complete prior to
 * putting the controller into reset.
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}

/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

	if (ioc->pci_error_recovery) {
		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	mutex_lock(&ioc->reset_in_progress_mutex);

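	/*
	 * shost_recovery gates new I/O submission while the reset is in
	 * progress; it is cleared again once the reset path finishes.
	 */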
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

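	/*
	 * Snapshot whether a trace diag buffer is registered (and not yet
	 * released) and whether the IOC is currently faulted, so the
	 * matching master trigger can be fired after a successful reset.
	 */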
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
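	/*
	 * Reset sequence: notify clients (pre-reset), let outstanding I/O
	 * drain, mask interrupts, then bring the IOC back to ready state.
	 */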
	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_after_reset_handler(ioc);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

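	/*
	 * If RDPQ was enabled at attach time but the re-read IOC facts no
	 * longer report the capability, the firmware was likely downgraded
	 * underneath us and the reply queues cannot be re-posted safely.
	 */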
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. "
		      "Please reboot the system and ensure that the correct "
		      "firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	dtmprintk(ioc,
		  ioc_info(ioc, "%s: %s\n",
			   __func__, r == 0 ? "SUCCESS" : "FAILED"));

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
	return r;
}