/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 */

46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/io.h>
59#include <linux/time.h>
60#include <linux/ktime.h>
61#include <linux/kthread.h>
62#include <asm/page.h>
63#include <linux/aer.h>
64
65
66#include "mpt3sas_base.h"
67
68static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
69
70
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
72
73
74#define MAX_HBA_QUEUE_DEPTH 30000
75#define MAX_CHAIN_DEPTH 100000
76static int max_queue_depth = -1;
77module_param(max_queue_depth, int, 0444);
78MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
79
80static int max_sgl_entries = -1;
81module_param(max_sgl_entries, int, 0444);
82MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
83
84static int msix_disable = -1;
85module_param(msix_disable, int, 0444);
86MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
87
88static int smp_affinity_enable = 1;
89module_param(smp_affinity_enable, int, 0444);
90MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
91
92static int max_msix_vectors = -1;
93module_param(max_msix_vectors, int, 0444);
94MODULE_PARM_DESC(max_msix_vectors,
95 " max msix vectors");
96
97static int irqpoll_weight = -1;
98module_param(irqpoll_weight, int, 0444);
99MODULE_PARM_DESC(irqpoll_weight,
100 "irq poll weight (default= one fourth of HBA queue depth)");
101
102static int mpt3sas_fwfault_debug;
103MODULE_PARM_DESC(mpt3sas_fwfault_debug,
104 " enable detection of firmware fault and halt firmware - (default=0)");
105
106static int perf_mode = -1;
107module_param(perf_mode, int, 0444);
108MODULE_PARM_DESC(perf_mode,
109 "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
110 "0 - balanced: high iops mode is enabled &\n\t\t"
111 "interrupt coalescing is enabled only on high iops queues,\n\t\t"
112 "1 - iops: high iops mode is disabled &\n\t\t"
113 "interrupt coalescing is enabled on all queues,\n\t\t"
114 "2 - latency: high iops mode is disabled &\n\t\t"
115 "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
116 "\t\tdefault - default perf_mode is 'balanced'"
117 );
118
119enum mpt3sas_perf_mode {
120 MPT_PERF_MODE_DEFAULT = -1,
121 MPT_PERF_MODE_BALANCED = 0,
122 MPT_PERF_MODE_IOPS = 1,
123 MPT_PERF_MODE_LATENCY = 2,
124};
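
/*
 * Usage sketch (hypothetical values): the knobs above are read-only module
 * parameters set at load time, e.g. to cap the controller queue depth and
 * select latency-oriented interrupt coalescing on Aero/Sea HBAs:
 *
 *	modprobe mpt3sas max_queue_depth=1024 perf_mode=2
 *
 * mpt3sas_fwfault_debug is the exception: it is registered with 0644
 * permissions below, so it is also writable at runtime through
 * /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug.
 */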
125
126static int
127_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
128 u32 ioc_state, int timeout);
129static int
130_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
131static void
132_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
133

/**
 * mpt3sas_base_check_cmd_timeout - check for command timeout or
 *		termination due to Host reset, and dump the request frame.
 * @ioc: per adapter object
 * @status: driver-internal command status (MPT3_CMD_*)
 * @mpi_request: mf request pointer
 * @sz: size of the request frame, in bytes
 *
 * Return: 1 if a controller reset should be issued, else 0.
 */
146u8
147mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
148 u8 status, void *mpi_request, int sz)
149{
150 u8 issue_reset = 0;
151
152 if (!(status & MPT3_CMD_RESET))
153 issue_reset = 1;
154
155 ioc_err(ioc, "Command %s\n",
156 issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
157 _debug_dump_mf(mpi_request, sz);
158
159 return issue_reset;
160}

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: parameter value as a string
 * @kp: kernel parameter descriptor
 *
 * Return: 0 on success, negative errno from param_set_int() on failure.
 */
169static int
170_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
171{
172 int ret = param_set_int(val, kp);
173 struct MPT3SAS_ADAPTER *ioc;
174
175 if (ret)
176 return ret;
177
178
179 pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	/* global ioc spinlock to protect controller list on list operations */
	spin_lock(&gioc_lock);
181 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
182 ioc->fwfault_debug = mpt3sas_fwfault_debug;
183 spin_unlock(&gioc_lock);
184 return 0;
185}
186module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
187 param_get_int, &mpt3sas_fwfault_debug, 0644);
188

/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() for max three times if it gets zero value
 * while reading the system interface register.
 */
196static inline u32
197_base_readl_aero(const volatile void __iomem *addr)
198{
199 u32 i = 0, ret_val;
200
201 do {
202 ret_val = readl(addr);
203 i++;
204 } while (ret_val == 0 && i < 3);
205
206 return ret_val;
207}
208
209static inline u32
210_base_readl(const volatile void __iomem *addr)
211{
212 return readl(addr);
213}
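
/*
 * Note (driver-setup assumption): ioc->base_readl is expected to point at
 * _base_readl_aero on Aero/Sea controllers and at _base_readl elsewhere;
 * a zero value read from a system register on those parts can be
 * transient, hence the bounded retry above.
 */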
214

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *				  in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame (lower 32bit addr)
 * @index: System request message index.
 */
223static void
224_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
225 u32 index)
226{
	/*
	 * 256 is offset within sys register.
	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
	 */
232 u16 cmd_credit = ioc->facts.RequestCredit + 1;
233 void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
234 MPI_FRAME_START_OFFSET +
235 (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
236
237 writel(reply, reply_free_iomem);
238}
239

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *				to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
248static void
249_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
250{
251 int i;
252 u32 *src_virt_mem = (u32 *)src;
253
254 for (i = 0; i < size/4; i++)
255 writel((u32)src_virt_mem[i],
256 (void __iomem *)dst_iomem + (i * 4));
257}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
266static void
267_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
268{
269 int i;
270 u32 *src_virt_mem = (u32 *)(src);
271
272 for (i = 0; i < size/4; i++)
273 writel((u32)src_virt_mem[i],
274 (void __iomem *)dst_iomem + (i * 4));
275}

/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *			for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
287static inline void __iomem*
288_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
289 u8 sge_chain_count)
290{
291 void __iomem *base_chain, *chain_virt;
292 u16 cmd_credit = ioc->facts.RequestCredit + 1;
293
294 base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
295 (cmd_credit * ioc->request_sz) +
296 REPLY_FREE_POOL_SIZE;
297 chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
298 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
299 return chain_virt;
300}

/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *			in BAR0 for scatter gather chains, for
 *			the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
313static inline phys_addr_t
314_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
315 u8 sge_chain_count)
316{
317 phys_addr_t base_chain_phys, chain_phys;
318 u16 cmd_credit = ioc->facts.RequestCredit + 1;
319
320 base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
321 (cmd_credit * ioc->request_sz) +
322 REPLY_FREE_POOL_SIZE;
323 chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
324 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
325 return chain_phys;
326}

/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *			buffer address for the provided smid.
 *			(Each smid can have 64K starting from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
339static void __iomem *
340_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
341{
342 u16 cmd_credit = ioc->facts.RequestCredit + 1;

	/* Added extra 1 to reach end of chain */
344 void __iomem *chain_end = _base_get_chain(ioc,
345 cmd_credit + 1,
346 ioc->facts.MaxChainDepth);
347 return chain_end + (smid * 64 * 1024);
348}

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *			Host buffer Physical address for the
 *			provided smid.
 *			(Each smid can have 64K starting from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Physical address of buffer location in BAR0.
 */
360static phys_addr_t
361_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
362{
363 u16 cmd_credit = ioc->facts.RequestCredit + 1;
364 phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
365 cmd_credit + 1,
366 ioc->facts.MaxChainDepth);
367 return chain_end_phys + (smid * 64 * 1024);
368}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates the chain
 *			lookup list and provides the chain_buffer
 *			address for the matching dma address.
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer, or NULL on failure.
 */
381static void *
382_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
383 dma_addr_t chain_buffer_dma)
384{
385 u16 index, j;
386 struct chain_tracker *ct;
387
388 for (index = 0; index < ioc->scsiio_depth; index++) {
389 for (j = 0; j < ioc->chains_needed_per_io; j++) {
390 ct = &ioc->chain_lookup[index].chains_per_smid[j];
391 if (ct && ct->chain_buffer_dma == chain_buffer_dma)
392 return ct->chain_buffer;
393 }
394 }
395 ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
396 return NULL;
397}

/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *			are handled here. Base function for
 *			double buffering, before submitting
 *			the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
409static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
410 void *mpi_request, u16 smid)
411{
412 Mpi2SGESimple32_t *sgel, *sgel_next;
413 u32 sgl_flags, sge_chain_count = 0;
414 bool is_write = false;
415 u16 i = 0;
416 void __iomem *buffer_iomem;
417 phys_addr_t buffer_iomem_phys;
418 void __iomem *buff_ptr;
419 phys_addr_t buff_ptr_phys;
420 void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
421 void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
422 phys_addr_t dst_addr_phys;
423 MPI2RequestHeader_t *request_hdr;
424 struct scsi_cmnd *scmd;
425 struct scatterlist *sg_scmd = NULL;
426 int is_scsiio_req = 0;
427
428 request_hdr = (MPI2RequestHeader_t *) mpi_request;
429
430 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
431 Mpi25SCSIIORequest_t *scsiio_request =
432 (Mpi25SCSIIORequest_t *)mpi_request;
433 sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
434 is_scsiio_req = 1;
435 } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
436 Mpi2ConfigRequest_t *config_req =
437 (Mpi2ConfigRequest_t *)mpi_request;
438 sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
439 } else
440 return;

	/*
	 * From smid we can get scsi_cmd, once we have sg_scmd,
	 * we just need to get sg_virt and sg_next to get virtual
	 * address associated with sgel->Address.
	 */
447 if (is_scsiio_req) {
		/* Get scsi_cmd using smid */
449 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
450 if (scmd == NULL) {
451 ioc_err(ioc, "scmd is NULL\n");
452 return;
453 }

		/* Get sg_scmd from scmd provided */
456 sg_scmd = scsi_sglist(scmd);
457 }

	/*
	 * 0 - 255	System register
	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
	 * 4352 - 4864	Reply_free pool (512 byte is reserved
	 *		considering maxCredit 32. Reply need extra
	 *		room, for mCPU case kept four times of
	 *		maxCredit).
	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
	 *		128 byte size = 12288)
	 * 17152 - x	Host buffer mapped with smid.
	 *		(Each smid can have 64K Max IO.)
	 * BAR0+Last 1K MSIX Addr and Data
	 * Total size in use 2113664 bytes of 4MB BAR0
	 */
474 buffer_iomem = _base_get_buffer_bar0(ioc, smid);
475 buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
476
477 buff_ptr = buffer_iomem;
478 buff_ptr_phys = buffer_iomem_phys;
479 WARN_ON(buff_ptr_phys > U32_MAX);
480
481 if (le32_to_cpu(sgel->FlagsLength) &
482 (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
483 is_write = true;
484
485 for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
486
487 sgl_flags =
488 (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
489
490 switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
491 case MPI2_SGE_FLAGS_CHAIN_ELEMENT:

			/*
			 * Look up the virtual address of the chain buffer
			 * that this chain SGE's DMA address refers to.
			 */
497 sgel_next =
498 _base_get_chain_buffer_dma_to_chain_buffer(ioc,
499 le32_to_cpu(sgel->Address));
500 if (sgel_next == NULL)
501 return;

			/* Note the destination chain in BAR0 and retarget
			 * this chain SGE's Address to its mirror there.
			 */
506 dst_chain_addr[sge_chain_count] =
507 _base_get_chain(ioc,
508 smid, sge_chain_count);
509 src_chain_addr[sge_chain_count] =
510 (void *) sgel_next;
511 dst_addr_phys = _base_get_chain_phys(ioc,
512 smid, sge_chain_count);
513 WARN_ON(dst_addr_phys > U32_MAX);
514 sgel->Address =
515 cpu_to_le32(lower_32_bits(dst_addr_phys));
516 sgel = sgel_next;
517 sge_chain_count++;
518 break;
519 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
520 if (is_write) {
521 if (is_scsiio_req) {
522 _base_clone_to_sys_mem(buff_ptr,
523 sg_virt(sg_scmd),
524 (le32_to_cpu(sgel->FlagsLength) &
525 0x00ffffff));

					/* Point the SGE at the cloned
					 * buffer inside BAR0 space.
					 */
530 sgel->Address =
531 cpu_to_le32((u32)buff_ptr_phys);
532 } else {
533 _base_clone_to_sys_mem(buff_ptr,
534 ioc->config_vaddr,
535 (le32_to_cpu(sgel->FlagsLength) &
536 0x00ffffff));
537 sgel->Address =
538 cpu_to_le32((u32)buff_ptr_phys);
539 }
540 }
541 buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
542 0x00ffffff);
543 buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
544 0x00ffffff);
545 if ((le32_to_cpu(sgel->FlagsLength) &
546 (MPI2_SGE_FLAGS_END_OF_BUFFER
547 << MPI2_SGE_FLAGS_SHIFT)))
548 goto eob_clone_chain;
549 else {

				/*
				 * Advance to the next scatterlist entry and
				 * the next SGE; both lists move in lockstep.
				 */
556 if (is_scsiio_req) {
557 sg_scmd = sg_next(sg_scmd);
558 if (sg_scmd)
559 sgel++;
560 else
561 goto eob_clone_chain;
562 }
563 }
564 break;
565 }
566 }
567
568eob_clone_chain:
569 for (i = 0; i < sge_chain_count; i++) {
570 if (is_scsiio_req)
571 _base_clone_to_sys_mem(dst_chain_addr[i],
572 src_chain_addr[i], ioc->request_sz);
573 }
574}

/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */
584static int mpt3sas_remove_dead_ioc_func(void *arg)
585{
586 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
587 struct pci_dev *pdev;
588
589 if (!ioc)
590 return -1;
591
592 pdev = ioc->pdev;
593 if (!pdev)
594 return -1;
595 pci_stop_and_remove_bus_device_locked(pdev);
596 return 0;
597}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
605static void
606_base_fault_reset_work(struct work_struct *work)
607{
608 struct MPT3SAS_ADAPTER *ioc =
609 container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
610 unsigned long flags;
611 u32 doorbell;
612 int rc;
613 struct task_struct *p;
614
615
616 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
617 if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
618 ioc->pci_error_recovery)
619 goto rearm_timer;
620 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
621
622 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
623 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
624 ioc_err(ioc, "SAS host is non-operational !!!!\n");

		/* It may be possible that EEH recovery can resolve some of
		 * pci bus failure issues rather than removing the dead ioc
		 * function by considering controller is in a non-operational
		 * state. So here priority is given to the EEH recovery. If
		 * it doesn't resolve this issue, mpt3sas driver will consider
		 * this controller to be in a non-operational state and remove
		 * the dead ioc function.
		 */
634 if (ioc->non_operational_loop++ < 5) {
635 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
636 flags);
637 goto rearm_timer;
638 }

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
647 ioc->schedule_dead_ioc_flush_running_cmds(ioc);

		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
652 ioc->remove_host = 1;
653
654 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
655 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
656 if (IS_ERR(p))
657 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
658 __func__);
659 else
660 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
661 __func__);
662 return;
663 }
664
665 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
666 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
667 ioc->manu_pg11.CoreDumpTOSec :
668 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
669
670 timeout /= (FAULT_POLLING_INTERVAL/1000);
671
672 if (ioc->ioc_coredump_loop == 0) {
673 mpt3sas_print_coredump_info(ioc,
674 doorbell & MPI2_DOORBELL_DATA_MASK);
675
676 spin_lock_irqsave(
677 &ioc->ioc_reset_in_progress_lock, flags);
678 ioc->shost_recovery = 1;
679 spin_unlock_irqrestore(
680 &ioc->ioc_reset_in_progress_lock, flags);
681 mpt3sas_base_mask_interrupts(ioc);
682 _base_clear_outstanding_commands(ioc);
683 }
684
685 ioc_info(ioc, "%s: CoreDump loop %d.",
686 __func__, ioc->ioc_coredump_loop);
687
688
689 if (ioc->ioc_coredump_loop++ < timeout) {
690 spin_lock_irqsave(
691 &ioc->ioc_reset_in_progress_lock, flags);
692 goto rearm_timer;
693 }
694 }
695
696 if (ioc->ioc_coredump_loop) {
697 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
698 ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
699 __func__, ioc->ioc_coredump_loop);
700 else
701 ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
702 __func__, ioc->ioc_coredump_loop);
703 ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
704 }
705 ioc->non_operational_loop = 0;
706 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
707 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
708 ioc_warn(ioc, "%s: hard reset: %s\n",
709 __func__, rc == 0 ? "success" : "failed");
710 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
711 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
712 mpt3sas_print_fault_code(ioc, doorbell &
713 MPI2_DOORBELL_DATA_MASK);
714 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
715 MPI2_IOC_STATE_COREDUMP)
716 mpt3sas_print_coredump_info(ioc, doorbell &
717 MPI2_DOORBELL_DATA_MASK);
718 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
719 MPI2_IOC_STATE_OPERATIONAL)
720 return;
721 }
722 ioc->ioc_coredump_loop = 0;
723
724 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
725 rearm_timer:
726 if (ioc->fault_reset_work_q)
727 queue_delayed_work(ioc->fault_reset_work_q,
728 &ioc->fault_reset_work,
729 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
730 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
731}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
739void
740mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
741{
742 unsigned long flags;
743
744 if (ioc->fault_reset_work_q)
745 return;

	/* initialize fault polling */

749 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
750 snprintf(ioc->fault_reset_work_q_name,
751 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
752 ioc->driver_name, ioc->id);
753 ioc->fault_reset_work_q =
754 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
755 if (!ioc->fault_reset_work_q) {
756 ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
757 return;
758 }
759 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
760 if (ioc->fault_reset_work_q)
761 queue_delayed_work(ioc->fault_reset_work_q,
762 &ioc->fault_reset_work,
763 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
764 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
765}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
773void
774mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
775{
776 unsigned long flags;
777 struct workqueue_struct *wq;
778
779 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
780 wq = ioc->fault_reset_work_q;
781 ioc->fault_reset_work_q = NULL;
782 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
783 if (wq) {
784 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
785 flush_workqueue(wq);
786 destroy_workqueue(wq);
787 }
788}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
795void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
797{
798 ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
799}

/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return: nothing.
 */
808void
809mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
810{
811 ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
812}

/**
 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
 *		completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Return: 0 for success, non-zero for failure.
 */
822int
823mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
824 const char *caller)
825{
826 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
827 ioc->manu_pg11.CoreDumpTOSec :
828 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
829
830 int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
831 timeout);
832
833 if (ioc_state)
834 ioc_err(ioc,
835 "%s: CoreDump timed out. (ioc_state=0x%x)\n",
836 caller, ioc_state);
837 else
838 ioc_info(ioc,
839 "%s: CoreDump completed. (ioc_state=0x%x)\n",
840 caller, ioc_state);
841
842 return ioc_state;
843}

/**
 * mpt3sas_halt_firmware - halt the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the enduser can
 * obtain a ring buffer from controller UART.
 */
854void
855mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
856{
857 u32 doorbell;
858
859 if (!ioc->fwfault_debug)
860 return;
861
862 dump_stack();
863
864 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
865 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
866 mpt3sas_print_fault_code(ioc, doorbell &
867 MPI2_DOORBELL_DATA_MASK);
868 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
869 MPI2_IOC_STATE_COREDUMP) {
870 mpt3sas_print_coredump_info(ioc, doorbell &
871 MPI2_DOORBELL_DATA_MASK);
872 } else {
873 writel(0xC0FFEE00, &ioc->chip->Doorbell);
874 ioc_err(ioc, "Firmware is halted due to command timeout\n");
875 }
876
877 if (ioc->fwfault_debug == 2)
878 for (;;)
879 ;
880 else
881 panic("panic in %s\n", __func__);
882}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
890static void
891_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
892 MPI2RequestHeader_t *request_hdr)
893{
894 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
895 MPI2_IOCSTATUS_MASK;
896 char *desc = NULL;
897 u16 frame_sz;
898 char *func_str = NULL;
899
900
901 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
902 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
903 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
904 return;
905
906 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
907 return;
908
909 switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

915 case MPI2_IOCSTATUS_INVALID_FUNCTION:
916 desc = "invalid function";
917 break;
918 case MPI2_IOCSTATUS_BUSY:
919 desc = "busy";
920 break;
921 case MPI2_IOCSTATUS_INVALID_SGL:
922 desc = "invalid sgl";
923 break;
924 case MPI2_IOCSTATUS_INTERNAL_ERROR:
925 desc = "internal error";
926 break;
927 case MPI2_IOCSTATUS_INVALID_VPID:
928 desc = "invalid vpid";
929 break;
930 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
931 desc = "insufficient resources";
932 break;
933 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
934 desc = "insufficient power";
935 break;
936 case MPI2_IOCSTATUS_INVALID_FIELD:
937 desc = "invalid field";
938 break;
939 case MPI2_IOCSTATUS_INVALID_STATE:
940 desc = "invalid state";
941 break;
942 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
943 desc = "op state not supported";
944 break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

950 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
951 desc = "config invalid action";
952 break;
953 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
954 desc = "config invalid type";
955 break;
956 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
957 desc = "config invalid page";
958 break;
959 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
960 desc = "config invalid data";
961 break;
962 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
963 desc = "config no defaults";
964 break;
965 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
966 desc = "config cant commit";
967 break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

973 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
974 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
975 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
976 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
977 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
978 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
979 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
980 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
981 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
982 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
983 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
984 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
985 break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

991 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
992 desc = "eedp guard error";
993 break;
994 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
995 desc = "eedp ref tag error";
996 break;
997 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
998 desc = "eedp app tag error";
999 break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

1005 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
1006 desc = "target invalid io index";
1007 break;
1008 case MPI2_IOCSTATUS_TARGET_ABORTED:
1009 desc = "target aborted";
1010 break;
1011 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
1012 desc = "target no conn retryable";
1013 break;
1014 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
1015 desc = "target no connection";
1016 break;
1017 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
1018 desc = "target xfer count mismatch";
1019 break;
1020 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
1021 desc = "target data offset error";
1022 break;
1023 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
1024 desc = "target too much write data";
1025 break;
1026 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
1027 desc = "target iu too short";
1028 break;
1029 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
1030 desc = "target ack nak timeout";
1031 break;
1032 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
1033 desc = "target nak received";
1034 break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

1040 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
1041 desc = "smp request failed";
1042 break;
1043 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
1044 desc = "smp data overrun";
1045 break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

1051 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
1052 desc = "diagnostic released";
1053 break;
1054 default:
1055 break;
1056 }
1057
1058 if (!desc)
1059 return;
1060
1061 switch (request_hdr->Function) {
1062 case MPI2_FUNCTION_CONFIG:
1063 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
1064 func_str = "config_page";
1065 break;
1066 case MPI2_FUNCTION_SCSI_TASK_MGMT:
1067 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
1068 func_str = "task_mgmt";
1069 break;
1070 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
1071 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
1072 func_str = "sas_iounit_ctl";
1073 break;
1074 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1075 frame_sz = sizeof(Mpi2SepRequest_t);
1076 func_str = "enclosure";
1077 break;
1078 case MPI2_FUNCTION_IOC_INIT:
1079 frame_sz = sizeof(Mpi2IOCInitRequest_t);
1080 func_str = "ioc_init";
1081 break;
1082 case MPI2_FUNCTION_PORT_ENABLE:
1083 frame_sz = sizeof(Mpi2PortEnableRequest_t);
1084 func_str = "port_enable";
1085 break;
1086 case MPI2_FUNCTION_SMP_PASSTHROUGH:
1087 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
1088 func_str = "smp_passthru";
1089 break;
1090 case MPI2_FUNCTION_NVME_ENCAPSULATED:
1091 frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
1092 ioc->sge_size;
1093 func_str = "nvme_encapsulated";
1094 break;
1095 default:
1096 frame_sz = 32;
1097 func_str = "unknown";
1098 break;
1099 }
1100
1101 ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
1102 desc, ioc_status, request_hdr, func_str);
1103
1104 _debug_dump_mf(request_hdr, frame_sz/4);
1105}

/**
 * _base_display_event_data - verbose translation of firmware asyn events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
1112static void
1113_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1114 Mpi2EventNotificationReply_t *mpi_reply)
1115{
1116 char *desc = NULL;
1117 u16 event;
1118
1119 if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
1120 return;
1121
1122 event = le16_to_cpu(mpi_reply->Event);
1123
1124 switch (event) {
1125 case MPI2_EVENT_LOG_DATA:
1126 desc = "Log Data";
1127 break;
1128 case MPI2_EVENT_STATE_CHANGE:
1129 desc = "Status Change";
1130 break;
1131 case MPI2_EVENT_HARD_RESET_RECEIVED:
1132 desc = "Hard Reset Received";
1133 break;
1134 case MPI2_EVENT_EVENT_CHANGE:
1135 desc = "Event Change";
1136 break;
1137 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
1138 desc = "Device Status Change";
1139 break;
1140 case MPI2_EVENT_IR_OPERATION_STATUS:
1141 if (!ioc->hide_ir_msg)
1142 desc = "IR Operation Status";
1143 break;
1144 case MPI2_EVENT_SAS_DISCOVERY:
1145 {
1146 Mpi2EventDataSasDiscovery_t *event_data =
1147 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
1148 ioc_info(ioc, "Discovery: (%s)",
1149 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
1150 "start" : "stop");
1151 if (event_data->DiscoveryStatus)
1152 pr_cont(" discovery_status(0x%08x)",
1153 le32_to_cpu(event_data->DiscoveryStatus));
1154 pr_cont("\n");
1155 return;
1156 }
1157 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
1158 desc = "SAS Broadcast Primitive";
1159 break;
1160 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
1161 desc = "SAS Init Device Status Change";
1162 break;
1163 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
1164 desc = "SAS Init Table Overflow";
1165 break;
1166 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1167 desc = "SAS Topology Change List";
1168 break;
1169 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
1170 desc = "SAS Enclosure Device Status Change";
1171 break;
1172 case MPI2_EVENT_IR_VOLUME:
1173 if (!ioc->hide_ir_msg)
1174 desc = "IR Volume";
1175 break;
1176 case MPI2_EVENT_IR_PHYSICAL_DISK:
1177 if (!ioc->hide_ir_msg)
1178 desc = "IR Physical Disk";
1179 break;
1180 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
1181 if (!ioc->hide_ir_msg)
1182 desc = "IR Configuration Change List";
1183 break;
1184 case MPI2_EVENT_LOG_ENTRY_ADDED:
1185 if (!ioc->hide_ir_msg)
1186 desc = "Log Entry Added";
1187 break;
1188 case MPI2_EVENT_TEMP_THRESHOLD:
1189 desc = "Temperature Threshold";
1190 break;
1191 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
1192 desc = "Cable Event";
1193 break;
1194 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1195 desc = "SAS Device Discovery Error";
1196 break;
1197 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
1198 desc = "PCIE Device Status Change";
1199 break;
1200 case MPI2_EVENT_PCIE_ENUMERATION:
1201 {
1202 Mpi26EventDataPCIeEnumeration_t *event_data =
1203 (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
1204 ioc_info(ioc, "PCIE Enumeration: (%s)",
1205 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
1206 "start" : "stop");
1207 if (event_data->EnumerationStatus)
			pr_cont(" enumeration_status(0x%08x)",
1209 le32_to_cpu(event_data->EnumerationStatus));
1210 pr_cont("\n");
1211 return;
1212 }
1213 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1214 desc = "PCIE Topology Change List";
1215 break;
1216 }
1217
1218 if (!desc)
1219 return;
1220
1221 ioc_info(ioc, "%s\n", desc);
1222}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
1229static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
1231{
1232 union loginfo_type {
1233 u32 loginfo;
1234 struct {
1235 u32 subcode:16;
1236 u32 code:8;
1237 u32 originator:4;
1238 u32 bus_type:4;
1239 } dw;
1240 };
1241 union loginfo_type sas_loginfo;
1242 char *originator_str = NULL;
1243
1244 sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
1246 return;

	/* each nexus loss loginfo */
1249 if (log_info == 0x31170000)
1250 return;

	/* eat the loginfos associated with task aborts */
1253 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
1254 0x31140000 || log_info == 0x31130000))
1255 return;
1256
1257 switch (sas_loginfo.dw.originator) {
1258 case 0:
1259 originator_str = "IOP";
1260 break;
1261 case 1:
1262 originator_str = "PL";
1263 break;
1264 case 2:
1265 if (!ioc->hide_ir_msg)
1266 originator_str = "IR";
1267 else
1268 originator_str = "WarpDrive";
1269 break;
1270 }
1271
1272 ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1273 log_info,
1274 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
1275}
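
/*
 * Worked example for the decode above: log_info 0x31170000 (the "nexus
 * loss" value filtered out earlier) splits into bus_type 0x3 (SAS),
 * originator 0x1 (PL), code 0x17 and subcode 0x0000.
 */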

/**
 * _base_display_reply_info - handle reply descriptors depending on IOC Status
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 */
1284static void
1285_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1286 u32 reply)
1287{
1288 MPI2DefaultReply_t *mpi_reply;
1289 u16 ioc_status;
1290 u32 loginfo = 0;
1291
1292 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1293 if (unlikely(!mpi_reply)) {
1294 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
1295 __FILE__, __LINE__, __func__);
1296 return;
1297 }
1298 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
1299
1300 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
1301 (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc, mpi_reply,
1303 mpt3sas_base_get_msg_frame(ioc, smid));
1304 }
1305
1306 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
1307 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
1308 _base_sas_log_info(ioc, loginfo);
1309 }
1310
1311 if (ioc_status || loginfo) {
1312 ioc_status &= MPI2_IOCSTATUS_MASK;
1313 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1314 }
1315}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
1328u8
1329mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1330 u32 reply)
1331{
1332 MPI2DefaultReply_t *mpi_reply;
1333
1334 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1335 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
1336 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
1337
1338 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
1339 return 1;
1340
1341 ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
1342 if (mpi_reply) {
1343 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
1344 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
1345 }
1346 ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
1347
1348 complete(&ioc->base_cmds.done);
1349 return 1;
1350}

/**
 * _base_async_event - main callback handler for firmware asyn events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
1362static u8
1363_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1364{
1365 Mpi2EventNotificationReply_t *mpi_reply;
1366 Mpi2EventAckRequest_t *ack_request;
1367 u16 smid;
1368 struct _event_ack_list *delayed_event_ack;
1369
1370 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1371 if (!mpi_reply)
1372 return 1;
1373 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
1374 return 1;
1375
1376 _base_display_event_data(ioc, mpi_reply);
1377
1378 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
1379 goto out;
1380 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1381 if (!smid) {
1382 delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
1383 GFP_ATOMIC);
1384 if (!delayed_event_ack)
1385 goto out;
1386 INIT_LIST_HEAD(&delayed_event_ack->list);
1387 delayed_event_ack->Event = mpi_reply->Event;
1388 delayed_event_ack->EventContext = mpi_reply->EventContext;
1389 list_add_tail(&delayed_event_ack->list,
1390 &ioc->delayed_event_ack_list);
1391 dewtprintk(ioc,
1392 ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1393 le16_to_cpu(mpi_reply->Event)));
1394 goto out;
1395 }
1396
1397 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1398 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1399 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1400 ack_request->Event = mpi_reply->Event;
1401 ack_request->EventContext = mpi_reply->EventContext;
1402 ack_request->VF_ID = 0;
1403 ack_request->VP_ID = 0;
1404 ioc->put_smid_default(ioc, smid);
1405
1406 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1413
1414 return 1;
1415}
1416
1417static struct scsiio_tracker *
1418_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1419{
1420 struct scsi_cmnd *cmd;
1421
1422 if (WARN_ON(!smid) ||
1423 WARN_ON(smid >= ioc->hi_priority_smid))
1424 return NULL;
1425
1426 cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1427 if (cmd)
1428 return scsi_cmd_priv(cmd);
1429
1430 return NULL;
1431}

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
 */
1440static u8
1441_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1442{
1443 int i;
1444 u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1445 u8 cb_idx = 0xFF;
1446
1447 if (smid < ioc->hi_priority_smid) {
1448 struct scsiio_tracker *st;
1449
1450 if (smid < ctl_smid) {
1451 st = _get_st_from_smid(ioc, smid);
1452 if (st)
1453 cb_idx = st->cb_idx;
1454 } else if (smid == ctl_smid)
1455 cb_idx = ioc->ctl_cb_idx;
1456 } else if (smid < ioc->internal_smid) {
1457 i = smid - ioc->hi_priority_smid;
1458 cb_idx = ioc->hpr_lookup[i].cb_idx;
1459 } else if (smid <= ioc->hba_queue_depth) {
1460 i = smid - ioc->internal_smid;
1461 cb_idx = ioc->internal_lookup[i].cb_idx;
1462 }
1463 return cb_idx;
1464}
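
/*
 * Rough smid-space layout implied by the checks above (all boundaries
 * live in the adapter object and vary per controller):
 *
 *   [1 .. ctl_smid]                        SCSI IO frames, with ctl_smid
 *                                          itself reserved for the ctl module
 *   [hi_priority_smid .. internal_smid-1]  high-priority frames
 *   [internal_smid .. hba_queue_depth]     driver-internal frames
 */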

/**
 * mpt3sas_base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 */
1472void
1473mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1474{
1475 u32 him_register;
1476
1477 ioc->mask_interrupts = 1;
1478 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1479 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1480 writel(him_register, &ioc->chip->HostInterruptMask);
1481 ioc->base_readl(&ioc->chip->HostInterruptMask);
1482}

/**
 * mpt3sas_base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 */
1490void
1491mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1492{
1493 u32 him_register;
1494
1495 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1496 him_register &= ~MPI2_HIM_RIM;
1497 writel(him_register, &ioc->chip->HostInterruptMask);
1498 ioc->mask_interrupts = 0;
1499}
1500
1501union reply_descriptor {
1502 u64 word;
1503 struct {
1504 u32 low;
1505 u32 high;
1506 } u;
1507};
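
/*
 * A descriptor whose 64 bits are all ones (rd.u.low and rd.u.high both
 * UINT_MAX) marks an unused slot; see the poll loop below, which also
 * re-marks consumed descriptors with ULLONG_MAX.
 */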
1508
1509static u32 base_mod64(u64 dividend, u32 divisor)
1510{
1511 u32 remainder;
1512
1513 if (!divisor)
1514 pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
1515 remainder = do_div(dividend, divisor);
1516 return remainder;
1517}
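
/*
 * Example: base_mod64(0x100000003ULL, 4) returns 3. do_div() is used so
 * that the 64-bit modulo also works on 32-bit platforms.
 */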

/**
 * _base_process_reply_queue - Process reply descriptors from reply
 *		descriptor post queue.
 * @reply_q: per IRQ's reply queue object.
 *
 * Return: number of reply descriptors processed from reply
 *		descriptor queue.
 */
1527static int
1528_base_process_reply_queue(struct adapter_reply_queue *reply_q)
1529{
1530 union reply_descriptor rd;
1531 u64 completed_cmds;
1532 u8 request_descript_type;
1533 u16 smid;
1534 u8 cb_idx;
1535 u32 reply;
1536 u8 msix_index = reply_q->msix_index;
1537 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1538 Mpi2ReplyDescriptorsUnion_t *rpf;
1539 u8 rc;
1540
1541 completed_cmds = 0;
1542 if (!atomic_add_unless(&reply_q->busy, 1, 1))
1543 return completed_cmds;
1544
1545 rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1546 request_descript_type = rpf->Default.ReplyFlags
1547 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1548 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1549 atomic_dec(&reply_q->busy);
1550 return completed_cmds;
1551 }
1552
1553 cb_idx = 0xFF;
1554 do {
1555 rd.word = le64_to_cpu(rpf->Words);
1556 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1557 goto out;
1558 reply = 0;
1559 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1560 if (request_descript_type ==
1561 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1562 request_descript_type ==
1563 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1564 request_descript_type ==
1565 MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1566 cb_idx = _base_get_cb_idx(ioc, smid);
1567 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1568 (likely(mpt_callbacks[cb_idx] != NULL))) {
1569 rc = mpt_callbacks[cb_idx](ioc, smid,
1570 msix_index, 0);
1571 if (rc)
1572 mpt3sas_base_free_smid(ioc, smid);
1573 }
1574 } else if (request_descript_type ==
1575 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1576 reply = le32_to_cpu(
1577 rpf->AddressReply.ReplyFrameAddress);
1578 if (reply > ioc->reply_dma_max_address ||
1579 reply < ioc->reply_dma_min_address)
1580 reply = 0;
1581 if (smid) {
1582 cb_idx = _base_get_cb_idx(ioc, smid);
1583 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1584 (likely(mpt_callbacks[cb_idx] != NULL))) {
1585 rc = mpt_callbacks[cb_idx](ioc, smid,
1586 msix_index, reply);
1587 if (reply)
1588 _base_display_reply_info(ioc,
1589 smid, msix_index, reply);
1590 if (rc)
1591 mpt3sas_base_free_smid(ioc,
1592 smid);
1593 }
1594 } else {
1595 _base_async_event(ioc, msix_index, reply);
1596 }

			/* reply free queue handling */
1599 if (reply) {
1600 ioc->reply_free_host_index =
1601 (ioc->reply_free_host_index ==
1602 (ioc->reply_free_queue_depth - 1)) ?
1603 0 : ioc->reply_free_host_index + 1;
1604 ioc->reply_free[ioc->reply_free_host_index] =
1605 cpu_to_le32(reply);
1606 if (ioc->is_mcpu_endpoint)
1607 _base_clone_reply_to_sys_mem(ioc,
1608 reply,
1609 ioc->reply_free_host_index);
1610 writel(ioc->reply_free_host_index,
1611 &ioc->chip->ReplyFreeHostIndex);
1612 }
1613 }
1614
1615 rpf->Words = cpu_to_le64(ULLONG_MAX);
1616 reply_q->reply_post_host_index =
1617 (reply_q->reply_post_host_index ==
1618 (ioc->reply_post_queue_depth - 1)) ? 0 :
1619 reply_q->reply_post_host_index + 1;
1620 request_descript_type =
1621 reply_q->reply_post_free[reply_q->reply_post_host_index].
1622 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1623 completed_cmds++;
1624
1625
1626
1627
1628
1629 if (completed_cmds >= ioc->thresh_hold) {
1630 if (ioc->combined_reply_queue) {
1631 writel(reply_q->reply_post_host_index |
1632 ((msix_index & 7) <<
1633 MPI2_RPHI_MSIX_INDEX_SHIFT),
1634 ioc->replyPostRegisterIndex[msix_index/8]);
1635 } else {
1636 writel(reply_q->reply_post_host_index |
1637 (msix_index <<
1638 MPI2_RPHI_MSIX_INDEX_SHIFT),
1639 &ioc->chip->ReplyPostHostIndex);
1640 }
1641 if (!reply_q->irq_poll_scheduled) {
1642 reply_q->irq_poll_scheduled = true;
1643 irq_poll_sched(&reply_q->irqpoll);
1644 }
1645 atomic_dec(&reply_q->busy);
1646 return completed_cmds;
1647 }
1648 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1649 goto out;
1650 if (!reply_q->reply_post_host_index)
1651 rpf = reply_q->reply_post_free;
1652 else
1653 rpf++;
1654 } while (1);
1655
1656 out:
1657
1658 if (!completed_cmds) {
1659 atomic_dec(&reply_q->busy);
1660 return completed_cmds;
1661 }
1662
1663 if (ioc->is_warpdrive) {
1664 writel(reply_q->reply_post_host_index,
1665 ioc->reply_post_host_index[msix_index]);
1666 atomic_dec(&reply_q->busy);
1667 return completed_cmds;
1668 }

	/* Update Reply Post Host Index.
	 * For those HBA's which support combined reply queue feature
	 * 1. Get the correct Supplemental Reply Post Host Index Register.
	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
	 *    Index Register address bank i.e replyPostRegisterIndex[],
	 * 2. Then update this register with new reply host index value
	 *    in ReplyPostIndex field and the MSIxIndex field with
	 *    msix_index value reduced to a value between 0 and 7,
	 *    using a modulo 8 operation. Since each Supplemental Reply Post
	 *    Host Index Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's just update the Default Reply Post Host Index
	 *    register with new reply host index value in ReplyPostIndex Field
	 *    and msix_index value in MSIxIndex field.
	 */
1685 if (ioc->combined_reply_queue)
1686 writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
1687 MPI2_RPHI_MSIX_INDEX_SHIFT),
1688 ioc->replyPostRegisterIndex[msix_index/8]);
1689 else
1690 writel(reply_q->reply_post_host_index | (msix_index <<
1691 MPI2_RPHI_MSIX_INDEX_SHIFT),
1692 &ioc->chip->ReplyPostHostIndex);
1693 atomic_dec(&reply_q->busy);
1694 return completed_cmds;
1695}

/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
 */
1704static irqreturn_t
1705_base_interrupt(int irq, void *bus_id)
1706{
1707 struct adapter_reply_queue *reply_q = bus_id;
1708 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1709
1710 if (ioc->mask_interrupts)
1711 return IRQ_NONE;
1712 if (reply_q->irq_poll_scheduled)
1713 return IRQ_HANDLED;
1714 return ((_base_process_reply_queue(reply_q) > 0) ?
1715 IRQ_HANDLED : IRQ_NONE);
1716}

/**
 * _base_irqpoll - IRQ poll callback handler
 * @irqpoll: irq_poll object
 * @budget: irq poll weight
 *
 * Return: number of reply descriptors processed
 */
1725static int
1726_base_irqpoll(struct irq_poll *irqpoll, int budget)
1727{
1728 struct adapter_reply_queue *reply_q;
1729 int num_entries = 0;
1730
1731 reply_q = container_of(irqpoll, struct adapter_reply_queue,
1732 irqpoll);
1733 if (reply_q->irq_line_enable) {
1734 disable_irq_nosync(reply_q->os_irq);
1735 reply_q->irq_line_enable = false;
1736 }
1737 num_entries = _base_process_reply_queue(reply_q);
1738 if (num_entries < budget) {
1739 irq_poll_complete(irqpoll);
1740 reply_q->irq_poll_scheduled = false;
1741 reply_q->irq_line_enable = true;
1742 enable_irq(reply_q->os_irq);

		/*
		 * Go for one more round of processing the
		 * reply descriptor post queue in case the HBA
		 * Firmware has posted some reply descriptors
		 * while re-enabling the IRQ.
		 */
1749 _base_process_reply_queue(reply_q);
1750 }
1751
1752 return num_entries;
1753}

/**
 * _base_init_irqpolls - initialize IRQ polls
 * @ioc: per adapter object
 *
 * Return: nothing
 */
1761static void
1762_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1763{
1764 struct adapter_reply_queue *reply_q, *next;
1765
1766 if (list_empty(&ioc->reply_queue_list))
1767 return;
1768
1769 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1770 irq_poll_init(&reply_q->irqpoll,
1771 ioc->hba_queue_depth/4, _base_irqpoll);
1772 reply_q->irq_poll_scheduled = false;
1773 reply_q->irq_line_enable = true;
1774 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1775 reply_q->msix_index);
1776 }
1777}

/**
 * _base_is_controller_msix_enabled - does the controller support
 *	multi-reply queues?
 * @ioc: per adapter object
 *
 * Return: 1 if controller is MSIX enabled, else 0.
 */
1785static inline int
1786_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1787{
1788 return (ioc->facts.IOCCapabilities &
1789 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1790}

/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * @poll: poll over reply descriptor pools in case the interrupt for a
 *		timed-out SCSI command got delayed
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 */
1801void
1802mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
1803{
1804 struct adapter_reply_queue *reply_q;
1805

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
1809 if (!_base_is_controller_msix_enabled(ioc))
1810 return;
1811
1812 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1813 if (ioc->shost_recovery || ioc->remove_host ||
1814 ioc->pci_error_recovery)
1815 return;
1816
1817 if (reply_q->msix_index == 0)
1818 continue;
1819 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1820 if (reply_q->irq_poll_scheduled) {
			/* Calling irq_poll_disable will wait for any pending
			 * callbacks to have completed.
			 */
1824 irq_poll_disable(&reply_q->irqpoll);
1825 irq_poll_enable(&reply_q->irqpoll);
			/* check how the scheduled poll has ended,
			 * clean up only if necessary
			 */
1829 if (reply_q->irq_poll_scheduled) {
1830 reply_q->irq_poll_scheduled = false;
1831 reply_q->irq_line_enable = true;
1832 enable_irq(reply_q->os_irq);
1833 }
1834 }
1835 }
1836 if (poll)
1837 _base_process_reply_queue(reply_q);
1838}

/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 */
1844void
1845mpt3sas_base_release_callback_handler(u8 cb_idx)
1846{
1847 mpt_callbacks[cb_idx] = NULL;
1848}

/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt
 *	callback handler
 * @cb_func: callback function
 *
 * Return: index.
 */
1856u8
1857mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1858{
1859 u8 cb_idx;
1860
1861 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1862 if (mpt_callbacks[cb_idx] == NULL)
1863 break;
1864
1865 mpt_callbacks[cb_idx] = cb_func;
1866 return cb_idx;
1867}
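
/*
 * Typical use, as a sketch: each sub-module registers its completion
 * routine once at load time and tags its requests with the returned
 * index, e.g.
 *
 *	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
 */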

/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt
 *	callback handler table
 */
1872void
1873mpt3sas_base_initialize_callback_handler(void)
1874{
1875 u8 cb_idx;
1876
1877 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1878 mpt3sas_base_release_callback_handler(cb_idx);
1879}

/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to insure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */
1891static void
1892_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1893{
1894 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1895 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1896 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1897 MPI2_SGE_FLAGS_SHIFT);
1898 ioc->base_add_sg_single(paddr, flags_length, -1);
1899}

/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */
1907static void
1908_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1909{
1910 Mpi2SGESimple32_t *sgel = paddr;
1911
1912 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1913 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1914 sgel->FlagsLength = cpu_to_le32(flags_length);
1915 sgel->Address = cpu_to_le32(dma_addr);
1916}

/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */
1925static void
1926_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1927{
1928 Mpi2SGESimple64_t *sgel = paddr;
1929
1930 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1931 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1932 sgel->FlagsLength = cpu_to_le32(flags_length);
1933 sgel->Address = cpu_to_le64(dma_addr);
1934}

/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Return: chain tracker from chain_lookup table using key as
 * smid and smid's chain_offset.
 */
1944static struct chain_tracker *
1945_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1946 struct scsi_cmnd *scmd)
1947{
1948 struct chain_tracker *chain_req;
1949 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1950 u16 smid = st->smid;
1951 u8 chain_offset =
1952 atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
1953
1954 if (chain_offset == ioc->chains_needed_per_io)
1955 return NULL;
1956
1957 chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
1958 atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
1959 return chain_req;
1960}

/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */
1972static void
1973_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1974 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1975 size_t data_in_sz)
1976{
1977 u32 sgl_flags;
1978
1979 if (!data_out_sz && !data_in_sz) {
1980 _base_build_zero_len_sge(ioc, psge);
1981 return;
1982 }
1983
1984 if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
1986 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1987 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1988 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1989 ioc->base_add_sg_single(psge, sgl_flags |
1990 data_out_sz, data_out_dma);

		/* incr sgel */
1993 psge += ioc->sge_size;

		/* READ sgel last */
1996 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1997 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1998 MPI2_SGE_FLAGS_END_OF_LIST);
1999 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2000 ioc->base_add_sg_single(psge, sgl_flags |
2001 data_in_sz, data_in_dma);
2002 } else if (data_out_sz) {
2003 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2004 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2005 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
2006 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2007 ioc->base_add_sg_single(psge, sgl_flags |
2008 data_out_sz, data_out_dma);
2009 } else if (data_in_sz) {
2010 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2011 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2012 MPI2_SGE_FLAGS_END_OF_LIST);
2013 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2014 ioc->base_add_sg_single(psge, sgl_flags |
2015 data_in_sz, data_in_dma);
2016 }
2017}
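
/*
 * Caller sketch (made-up sizes): a config-page read supplies no WRITE
 * buffer and one READ buffer, yielding a single READ SGE:
 *
 *	_base_build_sg(ioc, &mpi_request->PageBufferSGE,
 *	    0, 0, page_dma, page_sz);
 */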

/**
 * _base_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
 * entirely described by PRP1, then PRP2 is not used.  If needed, PRP2 is used
 * to describe a larger data buffer.  If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a
 * PRP list located elsewhere in memory to describe the remaining data memory
 * segments.  The PRP list is built in the contiguous per-smid pcie_sgl pool.
 *
 * Each PRP entry in the list describes one physical memory page.  The first
 * data entry may carry an offset within the page; all other entries must be
 * page aligned.  The last PRP entry within a memory page of the list is a
 * pointer to the next page of the list when more pages are needed.
 *
 * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */
2075static void
2076_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2077 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2078 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2079 size_t data_in_sz)
2080{
2081 int prp_size = NVME_PRP_SIZE;
2082 __le64 *prp_entry, *prp1_entry, *prp2_entry;
2083 __le64 *prp_page;
2084 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
2085 u32 offset, entry_len;
2086 u32 page_mask_result, page_mask;
2087 size_t length;
2088 struct mpt3sas_nvme_cmd *nvme_cmd =
2089 (void *)nvme_encap_request->NVMe_Command;
2090
	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
2095 if (!data_in_sz && !data_out_sz)
2096 return;
2097 prp1_entry = &nvme_cmd->prp1;
2098 prp2_entry = &nvme_cmd->prp2;
2099 prp_entry = prp1_entry;
2100
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
2104 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2105 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	/*
	 * Check if we are within 1 entry of a page boundary; we don't
	 * want our first entry to be a PRP List entry.
	 */
2111 page_mask = ioc->page_size - 1;
2112 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2113 if (!page_mask_result) {
		/* Bump up to next page boundary. */
2115 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2116 prp_page_dma = prp_page_dma + prp_size;
2117 }
2118
	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
2123 prp_entry_dma = prp_page_dma;

	/* Get physical address and length of the data buffer. */
2126 if (data_in_sz) {
2127 dma_addr = data_in_dma;
2128 length = data_in_sz;
2129 } else {
2130 dma_addr = data_out_dma;
2131 length = data_out_sz;
2132 }

	/* Loop while the length is not zero. */
2135 while (length) {
		/*
		 * Check if we need to put a list pointer here, i.e. if we are
		 * at page boundary - prp_size (8 bytes).
		 */
2140 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2141 if (!page_mask_result) {
			/*
			 * This is the last entry in a PRP List, so put a PRP
			 * list pointer here: bump the current address to the
			 * next page boundary, store it as the list pointer,
			 * and continue filling entries from the start of that
			 * (contiguous) next page.
			 */
2154 prp_entry_dma++;
2155 *prp_entry = cpu_to_le64(prp_entry_dma);
2156 prp_entry++;
2157 }

		/* Need to handle if entry will be part of a page. */
2160 offset = dma_addr & page_mask;
2161 entry_len = ioc->page_size - offset;
2162
2163 if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
2168 *prp1_entry = cpu_to_le64(dma_addr);

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
2174 prp_entry = prp2_entry;
2175 } else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer?  If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
2181 if (length > ioc->page_size) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
2188 *prp2_entry = cpu_to_le64(prp_entry_dma);

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
2194 prp_entry = prp_page;
2195 } else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
2200 *prp2_entry = cpu_to_le64(dma_addr);
2201 }
2202 } else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
2210 *prp_entry = cpu_to_le64(dma_addr);
2211 prp_entry++;
2212 prp_entry_dma++;
2213 }

		/*
		 * Bump the phys address of the command's data buffer by the
		 * entry_len.
		 */
2219 dma_addr += entry_len;

		/* Decrement length accounting for last partial page. */
2222 if (entry_len > length)
2223 length = 0;
2224 else
2225 length -= entry_len;
2226 }
2227}
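
/*
 * Worked example, assuming a 4 KB device page: for a page-aligned 16 KB
 * transfer, PRP1 gets the address of the first 4 KB page, PRP2 becomes a
 * PRP list pointer into the per-smid pcie_sgl pool, and that list holds
 * the remaining three page addresses.
 */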

/**
 * base_make_prp_nvme - Prepare PRPs (Physical Region Pages) -
 *			SGLs specific to NVMe drives only
 *
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
 * @mpi_request: mpi request
 * @smid: msg Index
 * @sge_count: scatter gather element count.
 *
 * Return: nothing.
 */
2242static void
2243base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2244 struct scsi_cmnd *scmd,
2245 Mpi25SCSIIORequest_t *mpi_request,
2246 u16 smid, int sge_count)
2247{
2248 int sge_len, num_prp_in_chain = 0;
2249 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2250 __le64 *curr_buff;
2251 dma_addr_t msg_dma, sge_addr, offset;
2252 u32 page_mask, page_mask_result;
2253 struct scatterlist *sg_scmd;
2254 u32 first_prp_len;
2255 int data_len = scsi_bufflen(scmd);
2256 u32 nvme_pg_size;
2257
2258 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);

	/*
	 * NVMe has a very convoluted PRP format.  One PRP is required
	 * for each page or partial page. The driver needs to split up OS
	 * sg_segment entries if they are longer than one page or cross a
	 * page boundary.  The driver also has to insert a PRP list pointer
	 * entry as the last entry in each physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first
	 * SGL entry in the main message as IEEE 64 format.  The 2nd
	 * entry in the main message is the chain element, and the rest
	 * of the PRP entries are built in the contiguous pcie buffer.
	 */
2271 page_mask = nvme_pg_size - 1;

	/*
	 * Native SGL is needed.
	 * Put a chain element in the main message frame that points to the
	 * first chain buffer.
	 *
	 * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
	 *        a native SGL.
	 */

	/* Set main message chain element pointer */
2283 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;

	/*
	 * For NVMe the chain element needs to be the 2nd SG entry in the main
	 * message.
	 */
2288 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2289 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));

	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.  Normal chain buffers can't be used
	 * because each chain buffer would need to be the size of an OS
	 * page (4k).
	 */
2297 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2298 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2299
2300 main_chain_element->Address = cpu_to_le64(msg_dma);
2301 main_chain_element->NextChainOffset = 0;
2302 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2303 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2304 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

	/* Build first PRP; this SGE need not be page aligned */
2307 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2308 sg_scmd = scsi_sglist(scmd);
2309 sge_addr = sg_dma_address(sg_scmd);
2310 sge_len = sg_dma_len(sg_scmd);
2311
2312 offset = sge_addr & page_mask;
2313 first_prp_len = nvme_pg_size - offset;
2314
2315 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2316 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2317
2318 data_len -= first_prp_len;
2319
2320 if (sge_len > first_prp_len) {
2321 sge_addr += first_prp_len;
2322 sge_len -= first_prp_len;
2323 } else if (data_len && (sge_len == first_prp_len)) {
2324 sg_scmd = sg_next(sg_scmd);
2325 sge_addr = sg_dma_address(sg_scmd);
2326 sge_len = sg_dma_len(sg_scmd);
2327 }
2328
2329 for (;;) {
2330 offset = sge_addr & page_mask;

		/* Put PRP pointer due to page boundary */
2333 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2334 if (unlikely(!page_mask_result)) {
2335 scmd_printk(KERN_NOTICE,
2336 scmd, "page boundary curr_buff: 0x%p\n",
2337 curr_buff);
2338 msg_dma += 8;
2339 *curr_buff = cpu_to_le64(msg_dma);
2340 curr_buff++;
2341 num_prp_in_chain++;
2342 }
2343
2344 *curr_buff = cpu_to_le64(sge_addr);
2345 curr_buff++;
2346 msg_dma += 8;
2347 num_prp_in_chain++;
2348
2349 sge_addr += nvme_pg_size;
2350 sge_len -= nvme_pg_size;
2351 data_len -= nvme_pg_size;
2352
2353 if (data_len <= 0)
2354 break;
2355
2356 if (sge_len > 0)
2357 continue;
2358
2359 sg_scmd = sg_next(sg_scmd);
2360 sge_addr = sg_dma_address(sg_scmd);
2361 sge_len = sg_dma_len(sg_scmd);
2362 }
2363
2364 main_chain_element->Length =
2365 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2366 return;
2367}

/**
 * base_is_prp_possible - Check if PRP can be built or not
 * @ioc: per adapter object
 * @pcie_device: points to the PCIe device's info
 * @scmd: scsi command
 * @sge_count: scatter gather element count.
 *
 * Return: true: PRPs can be built
 *	   false: IEEE SGLs need to be built
 */
2370base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2371 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2372{
2373 u32 data_length = 0;
2374 bool build_prp = true;
2375
2376 data_length = scsi_bufflen(scmd);
2377 if (pcie_device &&
2378 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2379 build_prp = false;
2380 return build_prp;
2381 }

	/* If Datalength is <= 16K and the number of SGE entries is <= 2,
	 * we build an IEEE SGL
	 */
2386 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2387 build_prp = false;
2388
2389 return build_prp;
2390}
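
/*
 * Example: with NVME_PRP_PAGE_SIZE of 4 KB, an I/O of up to 16 KB that
 * maps to at most two SG entries stays on the native IEEE SGL path;
 * anything larger or more fragmented is translated to PRPs.
 */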

/**
 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
 * determine if the driver needs to build a native SGL.  If so, that native
 * SGL is built in the special contiguous buffers allocated especially for
 * PCIe SGL creation.  If the driver will not build a native SGL, return
 * 1 and a normal IEEE SGL will be built.  Currently this routine
 * supports NVMe.
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 * @scmd: scsi command
 * @pcie_device: points to the PCIe device's info
 *
 * Return: 0 if native SGL was built, 1 if no SGL was built
 */
2407static int
2408_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2409 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2410 struct _pcie_device *pcie_device)
2411{
2412 int sges_left;

	/* Get the SG list pointer and info. */
2415 sges_left = scsi_dma_map(scmd);
2416 if (sges_left < 0) {
2417 sdev_printk(KERN_ERR, scmd->device,
2418 "scsi_dma_map failed: request for %d bytes!\n",
2419 scsi_bufflen(scmd));
2420 return 1;
2421 }

	/* Check if we need to build a native SG list. */
2424 if (base_is_prp_possible(ioc, pcie_device,
2425 scmd, sges_left) == 0) {
		/* PRPs are not required; fall back to an IEEE SGL. */
2427 goto out;
2428 }

	/*
	 * Build native NVMe PRP.
	 */
2433 base_make_prp_nvme(ioc, scmd, mpi_request,
2434 smid, sges_left);
2435
2436 return 0;
2437out:
2438 scsi_dma_unmap(scmd);
2439 return 1;
2440}

/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: data transfer length
 * @dma_addr: Physical address
 */
2450static void
2451_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2452 dma_addr_t dma_addr)
2453{
2454 Mpi25IeeeSgeChain64_t *sgel = paddr;
2455
2456 sgel->Flags = flags;
2457 sgel->NextChainOffset = chain_offset;
2458 sgel->Length = cpu_to_le32(length);
2459 sgel->Address = cpu_to_le64(dma_addr);
2460}

/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to insure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */
2471static void
2472_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2473{
2474 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2475 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2476 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2477
2478 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2479}

/**
 * _base_build_sg_scmd - main sg creation routine
 *		pcie_device is unused here!
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Return: 0 success, anything else error
 */
2495static int
2496_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2497 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2498{
2499 Mpi2SCSIIORequest_t *mpi_request;
2500 dma_addr_t chain_dma;
2501 struct scatterlist *sg_scmd;
2502 void *sg_local, *chain;
2503 u32 chain_offset;
2504 u32 chain_length;
2505 u32 chain_flags;
2506 int sges_left;
2507 u32 sges_in_segment;
2508 u32 sgl_flags;
2509 u32 sgl_flags_last_element;
2510 u32 sgl_flags_end_buffer;
2511 struct chain_tracker *chain_req;
2512
2513 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
2516 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2517 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2518 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2519 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2520 << MPI2_SGE_FLAGS_SHIFT;
2521 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2522 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2523 << MPI2_SGE_FLAGS_SHIFT;
2524 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2525
2526 sg_scmd = scsi_sglist(scmd);
2527 sges_left = scsi_dma_map(scmd);
2528 if (sges_left < 0) {
2529 sdev_printk(KERN_ERR, scmd->device,
2530 "scsi_dma_map failed: request for %d bytes!\n",
2531 scsi_bufflen(scmd));
2532 return -ENOMEM;
2533 }
2534
2535 sg_local = &mpi_request->SGL;
2536 sges_in_segment = ioc->max_sges_in_main_message;
2537 if (sges_left <= sges_in_segment)
2538 goto fill_in_last_segment;
2539
2540 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2541 (sges_in_segment * ioc->sge_size))/4;

	/* fill in main message segment when there is a chain following */
2544 while (sges_in_segment) {
2545 if (sges_in_segment == 1)
2546 ioc->base_add_sg_single(sg_local,
2547 sgl_flags_last_element | sg_dma_len(sg_scmd),
2548 sg_dma_address(sg_scmd));
2549 else
2550 ioc->base_add_sg_single(sg_local, sgl_flags |
2551 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2552 sg_scmd = sg_next(sg_scmd);
2553 sg_local += ioc->sge_size;
2554 sges_left--;
2555 sges_in_segment--;
2556 }

	/* initializing the chain flags and pointers */
2559 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2560 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2561 if (!chain_req)
2562 return -1;
2563 chain = chain_req->chain_buffer;
2564 chain_dma = chain_req->chain_buffer_dma;
2565 do {
2566 sges_in_segment = (sges_left <=
2567 ioc->max_sges_in_chain_message) ? sges_left :
2568 ioc->max_sges_in_chain_message;
2569 chain_offset = (sges_left == sges_in_segment) ?
2570 0 : (sges_in_segment * ioc->sge_size)/4;
2571 chain_length = sges_in_segment * ioc->sge_size;
2572 if (chain_offset) {
2573 chain_offset = chain_offset <<
2574 MPI2_SGE_CHAIN_OFFSET_SHIFT;
2575 chain_length += ioc->sge_size;
2576 }
2577 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2578 chain_length, chain_dma);
2579 sg_local = chain;
2580 if (!chain_offset)
2581 goto fill_in_last_segment;
2582
2583
2584 while (sges_in_segment) {
2585 if (sges_in_segment == 1)
2586 ioc->base_add_sg_single(sg_local,
2587 sgl_flags_last_element |
2588 sg_dma_len(sg_scmd),
2589 sg_dma_address(sg_scmd));
2590 else
2591 ioc->base_add_sg_single(sg_local, sgl_flags |
2592 sg_dma_len(sg_scmd),
2593 sg_dma_address(sg_scmd));
2594 sg_scmd = sg_next(sg_scmd);
2595 sg_local += ioc->sge_size;
2596 sges_left--;
2597 sges_in_segment--;
2598 }
2599
2600 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2601 if (!chain_req)
2602 return -1;
2603 chain = chain_req->chain_buffer;
2604 chain_dma = chain_req->chain_buffer_dma;
2605 } while (1);
2606
2607
2608 fill_in_last_segment:
2609
2610
2611 while (sges_left) {
2612 if (sges_left == 1)
2613 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2614 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2615 else
2616 ioc->base_add_sg_single(sg_local, sgl_flags |
2617 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2618 sg_scmd = sg_next(sg_scmd);
2619 sg_local += ioc->sge_size;
2620 sges_left--;
2621 }
2622
2623 return 0;
2624}
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
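/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @pcie_device: points to the PCIe device's info, or NULL for non-NVMe devices
 *
 * Build the IEEE scatter gather table from the scatterlist of a scsi
 * request; for NVMe devices a native NVMe PRP list is attempted first.
 *
 * Return: 0 on success, anything else is an error.
 */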
2640static int
2641_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2642 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2643{
2644 Mpi25SCSIIORequest_t *mpi_request;
2645 dma_addr_t chain_dma;
2646 struct scatterlist *sg_scmd;
2647 void *sg_local, *chain;
2648 u32 chain_offset;
2649 u32 chain_length;
2650 int sges_left;
2651 u32 sges_in_segment;
2652 u8 simple_sgl_flags;
2653 u8 simple_sgl_flags_last;
2654 u8 chain_sgl_flags;
2655 struct chain_tracker *chain_req;
2656
2657 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2658
2659
2660 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2661 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2662 simple_sgl_flags_last = simple_sgl_flags |
2663 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2664 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2665 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2666
2667
2668 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2669 smid, scmd, pcie_device) == 0)) {
2670
2671 return 0;
2672 }
2673
2674 sg_scmd = scsi_sglist(scmd);
2675 sges_left = scsi_dma_map(scmd);
2676 if (sges_left < 0) {
2677 sdev_printk(KERN_ERR, scmd->device,
2678 "scsi_dma_map failed: request for %d bytes!\n",
2679 scsi_bufflen(scmd));
2680 return -ENOMEM;
2681 }
2682
2683 sg_local = &mpi_request->SGL;
2684 sges_in_segment = (ioc->request_sz -
2685 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2686 if (sges_left <= sges_in_segment)
2687 goto fill_in_last_segment;
2688
	mpi_request->ChainOffset = (sges_in_segment - 1) +
	    (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2691
2692
2693 while (sges_in_segment > 1) {
2694 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2695 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2696 sg_scmd = sg_next(sg_scmd);
2697 sg_local += ioc->sge_size_ieee;
2698 sges_left--;
2699 sges_in_segment--;
2700 }
2701
2702
2703 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2704 if (!chain_req)
2705 return -1;
2706 chain = chain_req->chain_buffer;
2707 chain_dma = chain_req->chain_buffer_dma;
2708 do {
2709 sges_in_segment = (sges_left <=
2710 ioc->max_sges_in_chain_message) ? sges_left :
2711 ioc->max_sges_in_chain_message;
2712 chain_offset = (sges_left == sges_in_segment) ?
2713 0 : sges_in_segment;
2714 chain_length = sges_in_segment * ioc->sge_size_ieee;
2715 if (chain_offset)
2716 chain_length += ioc->sge_size_ieee;
2717 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2718 chain_offset, chain_length, chain_dma);
2719
2720 sg_local = chain;
2721 if (!chain_offset)
2722 goto fill_in_last_segment;
2723
2724
2725 while (sges_in_segment) {
2726 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2727 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2728 sg_scmd = sg_next(sg_scmd);
2729 sg_local += ioc->sge_size_ieee;
2730 sges_left--;
2731 sges_in_segment--;
2732 }
2733
2734 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2735 if (!chain_req)
2736 return -1;
2737 chain = chain_req->chain_buffer;
2738 chain_dma = chain_req->chain_buffer_dma;
2739 } while (1);
2740
2741
2742 fill_in_last_segment:
2743
2744
2745 while (sges_left > 0) {
2746 if (sges_left == 1)
2747 _base_add_sg_single_ieee(sg_local,
2748 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2749 sg_dma_address(sg_scmd));
2750 else
2751 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2752 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2753 sg_scmd = sg_next(sg_scmd);
2754 sg_local += ioc->sge_size_ieee;
2755 sges_left--;
2756 }
2757
2758 return 0;
2759}
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
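/**
 * _base_build_sg_ieee - build generic sg for IEEE format
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */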
2770static void
2771_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2772 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2773 size_t data_in_sz)
2774{
2775 u8 sgl_flags;
2776
2777 if (!data_out_sz && !data_in_sz) {
2778 _base_build_zero_len_sge_ieee(ioc, psge);
2779 return;
2780 }
2781
2782 if (data_out_sz && data_in_sz) {
2783
2784 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2785 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2786 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2787 data_out_dma);
2788
2789
2790 psge += ioc->sge_size_ieee;
2791
2792
2793 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2794 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2795 data_in_dma);
2796 } else if (data_out_sz) {
2797 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2798 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2799 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2800 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2801 data_out_dma);
2802 } else if (data_in_sz) {
2803 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2804 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2805 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2806 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2807 data_in_dma);
2808 }
2809}
2810
2811#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2812
2813
2814
2815
2816
2817
2818
2819
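/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Return: 0 for success, non-zero for failure.
 */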
2820static int
2821_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2822{
2823 struct sysinfo s;
2824 int dma_mask;
2825
2826 if (ioc->is_mcpu_endpoint ||
2827 sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
2828 dma_get_required_mask(&pdev->dev) <= 32)
2829 dma_mask = 32;
2830
2831 else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
2832 dma_mask = 63;
2833 else
2834 dma_mask = 64;
2835
2836 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
2837 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
2838 return -ENODEV;
2839
2840 if (dma_mask > 32) {
2841 ioc->base_add_sg_single = &_base_add_sg_single_64;
2842 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2843 } else {
2844 ioc->base_add_sg_single = &_base_add_sg_single_32;
2845 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2846 }
2847
2848 si_meminfo(&s);
2849 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2850 dma_mask, convert_to_kb(s.totalram));
2851
2852 return 0;
2853}
2854
2855
2856
2857
2858
2859
2860
2861
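/**
 * _base_check_enable_msix - checks MSIX capability
 * @ioc: per adapter object
 *
 * Check to see if the card is capable of MSIX, and if the hardware and
 * firmware revisions are capable.
 *
 * Return: 0 if MSIX is supported, -EINVAL otherwise.
 */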
2862static int
2863_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2864{
2865 int base;
2866 u16 message_control;
2867
2868
2869
2870
2871 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2872 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2873 return -EINVAL;
2874 }
2875
2876 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2877 if (!base) {
2878 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2879 return -EINVAL;
2880 }
2881
2882
2883
2884 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2885 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2886 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2887 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2888 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2889 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2890 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2891 ioc->msix_vector_count = 1;
2892 else {
2893 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2894 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2895 }
2896 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2897 ioc->msix_vector_count));
2898 return 0;
2899}
2900
2901
2902
2903
2904
2905
2906
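/**
 * _base_free_irq - free irq
 * @ioc: per adapter object
 *
 * Freeing respective reply_queue from the list.
 */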
2907static void
2908_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2909{
2910 struct adapter_reply_queue *reply_q, *next;
2911
2912 if (list_empty(&ioc->reply_queue_list))
2913 return;
2914
2915 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2916 list_del(&reply_q->list);
2917 if (ioc->smp_affinity_enable)
2918 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2919 reply_q->msix_index), NULL);
2920 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2921 reply_q);
2922 kfree(reply_q);
2923 }
2924}
2925
2926
2927
2928
2929
2930
2931
2932
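/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Inserting respective reply_queue into the list.
 */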
2933static int
2934_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2935{
2936 struct pci_dev *pdev = ioc->pdev;
2937 struct adapter_reply_queue *reply_q;
2938 int r;
2939
2940 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2941 if (!reply_q) {
2942 ioc_err(ioc, "unable to allocate memory %zu!\n",
2943 sizeof(struct adapter_reply_queue));
2944 return -ENOMEM;
2945 }
2946 reply_q->ioc = ioc;
2947 reply_q->msix_index = index;
2948
2949 atomic_set(&reply_q->busy, 0);
2950 if (ioc->msix_enable)
2951 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2952 ioc->driver_name, ioc->id, index);
2953 else
2954 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2955 ioc->driver_name, ioc->id);
2956 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2957 IRQF_SHARED, reply_q->name, reply_q);
2958 if (r) {
2959 pr_err("%s: unable to allocate interrupt %d!\n",
2960 reply_q->name, pci_irq_vector(pdev, index));
2961 kfree(reply_q);
2962 return -EBUSY;
2963 }
2964
2965 INIT_LIST_HEAD(&reply_q->list);
2966 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2967 return 0;
2968}
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
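/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * Map each online cpu to an msix reply queue. When interrupt affinity is
 * managed by the PCI layer the mapping is taken from the irq affinity
 * masks; otherwise cpus are distributed across the queues in contiguous
 * groups.
 */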
2979static void
2980_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2981{
2982 unsigned int cpu, nr_cpus, nr_msix, index = 0;
2983 struct adapter_reply_queue *reply_q;
2984 int local_numa_node;
2985
2986 if (!_base_is_controller_msix_enabled(ioc))
2987 return;
2988
2989 if (ioc->msix_load_balance)
2990 return;
2991
2992 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2993
2994 nr_cpus = num_online_cpus();
2995 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2996 ioc->facts.MaxMSIxVectors);
2997 if (!nr_msix)
2998 return;
2999
3000 if (ioc->smp_affinity_enable) {
3001
3002
3003
3004
3005
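		/*
		 * Set irq affinity to the local NUMA node for the irqs
		 * corresponding to high iops queues.
		 */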
3006 if (ioc->high_iops_queues) {
3007 local_numa_node = dev_to_node(&ioc->pdev->dev);
3008 for (index = 0; index < ioc->high_iops_queues;
3009 index++) {
3010 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
3011 index), cpumask_of_node(local_numa_node));
3012 }
3013 }
3014
3015 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3016 const cpumask_t *mask;
3017
3018 if (reply_q->msix_index < ioc->high_iops_queues)
3019 continue;
3020
3021 mask = pci_irq_get_affinity(ioc->pdev,
3022 reply_q->msix_index);
3023 if (!mask) {
3024 ioc_warn(ioc, "no affinity for msi %x\n",
3025 reply_q->msix_index);
3026 goto fall_back;
3027 }
3028
3029 for_each_cpu_and(cpu, mask, cpu_online_mask) {
3030 if (cpu >= ioc->cpu_msix_table_sz)
3031 break;
3032 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3033 }
3034 }
3035 return;
3036 }
3037
3038fall_back:
3039 cpu = cpumask_first(cpu_online_mask);
3040 nr_msix -= ioc->high_iops_queues;
3041 index = 0;
3042
3043 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3044 unsigned int i, group = nr_cpus / nr_msix;
3045
3046 if (reply_q->msix_index < ioc->high_iops_queues)
3047 continue;
3048
3049 if (cpu >= nr_cpus)
3050 break;
3051
3052 if (index < nr_cpus % nr_msix)
3053 group++;
3054
3055 for (i = 0 ; i < group ; i++) {
3056 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3057 cpu = cpumask_next(cpu, cpu_online_mask);
3058 }
3059 index++;
3060 }
3061}
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
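/**
 * _base_check_and_enable_high_iops_queues - enable high iops mode
 * @ioc: per adapter object
 * @hba_msix_vector_count: msix vectors supported by HBA
 *
 * Enable high iops queues only if
 *  - HBA is an Aero/Sea controller and
 *  - the HBA supports MPT3SAS_GEN35_MAX_MSIX_QUEUES MSI-X vectors and
 *  - the online CPU count is at least MPT3SAS_HIGH_IOPS_REPLY_QUEUES and
 *  - the driver was loaded with the default max_msix_vectors parameter and
 *  - the system was not booted in kdump mode (reset_devices unset).
 */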
3077static void
3078_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3079 int hba_msix_vector_count)
3080{
3081 u16 lnksta, speed;
3082
3083 if (perf_mode == MPT_PERF_MODE_IOPS ||
3084 perf_mode == MPT_PERF_MODE_LATENCY) {
3085 ioc->high_iops_queues = 0;
3086 return;
3087 }
3088
3089 if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3090
3091 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3092 speed = lnksta & PCI_EXP_LNKSTA_CLS;
3093
3094 if (speed < 0x4) {
3095 ioc->high_iops_queues = 0;
3096 return;
3097 }
3098 }
3099
3100 if (!reset_devices && ioc->is_aero_ioc &&
3101 hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3102 num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3103 max_msix_vectors == -1)
3104 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3105 else
3106 ioc->high_iops_queues = 0;
3107}
3108
3109
3110
3111
3112
3113
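/**
 * _base_disable_msix - disables msix
 * @ioc: per adapter object
 */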
3114static void
3115_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3116{
3117 if (!ioc->msix_enable)
3118 return;
3119 pci_free_irq_vectors(ioc->pdev);
3120 ioc->msix_enable = 0;
3121}
3122
3123
3124
3125
3126
3127
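/**
 * _base_alloc_irq_vectors - allocate msix vectors
 * @ioc: per adapter object
 *
 * Return: number of allocated vectors on success, negative errno on failure.
 */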
3128static int
3129_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3130{
3131 int i, irq_flags = PCI_IRQ_MSIX;
3132 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3133 struct irq_affinity *descp = &desc;
3134
3135 if (ioc->smp_affinity_enable)
3136 irq_flags |= PCI_IRQ_AFFINITY;
3137 else
3138 descp = NULL;
3139
	ioc_info(ioc, "high iops queues: %d, reply queue count: %d\n",
	    ioc->high_iops_queues, ioc->reply_queue_count);
3142
3143 i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3144 ioc->high_iops_queues,
3145 ioc->reply_queue_count, irq_flags, descp);
3146
3147 return i;
3148}
3149
3150
3151
3152
3153
3154
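/**
 * _base_enable_msix - enables msix, failback to io_apic
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */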
3155static int
3156_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3157{
3158 int r;
3159 int i, local_max_msix_vectors;
3160 u8 try_msix = 0;
3161
3162 ioc->msix_load_balance = false;
3163
3164 if (msix_disable == -1 || msix_disable == 0)
3165 try_msix = 1;
3166
3167 if (!try_msix)
3168 goto try_ioapic;
3169
3170 if (_base_check_enable_msix(ioc) != 0)
3171 goto try_ioapic;
3172
3173 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3174 pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3175 ioc->cpu_count, max_msix_vectors);
3176 if (ioc->is_aero_ioc)
3177 _base_check_and_enable_high_iops_queues(ioc,
3178 ioc->msix_vector_count);
3179 ioc->reply_queue_count =
3180 min_t(int, ioc->cpu_count + ioc->high_iops_queues,
3181 ioc->msix_vector_count);
3182
3183 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3184 local_max_msix_vectors = (reset_devices) ? 1 : 8;
3185 else
3186 local_max_msix_vectors = max_msix_vectors;
3187
3188 if (local_max_msix_vectors > 0)
3189 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3190 ioc->reply_queue_count);
3191 else if (local_max_msix_vectors == 0)
3192 goto try_ioapic;
3193
3194
3195
3196
3197
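	/*
	 * Enable msix_load_balance only if combined reply queue mode is
	 * disabled on SAS3 & above generation HBA devices.
	 */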
3198 if (!ioc->combined_reply_queue &&
3199 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3200 ioc_info(ioc,
3201 "combined ReplyQueue is off, Enabling msix load balance\n");
3202 ioc->msix_load_balance = true;
3203 }
3204
3205
3206
3207
3208
3209 if (ioc->msix_load_balance)
3210 ioc->smp_affinity_enable = 0;
3211
3212 r = _base_alloc_irq_vectors(ioc);
3213 if (r < 0) {
3214 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3215 goto try_ioapic;
3216 }
3217
3218 ioc->msix_enable = 1;
3219 ioc->reply_queue_count = r;
3220 for (i = 0; i < ioc->reply_queue_count; i++) {
3221 r = _base_request_irq(ioc, i);
3222 if (r) {
3223 _base_free_irq(ioc);
3224 _base_disable_msix(ioc);
3225 goto try_ioapic;
3226 }
3227 }
3228
3229 ioc_info(ioc, "High IOPs queues : %s\n",
3230 ioc->high_iops_queues ? "enabled" : "disabled");
3231
3232 return 0;
3233
3234
3235 try_ioapic:
3236 ioc->high_iops_queues = 0;
3237 ioc_info(ioc, "High IOPs queues : disabled\n");
3238 ioc->reply_queue_count = 1;
3239 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3240 if (r < 0) {
3241 dfailprintk(ioc,
3242 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3243 r));
3244 } else
3245 r = _base_request_irq(ioc, 0);
3246
3247 return r;
3248}
3249
3250
3251
3252
3253
3254static void
3255mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3256{
3257 struct pci_dev *pdev = ioc->pdev;
3258
3259 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3260
3261 _base_free_irq(ioc);
3262 _base_disable_msix(ioc);
3263
3264 kfree(ioc->replyPostRegisterIndex);
3265 ioc->replyPostRegisterIndex = NULL;
3266
3267
3268 if (ioc->chip_phys) {
3269 iounmap(ioc->chip);
3270 ioc->chip_phys = 0;
3271 }
3272
3273 if (pci_is_enabled(pdev)) {
3274 pci_release_selected_regions(ioc->pdev, ioc->bars);
3275 pci_disable_pcie_error_reporting(pdev);
3276 pci_disable_device(pdev);
3277 }
3278}
3279
3280static int
3281_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3282
3283
3284
3285
3286
3287
3288
3289
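/**
 * _base_check_for_fault_and_issue_reset - check for IOC fault
 *	and if fault is detected, issue diag reset.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */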
3290static int
3291_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3292{
3293 u32 ioc_state;
3294 int rc = -EFAULT;
3295
3296 dinitprintk(ioc, pr_info("%s\n", __func__));
3297 if (ioc->pci_error_recovery)
3298 return 0;
3299 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3300 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3301
3302 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3303 mpt3sas_print_fault_code(ioc, ioc_state &
3304 MPI2_DOORBELL_DATA_MASK);
3305 rc = _base_diag_reset(ioc);
3306 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3307 MPI2_IOC_STATE_COREDUMP) {
3308 mpt3sas_print_coredump_info(ioc, ioc_state &
3309 MPI2_DOORBELL_DATA_MASK);
3310 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3311 rc = _base_diag_reset(ioc);
3312 }
3313
3314 return rc;
3315}
3316
3317
3318
3319
3320
3321
3322
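/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */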
3323int
3324mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3325{
3326 struct pci_dev *pdev = ioc->pdev;
3327 u32 memap_sz;
3328 u32 pio_sz;
3329 int i, r = 0, rc;
3330 u64 pio_chip = 0;
3331 phys_addr_t chip_phys = 0;
3332 struct adapter_reply_queue *reply_q;
3333
3334 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3335
3336 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3337 if (pci_enable_device_mem(pdev)) {
3338 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3339 ioc->bars = 0;
3340 return -ENODEV;
3341 }
3342
3343
3344 if (pci_request_selected_regions(pdev, ioc->bars,
3345 ioc->driver_name)) {
3346 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3347 ioc->bars = 0;
3348 r = -ENODEV;
3349 goto out_fail;
3350 }
3351
3352
3353 pci_enable_pcie_error_reporting(pdev);
3354
3355 pci_set_master(pdev);
3356
3357
3358 if (_base_config_dma_addressing(ioc, pdev) != 0) {
3359 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3360 r = -ENODEV;
3361 goto out_fail;
3362 }
3363
3364 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3365 (!memap_sz || !pio_sz); i++) {
3366 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3367 if (pio_sz)
3368 continue;
3369 pio_chip = (u64)pci_resource_start(pdev, i);
3370 pio_sz = pci_resource_len(pdev, i);
3371 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3372 if (memap_sz)
3373 continue;
3374 ioc->chip_phys = pci_resource_start(pdev, i);
3375 chip_phys = ioc->chip_phys;
3376 memap_sz = pci_resource_len(pdev, i);
3377 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3378 }
3379 }
3380
3381 if (ioc->chip == NULL) {
3382 ioc_err(ioc,
3383 "unable to map adapter memory! or resource not found\n");
3384 r = -EINVAL;
3385 goto out_fail;
3386 }
3387
3388 mpt3sas_base_mask_interrupts(ioc);
3389
3390 r = _base_get_ioc_facts(ioc);
3391 if (r) {
3392 rc = _base_check_for_fault_and_issue_reset(ioc);
3393 if (rc || (_base_get_ioc_facts(ioc)))
3394 goto out_fail;
3395 }
3396
3397 if (!ioc->rdpq_array_enable_assigned) {
3398 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3399 ioc->rdpq_array_enable_assigned = 1;
3400 }
3401
3402 r = _base_enable_msix(ioc);
3403 if (r)
3404 goto out_fail;
3405
3406 if (!ioc->is_driver_loading)
3407 _base_init_irqpolls(ioc);
3408
3409
3410
3411 if (ioc->combined_reply_queue) {
3412
3413
3414
3415
3416
3417
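		/*
		 * Cache the addresses of the supplemental reply post host
		 * index registers, one per group of reply queues, so the
		 * interrupt path can update them without recomputing the
		 * register offsets.
		 */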
3418 ioc->replyPostRegisterIndex = kcalloc(
3419 ioc->combined_reply_index_count,
3420 sizeof(resource_size_t *), GFP_KERNEL);
3421 if (!ioc->replyPostRegisterIndex) {
3422 ioc_err(ioc,
3423 "allocation for replyPostRegisterIndex failed!\n");
3424 r = -ENOMEM;
3425 goto out_fail;
3426 }
3427
3428 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3429 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3430 ((u8 __force *)&ioc->chip->Doorbell +
3431 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3432 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3433 }
3434 }
3435
3436 if (ioc->is_warpdrive) {
3437 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3438 &ioc->chip->ReplyPostHostIndex;
3439
3440 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3441 ioc->reply_post_host_index[i] =
3442 (resource_size_t __iomem *)
3443 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3444 * 4)));
3445 }
3446
3447 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3448 pr_info("%s: %s enabled: IRQ %d\n",
3449 reply_q->name,
3450 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3451 pci_irq_vector(ioc->pdev, reply_q->msix_index));
3452
3453 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3454 &chip_phys, ioc->chip, memap_sz);
3455 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3456 (unsigned long long)pio_chip, pio_sz);
3457
3458
3459 pci_save_state(pdev);
3460 return 0;
3461
3462 out_fail:
3463 mpt3sas_base_unmap_resources(ioc);
3464 return r;
3465}
3466
3467
3468
3469
3470
3471
3472
3473
3474void *
3475mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3476{
3477 return (void *)(ioc->request + (smid * ioc->request_sz));
3478}
3479
3480
3481
3482
3483
3484
3485
3486
3487void *
3488mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3489{
3490 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3491}
3492
3493
3494
3495
3496
3497
3498
3499
3500__le32
3501mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3502{
3503 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3504 SCSI_SENSE_BUFFERSIZE));
3505}
3506
3507
3508
3509
3510
3511
3512
3513
3514void *
3515mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3516{
3517 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3518}
3519
3520
3521
3522
3523
3524
3525
3526
3527dma_addr_t
3528mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3529{
3530 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3531}
3532
3533
3534
3535
3536
3537
3538
3539
3540void *
3541mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3542{
3543 if (!phys_addr)
3544 return NULL;
3545 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3546}
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557static inline u8
3558_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3559 struct scsi_cmnd *scmd)
3560{
3561
3562 if (ioc->msix_load_balance)
3563 return ioc->reply_queue_count ?
3564 base_mod64(atomic64_add_return(1,
3565 &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3566
3567 return ioc->cpu_msix_table[raw_smp_processor_id()];
3568}
3569
3570
3571
3572
3573
3574
3575
3576
3577inline unsigned long
3578_base_sdev_nr_inflight_request(struct request_queue *q)
3579{
3580 struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
3581
3582 return atomic_read(&hctx->nr_active);
3583}
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596static inline u8
3597_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3598 struct scsi_cmnd *scmd)
3599{
3600
3601
3602
3603
3604
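	/*
	 * Round robin the IO interrupts among the high iops reply queues
	 * in terms of batch count when the outstanding IO count on the
	 * target device exceeds the high iops queue depth.
	 */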
3605 if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
3606 MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3607 return base_mod64((
3608 atomic64_add_return(1, &ioc->high_iops_outstanding) /
3609 MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3610 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3611
3612 return _base_get_msix_index(ioc, scmd);
3613}
3614
3615
3616
3617
3618
3619
3620
3621
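/**
 * mpt3sas_base_get_smid - obtain a free smid from internal queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Return: smid (zero is invalid)
 */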
3622u16
3623mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3624{
3625 unsigned long flags;
3626 struct request_tracker *request;
3627 u16 smid;
3628
3629 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3630 if (list_empty(&ioc->internal_free_list)) {
3631 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3632 ioc_err(ioc, "%s: smid not available\n", __func__);
3633 return 0;
3634 }
3635
3636 request = list_entry(ioc->internal_free_list.next,
3637 struct request_tracker, tracker_list);
3638 request->cb_idx = cb_idx;
3639 smid = request->smid;
3640 list_del(&request->tracker_list);
3641 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3642 return smid;
3643}
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653u16
3654mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3655 struct scsi_cmnd *scmd)
3656{
3657 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3658 unsigned int tag = scmd->request->tag;
3659 u16 smid;
3660
3661 smid = tag + 1;
3662 request->cb_idx = cb_idx;
3663 request->smid = smid;
3664 request->scmd = scmd;
3665 INIT_LIST_HEAD(&request->chain_list);
3666 return smid;
3667}
3668
3669
3670
3671
3672
3673
3674
3675
3676u16
3677mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3678{
3679 unsigned long flags;
3680 struct request_tracker *request;
3681 u16 smid;
3682
3683 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3684 if (list_empty(&ioc->hpr_free_list)) {
3685 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3686 return 0;
3687 }
3688
3689 request = list_entry(ioc->hpr_free_list.next,
3690 struct request_tracker, tracker_list);
3691 request->cb_idx = cb_idx;
3692 smid = request->smid;
3693 list_del(&request->tracker_list);
3694 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3695 return smid;
3696}
3697
3698static void
3699_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3700{
3701
3702
3703
3704 if (ioc->shost_recovery && ioc->pending_io_count) {
3705 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3706 if (ioc->pending_io_count == 0)
3707 wake_up(&ioc->reset_wq);
3708 }
3709}
3710
3711void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3712 struct scsiio_tracker *st)
3713{
3714 if (WARN_ON(st->smid == 0))
3715 return;
3716 st->cb_idx = 0xFF;
3717 st->direct_io = 0;
3718 st->scmd = NULL;
3719 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3720 st->smid = 0;
3721}
3722
3723
3724
3725
3726
3727
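/**
 * mpt3sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 */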
3728void
3729mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3730{
3731 unsigned long flags;
3732 int i;
3733
3734 if (smid < ioc->hi_priority_smid) {
3735 struct scsiio_tracker *st;
3736 void *request;
3737
3738 st = _get_st_from_smid(ioc, smid);
3739 if (!st) {
3740 _base_recovery_check(ioc);
3741 return;
3742 }
3743
3744
3745 request = mpt3sas_base_get_msg_frame(ioc, smid);
3746 memset(request, 0, ioc->request_sz);
3747
3748 mpt3sas_base_clear_st(ioc, st);
3749 _base_recovery_check(ioc);
3750 return;
3751 }
3752
3753 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3754 if (smid < ioc->internal_smid) {
3755
3756 i = smid - ioc->hi_priority_smid;
3757 ioc->hpr_lookup[i].cb_idx = 0xFF;
3758 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3759 } else if (smid <= ioc->hba_queue_depth) {
3760
3761 i = smid - ioc->internal_smid;
3762 ioc->internal_lookup[i].cb_idx = 0xFF;
3763 list_add(&ioc->internal_lookup[i].tracker_list,
3764 &ioc->internal_free_list);
3765 }
3766 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3767}
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779static inline void
3780_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3781 spinlock_t *writeq_lock)
3782{
3783 unsigned long flags;
3784
3785 spin_lock_irqsave(writeq_lock, flags);
3786 __raw_writel((u32)(b), addr);
3787 __raw_writel((u32)(b >> 32), (addr + 4));
3788 spin_unlock_irqrestore(writeq_lock, flags);
3789}
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801#if defined(writeq) && defined(CONFIG_64BIT)
3802static inline void
3803_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3804{
3805 wmb();
3806 __raw_writeq(b, addr);
3807 barrier();
3808}
3809#else
3810static inline void
3811_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3812{
3813 _base_mpi_ep_writeq(b, addr, writeq_lock);
3814}
3815#endif
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825static u8
3826_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3827{
3828 struct scsiio_tracker *st = NULL;
3829
3830 if (smid < ioc->hi_priority_smid)
3831 st = _get_st_from_smid(ioc, smid);
3832
3833 if (st == NULL)
3834 return _base_get_msix_index(ioc, NULL);
3835
3836 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3837 return st->msix_io;
3838}
3839
3840
3841
3842
3843
3844
3845
3846static void
3847_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
3848 u16 smid, u16 handle)
3849{
3850 Mpi2RequestDescriptorUnion_t descriptor;
3851 u64 *request = (u64 *)&descriptor;
3852 void *mpi_req_iomem;
3853 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3854
3855 _clone_sg_entries(ioc, (void *) mfp, smid);
3856 mpi_req_iomem = (void __force *)ioc->chip +
3857 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3858 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3859 ioc->request_sz);
3860 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3861 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3862 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3863 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3864 descriptor.SCSIIO.LMID = 0;
3865 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3866 &ioc->scsi_lookup_lock);
3867}
3868
3869
3870
3871
3872
3873
3874
3875static void
3876_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3877{
3878 Mpi2RequestDescriptorUnion_t descriptor;
3879 u64 *request = (u64 *)&descriptor;
3880
3881
3882 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3883 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3884 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3885 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3886 descriptor.SCSIIO.LMID = 0;
3887 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3888 &ioc->scsi_lookup_lock);
3889}
3890
3891
3892
3893
3894
3895
3896
3897static void
3898_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3899 u16 handle)
3900{
3901 Mpi2RequestDescriptorUnion_t descriptor;
3902 u64 *request = (u64 *)&descriptor;
3903
3904 descriptor.SCSIIO.RequestFlags =
3905 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3906 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3907 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3908 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3909 descriptor.SCSIIO.LMID = 0;
3910 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3911 &ioc->scsi_lookup_lock);
3912}
3913
3914
3915
3916
3917
3918
3919
3920static void
3921_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3922 u16 msix_task)
3923{
3924 Mpi2RequestDescriptorUnion_t descriptor;
3925 void *mpi_req_iomem;
3926 u64 *request;
3927
3928 if (ioc->is_mcpu_endpoint) {
3929 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3930
3931
3932 mpi_req_iomem = (void __force *)ioc->chip
3933 + MPI_FRAME_START_OFFSET
3934 + (smid * ioc->request_sz);
3935 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3936 ioc->request_sz);
3937 }
3938
3939 request = (u64 *)&descriptor;
3940
3941 descriptor.HighPriority.RequestFlags =
3942 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3943 descriptor.HighPriority.MSIxIndex = msix_task;
3944 descriptor.HighPriority.SMID = cpu_to_le16(smid);
3945 descriptor.HighPriority.LMID = 0;
3946 descriptor.HighPriority.Reserved1 = 0;
3947 if (ioc->is_mcpu_endpoint)
3948 _base_mpi_ep_writeq(*request,
3949 &ioc->chip->RequestDescriptorPostLow,
3950 &ioc->scsi_lookup_lock);
3951 else
3952 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3953 &ioc->scsi_lookup_lock);
3954}
3955
3956
3957
3958
3959
3960
3961
3962void
3963mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3964{
3965 Mpi2RequestDescriptorUnion_t descriptor;
3966 u64 *request = (u64 *)&descriptor;
3967
3968 descriptor.Default.RequestFlags =
3969 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3970 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3971 descriptor.Default.SMID = cpu_to_le16(smid);
3972 descriptor.Default.LMID = 0;
3973 descriptor.Default.DescriptorTypeDependent = 0;
3974 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3975 &ioc->scsi_lookup_lock);
3976}
3977
3978
3979
3980
3981
3982
3983static void
3984_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3985{
3986 Mpi2RequestDescriptorUnion_t descriptor;
3987 void *mpi_req_iomem;
3988 u64 *request;
3989
3990 if (ioc->is_mcpu_endpoint) {
3991 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3992
3993 _clone_sg_entries(ioc, (void *) mfp, smid);
3994
3995 mpi_req_iomem = (void __force *)ioc->chip +
3996 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3997 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3998 ioc->request_sz);
3999 }
4000 request = (u64 *)&descriptor;
4001 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4002 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4003 descriptor.Default.SMID = cpu_to_le16(smid);
4004 descriptor.Default.LMID = 0;
4005 descriptor.Default.DescriptorTypeDependent = 0;
4006 if (ioc->is_mcpu_endpoint)
4007 _base_mpi_ep_writeq(*request,
4008 &ioc->chip->RequestDescriptorPostLow,
4009 &ioc->scsi_lookup_lock);
4010 else
4011 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4012 &ioc->scsi_lookup_lock);
4013}
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024static void
4025_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4026 u16 handle)
4027{
4028 Mpi26AtomicRequestDescriptor_t descriptor;
4029 u32 *request = (u32 *)&descriptor;
4030
4031 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4032 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4033 descriptor.SMID = cpu_to_le16(smid);
4034
4035 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4036}
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046static void
4047_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4048 u16 handle)
4049{
4050 Mpi26AtomicRequestDescriptor_t descriptor;
4051 u32 *request = (u32 *)&descriptor;
4052
4053 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4054 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4055 descriptor.SMID = cpu_to_le16(smid);
4056
4057 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4058}
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069static void
4070_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4071 u16 msix_task)
4072{
4073 Mpi26AtomicRequestDescriptor_t descriptor;
4074 u32 *request = (u32 *)&descriptor;
4075
4076 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4077 descriptor.MSIxIndex = msix_task;
4078 descriptor.SMID = cpu_to_le16(smid);
4079
4080 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4081}
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091static void
4092_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4093{
4094 Mpi26AtomicRequestDescriptor_t descriptor;
4095 u32 *request = (u32 *)&descriptor;
4096
4097 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4098 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4099 descriptor.SMID = cpu_to_le16(smid);
4100
4101 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4102}
4103
4104
4105
4106
4107
4108static void
4109_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4110{
	switch (ioc->pdev->subsystem_vendor) {
4115 case PCI_VENDOR_ID_INTEL:
4116 switch (ioc->pdev->device) {
4117 case MPI2_MFGPAGE_DEVID_SAS2008:
4118 switch (ioc->pdev->subsystem_device) {
4119 case MPT2SAS_INTEL_RMS2LL080_SSDID:
4120 ioc_info(ioc, "%s\n",
4121 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4122 break;
4123 case MPT2SAS_INTEL_RMS2LL040_SSDID:
4124 ioc_info(ioc, "%s\n",
4125 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4126 break;
4127 case MPT2SAS_INTEL_SSD910_SSDID:
4128 ioc_info(ioc, "%s\n",
4129 MPT2SAS_INTEL_SSD910_BRANDING);
4130 break;
4131 default:
4132 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4133 ioc->pdev->subsystem_device);
4134 break;
4135 }
4136 break;
4137 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4138 switch (ioc->pdev->subsystem_device) {
4139 case MPT2SAS_INTEL_RS25GB008_SSDID:
4140 ioc_info(ioc, "%s\n",
4141 MPT2SAS_INTEL_RS25GB008_BRANDING);
4142 break;
4143 case MPT2SAS_INTEL_RMS25JB080_SSDID:
4144 ioc_info(ioc, "%s\n",
4145 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4146 break;
4147 case MPT2SAS_INTEL_RMS25JB040_SSDID:
4148 ioc_info(ioc, "%s\n",
4149 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4150 break;
4151 case MPT2SAS_INTEL_RMS25KB080_SSDID:
4152 ioc_info(ioc, "%s\n",
4153 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4154 break;
4155 case MPT2SAS_INTEL_RMS25KB040_SSDID:
4156 ioc_info(ioc, "%s\n",
4157 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4158 break;
4159 case MPT2SAS_INTEL_RMS25LB040_SSDID:
4160 ioc_info(ioc, "%s\n",
4161 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4162 break;
4163 case MPT2SAS_INTEL_RMS25LB080_SSDID:
4164 ioc_info(ioc, "%s\n",
4165 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4166 break;
4167 default:
4168 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4169 ioc->pdev->subsystem_device);
4170 break;
4171 }
4172 break;
4173 case MPI25_MFGPAGE_DEVID_SAS3008:
4174 switch (ioc->pdev->subsystem_device) {
4175 case MPT3SAS_INTEL_RMS3JC080_SSDID:
4176 ioc_info(ioc, "%s\n",
4177 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4178 break;
4179
4180 case MPT3SAS_INTEL_RS3GC008_SSDID:
4181 ioc_info(ioc, "%s\n",
4182 MPT3SAS_INTEL_RS3GC008_BRANDING);
4183 break;
4184 case MPT3SAS_INTEL_RS3FC044_SSDID:
4185 ioc_info(ioc, "%s\n",
4186 MPT3SAS_INTEL_RS3FC044_BRANDING);
4187 break;
4188 case MPT3SAS_INTEL_RS3UC080_SSDID:
4189 ioc_info(ioc, "%s\n",
4190 MPT3SAS_INTEL_RS3UC080_BRANDING);
4191 break;
4192 default:
4193 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4194 ioc->pdev->subsystem_device);
4195 break;
4196 }
4197 break;
4198 default:
4199 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4200 ioc->pdev->subsystem_device);
4201 break;
4202 }
4203 break;
4204 case PCI_VENDOR_ID_DELL:
4205 switch (ioc->pdev->device) {
4206 case MPI2_MFGPAGE_DEVID_SAS2008:
4207 switch (ioc->pdev->subsystem_device) {
4208 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4209 ioc_info(ioc, "%s\n",
4210 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4211 break;
4212 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4213 ioc_info(ioc, "%s\n",
4214 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4215 break;
4216 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4217 ioc_info(ioc, "%s\n",
4218 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4219 break;
4220 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4221 ioc_info(ioc, "%s\n",
4222 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4223 break;
4224 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4225 ioc_info(ioc, "%s\n",
4226 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4227 break;
4228 case MPT2SAS_DELL_PERC_H200_SSDID:
4229 ioc_info(ioc, "%s\n",
4230 MPT2SAS_DELL_PERC_H200_BRANDING);
4231 break;
4232 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4233 ioc_info(ioc, "%s\n",
4234 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4235 break;
4236 default:
4237 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4238 ioc->pdev->subsystem_device);
4239 break;
4240 }
4241 break;
4242 case MPI25_MFGPAGE_DEVID_SAS3008:
4243 switch (ioc->pdev->subsystem_device) {
4244 case MPT3SAS_DELL_12G_HBA_SSDID:
4245 ioc_info(ioc, "%s\n",
4246 MPT3SAS_DELL_12G_HBA_BRANDING);
4247 break;
4248 default:
4249 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4250 ioc->pdev->subsystem_device);
4251 break;
4252 }
4253 break;
4254 default:
4255 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4256 ioc->pdev->subsystem_device);
4257 break;
4258 }
4259 break;
4260 case PCI_VENDOR_ID_CISCO:
4261 switch (ioc->pdev->device) {
4262 case MPI25_MFGPAGE_DEVID_SAS3008:
4263 switch (ioc->pdev->subsystem_device) {
4264 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4265 ioc_info(ioc, "%s\n",
4266 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4267 break;
4268 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4269 ioc_info(ioc, "%s\n",
4270 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4271 break;
4272 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4273 ioc_info(ioc, "%s\n",
4274 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4275 break;
4276 default:
4277 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4278 ioc->pdev->subsystem_device);
4279 break;
4280 }
4281 break;
4282 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4283 switch (ioc->pdev->subsystem_device) {
4284 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4285 ioc_info(ioc, "%s\n",
4286 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4287 break;
4288 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4289 ioc_info(ioc, "%s\n",
4290 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4291 break;
4292 default:
4293 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4294 ioc->pdev->subsystem_device);
4295 break;
4296 }
4297 break;
4298 default:
4299 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4300 ioc->pdev->subsystem_device);
4301 break;
4302 }
4303 break;
4304 case MPT2SAS_HP_3PAR_SSVID:
4305 switch (ioc->pdev->device) {
4306 case MPI2_MFGPAGE_DEVID_SAS2004:
4307 switch (ioc->pdev->subsystem_device) {
4308 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4309 ioc_info(ioc, "%s\n",
4310 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4311 break;
4312 default:
4313 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4314 ioc->pdev->subsystem_device);
4315 break;
4316 }
4317 break;
4318 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4319 switch (ioc->pdev->subsystem_device) {
4320 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4321 ioc_info(ioc, "%s\n",
4322 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4323 break;
4324 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4325 ioc_info(ioc, "%s\n",
4326 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4327 break;
4328 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4329 ioc_info(ioc, "%s\n",
4330 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4331 break;
4332 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4333 ioc_info(ioc, "%s\n",
4334 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4335 break;
4336 default:
4337 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4338 ioc->pdev->subsystem_device);
4339 break;
4340 }
4341 break;
4342 default:
4343 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4344 ioc->pdev->subsystem_device);
4345 break;
		}
		break;
	default:
		break;
4349 }
4350}
4351
4352
4353
4354
4355
4356
4357
4358
static int
4360_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4361{
4362 Mpi2FWImageHeader_t *fw_img_hdr;
4363 Mpi26ComponentImageHeader_t *cmp_img_hdr;
4364 Mpi25FWUploadRequest_t *mpi_request;
4365 Mpi2FWUploadReply_t mpi_reply;
4366 int r = 0;
4367 u32 package_version = 0;
4368 void *fwpkg_data = NULL;
4369 dma_addr_t fwpkg_data_dma;
4370 u16 smid, ioc_status;
4371 size_t data_length;
4372
4373 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4374
4375 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4376 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4377 return -EAGAIN;
4378 }
4379
4380 data_length = sizeof(Mpi2FWImageHeader_t);
4381 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4382 &fwpkg_data_dma, GFP_KERNEL);
4383 if (!fwpkg_data) {
4384 ioc_err(ioc,
4385 "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4386 __FILE__, __LINE__, __func__);
4387 return -ENOMEM;
4388 }
4389
4390 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4391 if (!smid) {
4392 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4393 r = -EAGAIN;
4394 goto out;
4395 }
4396
4397 ioc->base_cmds.status = MPT3_CMD_PENDING;
4398 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4399 ioc->base_cmds.smid = smid;
4400 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4401 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4402 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4403 mpi_request->ImageSize = cpu_to_le32(data_length);
4404 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4405 data_length);
4406 init_completion(&ioc->base_cmds.done);
4407 ioc->put_smid_default(ioc, smid);
4408
4409 wait_for_completion_timeout(&ioc->base_cmds.done,
4410 FW_IMG_HDR_READ_TIMEOUT*HZ);
4411 ioc_info(ioc, "%s: complete\n", __func__);
4412 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4413 ioc_err(ioc, "%s: timeout\n", __func__);
4414 _debug_dump_mf(mpi_request,
4415 sizeof(Mpi25FWUploadRequest_t)/4);
4416 r = -ETIME;
4417 } else {
4418 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4419 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4420 memcpy(&mpi_reply, ioc->base_cmds.reply,
4421 sizeof(Mpi2FWUploadReply_t));
4422 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4423 MPI2_IOCSTATUS_MASK;
4424 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4425 fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4426 if (le32_to_cpu(fw_img_hdr->Signature) ==
4427 MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4428 cmp_img_hdr =
4429 (Mpi26ComponentImageHeader_t *)
4430 (fwpkg_data);
4431 package_version =
4432 le32_to_cpu(
4433 cmp_img_hdr->ApplicationSpecific);
4434 } else
4435 package_version =
4436 le32_to_cpu(
4437 fw_img_hdr->PackageVersion.Word);
4438 if (package_version)
4439 ioc_info(ioc,
4440 "FW Package Ver(%02d.%02d.%02d.%02d)\n",
4441 ((package_version) & 0xFF000000) >> 24,
4442 ((package_version) & 0x00FF0000) >> 16,
4443 ((package_version) & 0x0000FF00) >> 8,
4444 (package_version) & 0x000000FF);
4445 } else {
4446 _debug_dump_mf(&mpi_reply,
4447 sizeof(Mpi2FWUploadReply_t)/4);
4448 }
4449 }
4450 }
4451 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4452out:
4453 if (fwpkg_data)
4454 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4455 fwpkg_data_dma);
4456 return r;
4457}
4458
4459
4460
4461
4462
4463static void
4464_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4465{
4466 int i = 0;
	char desc[17];
4468 u32 iounit_pg1_flags;
4469 u32 bios_version;
4470
4471 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
	strncpy(desc, ioc->manu_pg0.ChipName, 16);
	desc[16] = '\0';	/* ChipName may not be NUL terminated */
4473 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4474 desc,
4475 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4476 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4477 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4478 ioc->facts.FWVersion.Word & 0x000000FF,
4479 ioc->pdev->revision,
4480 (bios_version & 0xFF000000) >> 24,
4481 (bios_version & 0x00FF0000) >> 16,
4482 (bios_version & 0x0000FF00) >> 8,
4483 bios_version & 0x000000FF);
4484
4485 _base_display_OEMs_branding(ioc);
4486
	ioc_info(ioc, "Protocol=(");

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
		pr_cont("Initiator");
		i++;
	}

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
		pr_cont("%sTarget", i ? "," : "");
		i++;
	}

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
		pr_cont("%sNVMe", i ? "," : "");
		i++;
	}
4503
4504 i = 0;
4505 pr_cont("), Capabilities=(");
4506
4507 if (!ioc->hide_ir_msg) {
4508 if (ioc->facts.IOCCapabilities &
4509 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4510 pr_cont("Raid");
4511 i++;
4512 }
4513 }
4514
4515 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4516 pr_cont("%sTLR", i ? "," : "");
4517 i++;
4518 }
4519
4520 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4521 pr_cont("%sMulticast", i ? "," : "");
4522 i++;
4523 }
4524
4525 if (ioc->facts.IOCCapabilities &
4526 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4527 pr_cont("%sBIDI Target", i ? "," : "");
4528 i++;
4529 }
4530
4531 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4532 pr_cont("%sEEDP", i ? "," : "");
4533 i++;
4534 }
4535
4536 if (ioc->facts.IOCCapabilities &
4537 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4538 pr_cont("%sSnapshot Buffer", i ? "," : "");
4539 i++;
4540 }
4541
4542 if (ioc->facts.IOCCapabilities &
4543 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4544 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4545 i++;
4546 }
4547
4548 if (ioc->facts.IOCCapabilities &
4549 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4550 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4551 i++;
4552 }
4553
4554 if (ioc->facts.IOCCapabilities &
4555 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4556 pr_cont("%sTask Set Full", i ? "," : "");
4557 i++;
4558 }
4559
4560 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4561 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4562 pr_cont("%sNCQ", i ? "," : "");
4563 i++;
4564 }
4565
4566 pr_cont(")\n");
4567}
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579void
4580mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4581 u16 device_missing_delay, u8 io_missing_delay)
4582{
4583 u16 dmd, dmd_new, dmd_orignal;
4584 u8 io_missing_delay_original;
4585 u16 sz;
4586 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4587 Mpi2ConfigReply_t mpi_reply;
4588 u8 num_phys = 0;
4589 u16 ioc_status;
4590
4591 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4592 if (!num_phys)
4593 return;
4594
4595 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4596 sizeof(Mpi2SasIOUnit1PhyData_t));
4597 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4598 if (!sas_iounit_pg1) {
4599 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4600 __FILE__, __LINE__, __func__);
4601 goto out;
4602 }
4603 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4604 sas_iounit_pg1, sz))) {
4605 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4606 __FILE__, __LINE__, __func__);
4607 goto out;
4608 }
4609 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4610 MPI2_IOCSTATUS_MASK;
4611 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4612 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4613 __FILE__, __LINE__, __func__);
4614 goto out;
4615 }
4616
4617
4618 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4619 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4620 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4621 else
4622 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4623 dmd_orignal = dmd;
4624 if (device_missing_delay > 0x7F) {
4625 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4626 device_missing_delay;
4627 dmd = dmd / 16;
4628 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4629 } else
4630 dmd = device_missing_delay;
4631 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4632
4633
4634 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4635 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4636
4637 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4638 sz)) {
4639 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4640 dmd_new = (dmd &
4641 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4642 else
4643 dmd_new =
4644 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4645 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4646 dmd_orignal, dmd_new);
4647 ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4648 io_missing_delay_original,
4649 io_missing_delay);
4650 ioc->device_missing_delay = dmd_new;
4651 ioc->io_missing_delay = io_missing_delay;
4652 }
4653
4654out:
4655 kfree(sas_iounit_pg1);
4656}
4657
4658
4659
4660
4661
4662
4663
4664
4665static void
4666_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4667{
4668 Mpi2IOCPage1_t ioc_pg1;
4669 Mpi2ConfigReply_t mpi_reply;
4670
4671 mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4672 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4673
4674 switch (perf_mode) {
4675 case MPT_PERF_MODE_DEFAULT:
4676 case MPT_PERF_MODE_BALANCED:
4677 if (ioc->high_iops_queues) {
			ioc_info(ioc,
			    "Enable interrupt coalescing only for first %d reply queues\n",
			    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4682
4683
4684
4685
4686
4687
4688
4689
4690
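			/*
			 * If the 31st bit is zero, interrupt coalescing is
			 * enabled for all reply descriptor post queues.
			 * If the 31st bit is set, coalescing can be
			 * enabled/disabled per reply descriptor post queue
			 * group (of 8); so to enable coalescing only on the
			 * first reply descriptor post queue group, the 31st
			 * bit and the zeroth bit are set.
			 */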
4691 ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4692 ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4693 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4694 ioc_info(ioc, "performance mode: balanced\n");
4695 return;
4696 }
4697 fallthrough;
4698 case MPT_PERF_MODE_LATENCY:
4699
4700
4701
4702
4703 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4704 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4705 ioc_pg1.ProductSpecific = 0;
4706 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4707 ioc_info(ioc, "performance mode: latency\n");
4708 break;
4709 case MPT_PERF_MODE_IOPS:
4710
4711
4712
4713 ioc_info(ioc,
4714 "performance mode: iops with coalescing timeout: 0x%x\n",
4715 le32_to_cpu(ioc_pg1.CoalescingTimeout));
4716 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4717 ioc_pg1.ProductSpecific = 0;
4718 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4719 break;
4720 }
4721}
4722
4723
4724
4725
4726
4727static void
4728_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4729{
4730 Mpi2ConfigReply_t mpi_reply;
4731 u32 iounit_pg1_flags;
4732
4733 ioc->nvme_abort_timeout = 30;
4734 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4735 if (ioc->ir_firmware)
4736 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4737 &ioc->manu_pg10);
4738
4739
4740
4741
4742
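	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */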
4743 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4744 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
4745 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4746 ioc->name);
4747 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4748 ioc->manu_pg11.EEDPTagMode |= 0x1;
4749 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4750 &ioc->manu_pg11);
4751 }
4752 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4753 ioc->tm_custom_handling = 1;
4754 else {
4755 ioc->tm_custom_handling = 0;
4756 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4757 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4758 else if (ioc->manu_pg11.NVMeAbortTO >
4759 NVME_TASK_ABORT_MAX_TIMEOUT)
4760 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4761 else
4762 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4763 }
4764
4765 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4766 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4767 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4768 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4769 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4770 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4771 _base_display_ioc_capabilities(ioc);
4772
4773
4774
4775
4776
4777 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4778 if ((ioc->facts.IOCCapabilities &
4779 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4780 iounit_pg1_flags &=
4781 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4782 else
4783 iounit_pg1_flags |=
4784 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4785 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4786 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4787
4788 if (ioc->iounit_pg8.NumSensors)
4789 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4790 if (ioc->is_aero_ioc)
4791 _base_update_ioc_page1_inlinewith_perf_mode(ioc);
4792}
4793
4794
4795
4796
4797
4798
4799
4800void
4801mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4802{
4803 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4804
4805
4806 list_for_each_entry_safe(enclosure_dev,
4807 enclosure_dev_next, &ioc->enclosure_list, list) {
4808 list_del(&enclosure_dev->list);
4809 kfree(enclosure_dev);
4810 }
4811}
4812
4813/**
4814 * _base_release_memory_pools - release memory
4815 * @ioc: per adapter object
4816 *
4817 * Free memory allocated from _base_allocate_memory_pools.
4818 */
4819static void
4820_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4821{
4822 int i = 0;
4823 int j = 0;
4824 int dma_alloc_count = 0;
4825 struct chain_tracker *ct;
4826 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4827
4828 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4829
4830 if (ioc->request) {
4831 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
4832 ioc->request, ioc->request_dma);
4833 dexitprintk(ioc,
4834 ioc_info(ioc, "request_pool(0x%p): free\n",
4835 ioc->request));
4836 ioc->request = NULL;
4837 }
4838
4839 if (ioc->sense) {
4840 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4841 dma_pool_destroy(ioc->sense_dma_pool);
4842 dexitprintk(ioc,
4843 ioc_info(ioc, "sense_pool(0x%p): free\n",
4844 ioc->sense));
4845 ioc->sense = NULL;
4846 }
4847
4848 if (ioc->reply) {
4849 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4850 dma_pool_destroy(ioc->reply_dma_pool);
4851 dexitprintk(ioc,
4852 ioc_info(ioc, "reply_pool(0x%p): free\n",
4853 ioc->reply));
4854 ioc->reply = NULL;
4855 }
4856
4857 if (ioc->reply_free) {
4858 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4859 ioc->reply_free_dma);
4860 dma_pool_destroy(ioc->reply_free_dma_pool);
4861 dexitprintk(ioc,
4862 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4863 ioc->reply_free));
4864 ioc->reply_free = NULL;
4865 }
4866
4867 if (ioc->reply_post) {
4868 dma_alloc_count = DIV_ROUND_UP(count,
4869 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4870 for (i = 0; i < count; i++) {
4871 if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
4872 && dma_alloc_count) {
4873 if (ioc->reply_post[i].reply_post_free) {
4874 dma_pool_free(
4875 ioc->reply_post_free_dma_pool,
4876 ioc->reply_post[i].reply_post_free,
4877 ioc->reply_post[i].reply_post_free_dma);
4878 dexitprintk(ioc, ioc_info(ioc,
4879 "reply_post_free_pool(0x%p): free\n",
4880 ioc->reply_post[i].reply_post_free));
4881 ioc->reply_post[i].reply_post_free =
4882 NULL;
4883 }
4884 --dma_alloc_count;
4885 }
4886 }
4887 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4888 if (ioc->reply_post_free_array &&
4889 ioc->rdpq_array_enable) {
4890 dma_pool_free(ioc->reply_post_free_array_dma_pool,
4891 ioc->reply_post_free_array,
4892 ioc->reply_post_free_array_dma);
4893 ioc->reply_post_free_array = NULL;
4894 }
4895 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4896 kfree(ioc->reply_post);
4897 }
4898
4899 if (ioc->pcie_sgl_dma_pool) {
4900 for (i = 0; i < ioc->scsiio_depth; i++) {
4901 dma_pool_free(ioc->pcie_sgl_dma_pool,
4902 ioc->pcie_sg_lookup[i].pcie_sgl,
4903 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4904 }
4905 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4906 }
4907
4908 if (ioc->config_page) {
4909 dexitprintk(ioc,
4910 ioc_info(ioc, "config_page(0x%p): free\n",
4911 ioc->config_page));
4912 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
4913 ioc->config_page, ioc->config_page_dma);
4914 }
4915
4916 kfree(ioc->hpr_lookup);
4917 ioc->hpr_lookup = NULL;
4918 kfree(ioc->internal_lookup);
4919 ioc->internal_lookup = NULL;
4920 if (ioc->chain_lookup) {
4921 for (i = 0; i < ioc->scsiio_depth; i++) {
4922 for (j = ioc->chains_per_prp_buffer;
4923 j < ioc->chains_needed_per_io; j++) {
4924 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4925 if (ct && ct->chain_buffer)
4926 dma_pool_free(ioc->chain_dma_pool,
4927 ct->chain_buffer,
4928 ct->chain_buffer_dma);
4929 }
4930 kfree(ioc->chain_lookup[i].chains_per_smid);
4931 }
4932 dma_pool_destroy(ioc->chain_dma_pool);
4933 kfree(ioc->chain_lookup);
4934 ioc->chain_lookup = NULL;
4935 }
4936}
4937
4938/**
4939 * mpt3sas_check_same_4gb_region - checks whether the pool described by the
4940 * start address and size lies entirely within one 4GB memory region,
4941 * i.e. whether the start and end addresses share the same upper 32 bits.
4942 * @reply_pool_start_address: start address of the pool
4943 * @pool_sz: size of the pool in bytes
4944 *
4945 * Return: 1 if the pool does not cross a 4GB boundary (upper 32 bits of
4946 * the start and end addresses match), else 0.
4947 */
4948static int
4949mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
4950{
4951 long reply_pool_end_address;
4952
4953 reply_pool_end_address = reply_pool_start_address + pool_sz;
4954
4955 if (upper_32_bits(reply_pool_start_address) ==
4956 upper_32_bits(reply_pool_end_address))
4957 return 1;
4958 else
4959 return 0;
4960}
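/*
 * Worked example (editorial sketch, assuming a 64-bit kernel so that the
 * long argument can hold a full address):
 *
 *	mpt3sas_check_same_4gb_region(0x100000000UL, 0x1000);
 *		returns 1: both ends have upper 32 bits == 0x1
 *	mpt3sas_check_same_4gb_region(0xFFFFF000UL, 0x2000);
 *		returns 0: the end address 0x100001000 has upper 32 bits
 *		== 0x1 while the start address has 0x0
 */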
4961
4962/**
4963 * base_alloc_rdpq_dma_pool - allocate DMA'able memory for reply queues
4964 * @ioc: per adapter object
4965 * @sz: DMA pool size
4966 *
4967 * Return: 0 for success, non-zero for failure.
4968 */
4969static int
4970base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
4971{
4972 int i = 0;
4973 u32 dma_alloc_count = 0;
4974 int reply_post_free_sz = ioc->reply_post_queue_depth *
4975 sizeof(Mpi2DefaultReplyDescriptor_t);
4976 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
4977
4978 ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
4979 GFP_KERNEL);
4980 if (!ioc->reply_post)
4981 return -ENOMEM;
4982
4983 /*
4984 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and
4985 * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..) should
4986 * be within a 4GB boundary, and reply queues in a set must have the
4987 * same upper 32 bits in their memory address. So here the driver
4988 * allocates the DMA'able memory for reply queues accordingly, using
4989 * the VENTURA_SERIES limitation to manage INVADER_SERIES as well.
4990 */
4991 dma_alloc_count = DIV_ROUND_UP(count,
4992 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
4993 ioc->reply_post_free_dma_pool =
4994 dma_pool_create("reply_post_free pool",
4995 &ioc->pdev->dev, sz, 16, 0);
4996 if (!ioc->reply_post_free_dma_pool)
4997 return -ENOMEM;
4998 for (i = 0; i < count; i++) {
4999 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
5000 ioc->reply_post[i].reply_post_free =
5001 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
5002 GFP_KERNEL,
5003 &ioc->reply_post[i].reply_post_free_dma);
5004 if (!ioc->reply_post[i].reply_post_free)
5005 return -ENOMEM;
5006 /*
5007 * Each set of RDPQ pool must satisfy the 4GB boundary
5008 * restriction:
5009 * 1) Check if the allocated resources for the RDPQ pool are
5010 * within the same 4GB range.
5011 * 2) If #1 is true, continue with 64-bit DMA.
5012 * 3) If #1 is false, return -EAGAIN, which means: free all the
5013 * resources, set the DMA mask to 32-bit, and reallocate.
5014 */
5015 if (!mpt3sas_check_same_4gb_region(
5016 (long)ioc->reply_post[i].reply_post_free, sz)) {
5017 dinitprintk(ioc,
5018 ioc_err(ioc, "bad Replypost free pool(0x%p) "
5019 "reply_post_free_dma = (0x%llx)\n",
5020 ioc->reply_post[i].reply_post_free,
5021 (unsigned long long)
5022 ioc->reply_post[i].reply_post_free_dma));
5023 return -EAGAIN;
5024 }
5025 dma_alloc_count--;
5026
5027 } else {
5028 ioc->reply_post[i].reply_post_free =
5029 (Mpi2ReplyDescriptorsUnion_t *)
5030 ((long)ioc->reply_post[i-1].reply_post_free
5031 + reply_post_free_sz);
5032 ioc->reply_post[i].reply_post_free_dma =
5033 (dma_addr_t)
5034 (ioc->reply_post[i-1].reply_post_free_dma +
5035 reply_post_free_sz);
5036 }
5037 }
5038 return 0;
5039}
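/*
 * Worked example (editorial, assuming RDPQ_MAX_INDEX_IN_ONE_CHUNK == 16):
 * with rdpq_array_enable set and reply_queue_count == 24, the loop above
 * performs DIV_ROUND_UP(24, 16) == 2 real allocations. Queues 0 and 16 get
 * fresh dma_pool_zalloc() chunks; queues 1-15 and 17-23 are carved out of
 * the preceding queue's chunk at reply_post_free_sz byte offsets, so each
 * group of 16 queues shares one chunk and therefore one 4GB region.
 */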
5040
5041/**
5042 * _base_allocate_memory_pools - allocate start of day memory pools
5043 * @ioc: per adapter object
5044 *
5045 * Return: 0 success, anything else error.
5046 */
5047static int
5048_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5049{
5050 struct mpt3sas_facts *facts;
5051 u16 max_sge_elements;
5052 u16 chains_needed_per_io;
5053 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
5054 u32 retry_sz;
5055 u32 rdpq_sz = 0;
5056 u16 max_request_credit, nvme_blocks_needed;
5057 unsigned short sg_tablesize;
5058 u16 sge_size;
5059 int i, j;
5060 int ret = 0;
5061 struct chain_tracker *ct;
5062
5063 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5064
5065
5066 retry_sz = 0;
5067 facts = &ioc->facts;
5068
5069
5070 if (max_sgl_entries != -1)
5071 sg_tablesize = max_sgl_entries;
5072 else {
5073 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
5074 sg_tablesize = MPT2SAS_SG_DEPTH;
5075 else
5076 sg_tablesize = MPT3SAS_SG_DEPTH;
5077 }
5078
5079
5080 if (reset_devices)
5081 sg_tablesize = min_t(unsigned short, sg_tablesize,
5082 MPT_KDUMP_MIN_PHYS_SEGMENTS);
5083
5084 if (ioc->is_mcpu_endpoint)
5085 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5086 else {
5087 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
5088 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5089 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
5090 sg_tablesize = min_t(unsigned short, sg_tablesize,
5091 SG_MAX_SEGMENTS);
5092 ioc_warn(ioc, "sg_tablesize(%u) is bigger than MPT_MAX_PHYS_SEGMENTS(%u), clamping to SG_MAX_SEGMENTS\n",
5093 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
5094 }
5095 ioc->shost->sg_tablesize = sg_tablesize;
5096 }
5097
5098 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
5099 (facts->RequestCredit / 4));
5100 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
5101 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
5102 INTERNAL_SCSIIO_CMDS_COUNT)) {
5103 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
5104 facts->RequestCredit);
5105 return -ENOMEM;
5106 }
5107 ioc->internal_depth = 10;
5108 }
5109
5110 ioc->hi_priority_depth = ioc->internal_depth - (5);
5111
5112 if (max_queue_depth != -1 && max_queue_depth != 0) {
5113 max_request_credit = min_t(u16, max_queue_depth +
5114 ioc->internal_depth, facts->RequestCredit);
5115 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
5116 max_request_credit = MAX_HBA_QUEUE_DEPTH;
5117 } else if (reset_devices)
5118 max_request_credit = min_t(u16, facts->RequestCredit,
5119 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
5120 else
5121 max_request_credit = min_t(u16, facts->RequestCredit,
5122 MAX_HBA_QUEUE_DEPTH);
5123
5124 /* Firmware maintains an additional facts->HighPriorityCredit
5125 * number of credits for high-priority commands, so the HBA
5126 * queue is sized as the request credit plus hi_priority_depth.
5127 */
5128 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
5129
5130 /* request frame size */
5131 ioc->request_sz = facts->IOCRequestFrameSize * 4;
5132
5133 /* reply frame size */
5134 ioc->reply_sz = facts->ReplyFrameSize * 4;
5135
5136 /* chain segment size */
5137 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5138 if (facts->IOCMaxChainSegmentSize)
5139 ioc->chain_segment_sz =
5140 facts->IOCMaxChainSegmentSize *
5141 MAX_CHAIN_ELEMT_SZ;
5142 else
5143 /* fall back to the firmware-default chain segment size */
5144 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
5145 MAX_CHAIN_ELEMT_SZ;
5146 } else
5147 ioc->chain_segment_sz = ioc->request_sz;
5148
5149 /* calculate the max scatter element size */
5150 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
5151
5152 retry_allocation:
5153 total_sz = 0;
5154 /* calculate number of sg elements left over in the 1st frame */
5155 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
5156 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
5157 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
5158
5159 /* now do the same for a chain buffer */
5160 max_sge_elements = ioc->chain_segment_sz - sge_size;
5161 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
5162
5163 /* Determine how many chain segments each I/O needs for the
5164 * SGEs that do not fit in the main request frame.
5165 */
5166 chains_needed_per_io = ((ioc->shost->sg_tablesize -
5167 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
5168 + 1;
5169 if (chains_needed_per_io > facts->MaxChainDepth) {
5170 chains_needed_per_io = facts->MaxChainDepth;
5171 ioc->shost->sg_tablesize = min_t(u16,
5172 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
5173 * chains_needed_per_io), ioc->shost->sg_tablesize);
5174 }
5175 ioc->chains_needed_per_io = chains_needed_per_io;
5176
5177 /* reply free queue sizing - taking into account for 64 FW events */
5178 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5179
5180 /* mCPU manages single counters for simplicity */
5181 if (ioc->is_mcpu_endpoint)
5182 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
5183 else {
5184 /* calculate reply descriptor post queue depth */
5185 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
5186 ioc->reply_free_queue_depth + 1;
5187 /* round up to 16 byte boundary */
5188 if (ioc->reply_post_queue_depth % 16)
5189 ioc->reply_post_queue_depth += 16 -
5190 (ioc->reply_post_queue_depth % 16);
5191 }
5192
5193 if (ioc->reply_post_queue_depth >
5194 facts->MaxReplyDescriptorPostQueueDepth) {
5195 ioc->reply_post_queue_depth =
5196 facts->MaxReplyDescriptorPostQueueDepth -
5197 (facts->MaxReplyDescriptorPostQueueDepth % 16);
5198 ioc->hba_queue_depth =
5199 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
5200 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5201 }
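 /*
 * Worked example (editorial): with hba_queue_depth == 1000, the reply
 * free queue gets 1000 + 64 == 1064 entries and the non-mCPU reply post
 * depth starts at 1000 + 1064 + 1 == 2065; since 2065 % 16 == 1, it is
 * rounded up by 15 to 2080, keeping the descriptor post queue 16-entry
 * aligned (subject to the MaxReplyDescriptorPostQueueDepth clamp above).
 */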
5202
5203 ioc_info(ioc,
5204 "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
5205 "sge_per_io(%d), chains_per_io(%d)\n",
5206 ioc->max_sges_in_main_message,
5207 ioc->max_sges_in_chain_message,
5208 ioc->shost->sg_tablesize,
5209 ioc->chains_needed_per_io);
5210
5211 /* reply post queue, 16 byte align */
5212 reply_post_free_sz = ioc->reply_post_queue_depth *
5213 sizeof(Mpi2DefaultReplyDescriptor_t);
5214 rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
5215 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
5216 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
5217 ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
5218 if (ret == -EAGAIN) {
5219 /*
5220 * Free the allocated (bad) RDPQ memory pools, change the
5221 * DMA coherent mask to 32 bits and reallocate the RDPQ.
5222 */
5223 _base_release_memory_pools(ioc);
5224 ioc->use_32bit_dma = true;
5225 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
5226 ioc_err(ioc,
5227 "32 DMA mask failed %s\n", pci_name(ioc->pdev));
5228 return -ENODEV;
5229 }
5230 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
5231 return -ENOMEM;
5232 } else if (ret == -ENOMEM)
5233 return -ENOMEM;
5234 total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
5235 DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
5236 ioc->scsiio_depth = ioc->hba_queue_depth -
5237 ioc->hi_priority_depth - ioc->internal_depth;
5238
5239 /* set the scsi host can_queue depth, reserving some slots
5240 * for internal commands that could be outstanding
5241 */
5242 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
5243 dinitprintk(ioc,
5244 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
5245 ioc->shost->can_queue));
5246
5247 /* contiguous pool for request and chains, 16 byte align, one
5248 * extra frame for smid=0
5249 */
5250 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
5251 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
5252
5253 /* hi-priority queue */
5254 sz += (ioc->hi_priority_depth * ioc->request_sz);
5255
5256 /* internal queue */
5257 sz += (ioc->internal_depth * ioc->request_sz);
5258
5259 ioc->request_dma_sz = sz;
5260 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
5261 &ioc->request_dma, GFP_KERNEL);
5262 if (!ioc->request) {
5263 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
5264 ioc->hba_queue_depth, ioc->chains_needed_per_io,
5265 ioc->request_sz, sz / 1024);
5266 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
5267 goto out;
5268 retry_sz = 64;
5269 ioc->hba_queue_depth -= retry_sz;
5270 _base_release_memory_pools(ioc);
5271 goto retry_allocation;
5272 }
5273
5274 if (retry_sz)
5275 ioc_err(ioc, "request pool: dma_alloc_coherent succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
5276 ioc->hba_queue_depth, ioc->chains_needed_per_io,
5277 ioc->request_sz, sz / 1024);
5278
5279 /* hi-priority queue */
5280 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
5281 ioc->request_sz);
5282 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
5283 ioc->request_sz);
5284
5285 /* internal queue */
5286 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
5287 ioc->request_sz);
5288 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
5289 ioc->request_sz);
5290
5291 ioc_info(ioc,
5292 "request pool(0x%p) - dma(0x%llx): "
5293 "depth(%d), frame_size(%d), pool_size(%d kB)\n",
5294 ioc->request, (unsigned long long) ioc->request_dma,
5295 ioc->hba_queue_depth, ioc->request_sz,
5296 (ioc->hba_queue_depth * ioc->request_sz) / 1024);
5297
5298 total_sz += sz;
5299
5300 dinitprintk(ioc,
5301 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
5302 ioc->request, ioc->scsiio_depth));
5303
5304 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
5305 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
5306 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
5307 if (!ioc->chain_lookup) {
5308 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5309 goto out;
5310 }
5311
5312 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
5313 for (i = 0; i < ioc->scsiio_depth; i++) {
5314 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
5315 if (!ioc->chain_lookup[i].chains_per_smid) {
5316 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5317 goto out;
5318 }
5319 }
5320
5321 /* initialize hi-priority queue smid's */
5322 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
5323 sizeof(struct request_tracker), GFP_KERNEL);
5324 if (!ioc->hpr_lookup) {
5325 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
5326 goto out;
5327 }
5328 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
5329 dinitprintk(ioc,
5330 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
5331 ioc->hi_priority,
5332 ioc->hi_priority_depth, ioc->hi_priority_smid));
5333
5334 /* initialize internal queue smid's */
5335 ioc->internal_lookup = kcalloc(ioc->internal_depth,
5336 sizeof(struct request_tracker), GFP_KERNEL);
5337 if (!ioc->internal_lookup) {
5338 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
5339 goto out;
5340 }
5341 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
5342 dinitprintk(ioc,
5343 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
5344 ioc->internal,
5345 ioc->internal_depth, ioc->internal_smid));
5346 /*
5347 * The number of NVMe page sized blocks needed is:
5348 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
5349 * ((sg_tablesize * 8) - 1) is the max PRPs minus the first PRP entry
5350 * that is placed in the main message frame. 8 is the size of each PRP
5351 * entry or PRP list pointer entry. 8 is subtracted from page_size
5352 * because of the PRP list pointer entry at the end of a page, which
5353 * is not counted as a PRP entry. The 1 added page is a round up.
5354 *
5355 * To avoid allocation failures due to the amount of memory that could
5356 * be required for NVMe PRPs, only each set of NVMe blocks will be
5357 * contiguous, so a new set is allocated for each possible I/O.
5358 */
5359 ioc->chains_per_prp_buffer = 0;
5360 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
5361 nvme_blocks_needed =
5362 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
5363 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
5364 nvme_blocks_needed++;
5365
5366 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
5367 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
5368 if (!ioc->pcie_sg_lookup) {
5369 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
5370 goto out;
5371 }
5372 sz = nvme_blocks_needed * ioc->page_size;
5373 ioc->pcie_sgl_dma_pool =
5374 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
5375 if (!ioc->pcie_sgl_dma_pool) {
5376 ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5377 goto out;
5378 }
5379
5380 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5381 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
5382 ioc->chains_needed_per_io);
5383
5384 for (i = 0; i < ioc->scsiio_depth; i++) {
5385 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
5386 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5387 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5388 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5389 ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5390 goto out;
5391 }
5392 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5393 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5394 ct->chain_buffer =
5395 ioc->pcie_sg_lookup[i].pcie_sgl +
5396 (j * ioc->chain_segment_sz);
5397 ct->chain_buffer_dma =
5398 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5399 (j * ioc->chain_segment_sz);
5400 }
5401 }
5402
5403 dinitprintk(ioc,
5404 ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5405 ioc->scsiio_depth, sz,
5406 (sz * ioc->scsiio_depth) / 1024));
5407 dinitprintk(ioc,
5408 ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
5409 ioc->chains_per_prp_buffer));
5410 total_sz += sz * ioc->scsiio_depth;
5411 }
5412
5413 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5414 ioc->chain_segment_sz, 16, 0);
5415 if (!ioc->chain_dma_pool) {
5416 ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
5417 goto out;
5418 }
5419 for (i = 0; i < ioc->scsiio_depth; i++) {
5420 for (j = ioc->chains_per_prp_buffer;
5421 j < ioc->chains_needed_per_io; j++) {
5422 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5423 ct->chain_buffer = dma_pool_alloc(
5424 ioc->chain_dma_pool, GFP_KERNEL,
5425 &ct->chain_buffer_dma);
5426 if (!ct->chain_buffer) {
5427 ioc_err(ioc, "chain_lookup: dma_pool_alloc failed\n");
5428 goto out;
5429 }
5430 }
5431 total_sz += ioc->chain_segment_sz;
5432 }
5433
5434 dinitprintk(ioc,
5435 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
5436 ioc->chain_depth, ioc->chain_segment_sz,
5437 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
5438
5439 /* sense buffers, 4 byte align */
5440 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
5441 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5442 4, 0);
5443 if (!ioc->sense_dma_pool) {
5444 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5445 goto out;
5446 }
5447 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5448 &ioc->sense_dma);
5449 if (!ioc->sense) {
5450 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5451 goto out;
5452 }
5453 /* sense buffers are required to be in the same 4GB region.
5454 * The function below checks this; on failure a new pool is
5455 * created with an updated alignment, and the older allocation
5456 * and pool are destroyed.
5457 * The alignment is chosen such that the next allocation, if it
5458 * succeeds, always meets the same 4GB-region requirement.
5459 * The actual requirement is not alignment per se: the start and
5460 * end DMA addresses must have the same upper 32 bits.
5461 */
5462 if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
5463 /* release the current sense pool and reallocate */
5464 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5465 dma_pool_destroy(ioc->sense_dma_pool);
5466 ioc->sense = NULL;
5467
5468 ioc->sense_dma_pool =
5469 dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5470 roundup_pow_of_two(sz), 0);
5471 if (!ioc->sense_dma_pool) {
5472 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5473 goto out;
5474 }
5475 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5476 &ioc->sense_dma);
5477 if (!ioc->sense) {
5478 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5479 goto out;
5480 }
5481 }
5482 ioc_info(ioc,
5483 "sense pool(0x%p)- dma(0x%llx): depth(%d),"
5484 "element_size(%d), pool_size(%d kB)\n",
5485 ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
5486 SCSI_SENSE_BUFFERSIZE, sz / 1024);
5487
5488 total_sz += sz;
5489
5490 /* reply pool, 4 byte align */
5491 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
5492 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
5493 4, 0);
5494 if (!ioc->reply_dma_pool) {
5495 ioc_err(ioc, "reply pool: dma_pool_create failed\n");
5496 goto out;
5497 }
5498 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5499 &ioc->reply_dma);
5500 if (!ioc->reply) {
5501 ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
5502 goto out;
5503 }
5504 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5505 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5506 dinitprintk(ioc,
5507 ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5508 ioc->reply, ioc->reply_free_queue_depth,
5509 ioc->reply_sz, sz / 1024));
5510 dinitprintk(ioc,
5511 ioc_info(ioc, "reply_dma(0x%llx)\n",
5512 (unsigned long long)ioc->reply_dma));
5513 total_sz += sz;
5514
5515 /* reply free queue, 16 byte align */
5516 sz = ioc->reply_free_queue_depth * 4;
5517 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
5518 &ioc->pdev->dev, sz, 16, 0);
5519 if (!ioc->reply_free_dma_pool) {
5520 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
5521 goto out;
5522 }
5523 ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
5524 &ioc->reply_free_dma);
5525 if (!ioc->reply_free) {
5526 ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
5527 goto out;
5528 }
5529 dinitprintk(ioc,
5530 ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5531 ioc->reply_free, ioc->reply_free_queue_depth,
5532 4, sz / 1024));
5533 dinitprintk(ioc,
5534 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
5535 (unsigned long long)ioc->reply_free_dma));
5536 total_sz += sz;
5537
5538 if (ioc->rdpq_array_enable) {
5539 reply_post_free_array_sz = ioc->reply_queue_count *
5540 sizeof(Mpi2IOCInitRDPQArrayEntry);
5541 ioc->reply_post_free_array_dma_pool =
5542 dma_pool_create("reply_post_free_array pool",
5543 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5544 if (!ioc->reply_post_free_array_dma_pool) {
5545 dinitprintk(ioc,
5546 ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
5547 goto out;
5548 }
5549 ioc->reply_post_free_array =
5550 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5551 GFP_KERNEL, &ioc->reply_post_free_array_dma);
5552 if (!ioc->reply_post_free_array) {
5553 dinitprintk(ioc,
5554 ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
5555 goto out;
5556 }
5557 }
5558 ioc->config_page_sz = 512;
5559 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
5560 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
5561 if (!ioc->config_page) {
5562 ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
5563 goto out;
5564 }
5565
5566 ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
5567 ioc->config_page, (unsigned long long)ioc->config_page_dma,
5568 ioc->config_page_sz);
5569 total_sz += ioc->config_page_sz;
5570
5571 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
5572 total_sz / 1024);
5573 ioc_info(ioc, "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
5574 ioc->shost->can_queue, facts->RequestCredit);
5575 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
5576 ioc->shost->sg_tablesize);
5577 return 0;
5578
5579 out:
5580 return -ENOMEM;
5581}
5582
5583/**
5584 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
5585 * @ioc: per adapter object
5586 * @cooked: Request raw or cooked IOC state
5587 *
5588 * Return: all IOC Doorbell register bits if cooked==0, else just the
5589 * Doorbell bits in MPI2_IOC_STATE_MASK.
5590 */
5591u32
5592mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
5593{
5594 u32 s, sc;
5595
5596 s = ioc->base_readl(&ioc->chip->Doorbell);
5597 sc = s & MPI2_IOC_STATE_MASK;
5598 return cooked ? sc : s;
5599}
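/*
 * Editorial sketch of the two calling styles used throughout this file:
 *
 *	if (mpt3sas_base_get_iocstate(ioc, 1) == MPI2_IOC_STATE_OPERATIONAL)
 *		...;				// cooked: pre-masked state
 *
 *	u32 doorbell = mpt3sas_base_get_iocstate(ioc, 0);	// raw register
 *	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
 *		mpt3sas_print_fault_code(ioc,
 *		    doorbell & MPI2_DOORBELL_DATA_MASK);
 *
 * The raw form is used when the fault/coredump code carried in the low
 * doorbell bits is still needed after the state comparison.
 */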
5600
5601/**
5602 * _base_wait_on_iocstate - waiting on a particular ioc state
5603 * @ioc: per adapter object
5604 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
5605 * @timeout: timeout in seconds
5606 *
5607 * Return: 0 for success, non-zero for failure.
5608 */
5609static int
5610_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5611{
5612 u32 count, cntdn;
5613 u32 current_state;
5614
5615 count = 0;
5616 cntdn = 1000 * timeout;
5617 do {
5618 current_state = mpt3sas_base_get_iocstate(ioc, 1);
5619 if (current_state == ioc_state)
5620 return 0;
5621 if (count && current_state == MPI2_IOC_STATE_FAULT)
5622 break;
5623 if (count && current_state == MPI2_IOC_STATE_COREDUMP)
5624 break;
5625
5626 usleep_range(1000, 1500);
5627 count++;
5628 } while (--cntdn);
5629
5630 return current_state;
5631}
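/*
 * Editorial note: a zero return means the requested state was reached;
 * a non-zero return is the last observed (cooked) state, e.g.:
 *
 *	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
 *	if (ioc_state)	// still FAULT, COREDUMP or another state after 20s
 *		...error path, ioc_state identifies the stuck state...
 */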
5632
5633/**
5634 * _base_dump_reg_set - print a hexdump of the system register set
5635 * @ioc: per adapter object
5636 *
5637 * Return: nothing.
5638 */
5639static inline void
5640_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
5641{
5642 unsigned int i, sz = 256;
5643 u32 __iomem *reg = (u32 __iomem *)ioc->chip;
5644
5645 ioc_info(ioc, "System Register set:\n");
5646 for (i = 0; i < (sz / sizeof(u32)); i++)
5647 pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
5648}
5649
5650
5651/**
5652 * _base_wait_for_doorbell_int - waiting for controller interrupt
5653 * (generated by a write to the doorbell)
5654 * @ioc: per adapter object
5655 * @timeout: timeout in seconds
5656 *
5657 * Return: 0 for success, non-zero for failure.
5658 *
5659 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5660 */
5661static int
5662_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5663{
5664 u32 cntdn, count;
5665 u32 int_status;
5666
5667 count = 0;
5668 cntdn = 1000 * timeout;
5669 do {
5670 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5671 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5672 dhsprintk(ioc,
5673 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5674 __func__, count, timeout));
5675 return 0;
5676 }
5677
5678 usleep_range(1000, 1500);
5679 count++;
5680 } while (--cntdn);
5681
5682 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5683 __func__, count, int_status);
5684 return -EFAULT;
5685}
5686
5687static int
5688_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5689{
5690 u32 cntdn, count;
5691 u32 int_status;
5692
5693 count = 0;
5694 cntdn = 2000 * timeout;
5695 do {
5696 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5697 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5698 dhsprintk(ioc,
5699 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5700 __func__, count, timeout));
5701 return 0;
5702 }
5703
5704 udelay(500);
5705 count++;
5706 } while (--cntdn);
5707
5708 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5709 __func__, count, int_status);
5710 return -EFAULT;
5711
5712}
5713
5714/**
5715 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
5716 * @ioc: per adapter object
5717 * @timeout: timeout in seconds
5718 *
5719 * Return: 0 for success, non-zero for failure.
5720 *
5721 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when the host writes to
5722 * the doorbell.
5723 */
5724static int
5725_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
5726{
5727 u32 cntdn, count;
5728 u32 int_status;
5729 u32 doorbell;
5730
5731 count = 0;
5732 cntdn = 1000 * timeout;
5733 do {
5734 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5735 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5736 dhsprintk(ioc,
5737 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5738 __func__, count, timeout));
5739 return 0;
5740 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5741 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
5742 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5743 MPI2_IOC_STATE_FAULT) {
5744 mpt3sas_print_fault_code(ioc, doorbell);
5745 return -EFAULT;
5746 }
5747 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5748 MPI2_IOC_STATE_COREDUMP) {
5749 mpt3sas_print_coredump_info(ioc, doorbell);
5750 return -EFAULT;
5751 }
5752 } else if (int_status == 0xFFFFFFFF)
5753 goto out;
5754
5755 usleep_range(1000, 1500);
5756 count++;
5757 } while (--cntdn);
5758
5759 out:
5760 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5761 __func__, count, int_status);
5762 return -EFAULT;
5763}
5764
5765/**
5766 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5767 * @ioc: per adapter object
5768 * @timeout: timeout in seconds
5769 *
5770 * Return: 0 for success, non-zero for failure.
5771 */
5772static int
5773_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5774{
5775 u32 cntdn, count;
5776 u32 doorbell_reg;
5777
5778 count = 0;
5779 cntdn = 1000 * timeout;
5780 do {
5781 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5782 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5783 dhsprintk(ioc,
5784 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5785 __func__, count, timeout));
5786 return 0;
5787 }
5788
5789 usleep_range(1000, 1500);
5790 count++;
5791 } while (--cntdn);
5792
5793 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5794 __func__, count, doorbell_reg);
5795 return -EFAULT;
5796}
5797
5798/**
5799 * _base_send_ioc_reset - send doorbell reset
5800 * @ioc: per adapter object
5801 * @reset_type: currently only supports MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
5802 * @timeout: timeout in seconds
5803 *
5804 * Return: 0 for success, non-zero for failure.
5805 */
5806static int
5807_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5808{
5809 u32 ioc_state;
5810 int r = 0;
5811 unsigned long flags;
5812
5813 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5814 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
5815 return -EFAULT;
5816 }
5817
5818 if (!(ioc->facts.IOCCapabilities &
5819 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5820 return -EFAULT;
5821
5822 ioc_info(ioc, "sending message unit reset !!\n");
5823
5824 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5825 &ioc->chip->Doorbell);
5826 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5827 r = -EFAULT;
5828 goto out;
5829 }
5830
5831 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5832 if (ioc_state) {
5833 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5834 __func__, ioc_state);
5835 r = -EFAULT;
5836 goto out;
5837 }
5838 out:
5839 if (r != 0) {
5840 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5841 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5842 /*
5843 * Wait for IOC state CoreDump to clear only during
5844 * HBA initialization & release time.
5845 */
5846 if ((ioc_state & MPI2_IOC_STATE_MASK) ==
5847 MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
5848 ioc->fault_reset_work_q == NULL)) {
5849 spin_unlock_irqrestore(
5850 &ioc->ioc_reset_in_progress_lock, flags);
5851 mpt3sas_print_coredump_info(ioc, ioc_state);
5852 mpt3sas_base_wait_for_coredump_completion(ioc,
5853 __func__);
5854 spin_lock_irqsave(
5855 &ioc->ioc_reset_in_progress_lock, flags);
5856 }
5857 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5858 }
5859 ioc_info(ioc, "message unit reset: %s\n",
5860 r == 0 ? "SUCCESS" : "FAILED");
5861 return r;
5862}
5863
5864/**
5865 * mpt3sas_wait_for_ioc - wait for the IOC to become operational
5866 * @ioc: per adapter object
5867 * @timeout: timeout in seconds
5868 *
5869 * Polls the (cooked) IOC state once per second until it reports
5870 * MPI2_IOC_STATE_OPERATIONAL or the timeout expires.
5871 *
5872 * Return: 0 if the IOC is operational, else -EFAULT.
5873 */
5874int
5875mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5876{
5877 int wait_state_count = 0;
5878 u32 ioc_state;
5879
5880 do {
5881 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5882 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5883 break;
5884 ssleep(1);
5885 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5886 __func__, ++wait_state_count);
5887 } while (--timeout);
5888 if (!timeout) {
5889 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5890 return -EFAULT;
5891 }
5892 if (wait_state_count)
5893 ioc_info(ioc, "ioc is operational\n");
5894 return 0;
5895}
5896
5897/**
5898 * _base_handshake_req_reply_wait - send request thru doorbell interface
5899 * @ioc: per adapter object
5900 * @request_bytes: request length
5901 * @request: pointer having request payload
5902 * @reply_bytes: reply length
5903 * @reply: pointer to reply payload
5904 * @timeout: timeout in seconds
5905 *
5906 * Return: 0 for success, non-zero for failure.
5907 */
5908static int
5909_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5910 u32 *request, int reply_bytes, u16 *reply, int timeout)
5911{
5912 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5913 int i;
5914 u8 failed;
5915 __le32 *mfp;
5916
5917 /* make sure doorbell is not in use */
5918 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5919 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5920 return -EFAULT;
5921 }
5922
5923 /* clear pending doorbell interrupts from previous state changes */
5924 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5925 MPI2_HIS_IOC2SYS_DB_STATUS)
5926 writel(0, &ioc->chip->HostInterruptStatus);
5927
5928 /* send message to ioc */
5929 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5930 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5931 &ioc->chip->Doorbell);
5932
5933 if ((_base_spin_on_doorbell_int(ioc, 5))) {
5934 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5935 __LINE__);
5936 return -EFAULT;
5937 }
5938 writel(0, &ioc->chip->HostInterruptStatus);
5939
5940 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5941 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5942 __LINE__);
5943 return -EFAULT;
5944 }
5945
5946 /* send message 32-bits at a time */
5947 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5948 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
5949 if ((_base_wait_for_doorbell_ack(ioc, 5)))
5950 failed = 1;
5951 }
5952
5953 if (failed) {
5954 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5955 __LINE__);
5956 return -EFAULT;
5957 }
5958
5959 /* now wait for the reply */
5960 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5961 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5962 __LINE__);
5963 return -EFAULT;
5964 }
5965
5966 /* read the first two 16-bits, it gives the total length of the reply */
5967 reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5968 & MPI2_DOORBELL_DATA_MASK);
5969 writel(0, &ioc->chip->HostInterruptStatus);
5970 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5971 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5972 __LINE__);
5973 return -EFAULT;
5974 }
5975 reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5976 & MPI2_DOORBELL_DATA_MASK);
5977 writel(0, &ioc->chip->HostInterruptStatus);
5978
5979 for (i = 2; i < default_reply->MsgLength * 2; i++) {
5980 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5981 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5982 __LINE__);
5983 return -EFAULT;
5984 }
5985 if (i >= reply_bytes/2)
5986 ioc->base_readl(&ioc->chip->Doorbell);
5987 else
5988 reply[i] = le16_to_cpu(
5989 ioc->base_readl(&ioc->chip->Doorbell)
5990 & MPI2_DOORBELL_DATA_MASK);
5991 writel(0, &ioc->chip->HostInterruptStatus);
5992 }
5993
5994 _base_wait_for_doorbell_int(ioc, 5);
5995 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5996 dhsprintk(ioc,
5997 ioc_info(ioc, "doorbell is in use (line=%d)\n",
5998 __LINE__));
5999 }
6000 writel(0, &ioc->chip->HostInterruptStatus);
6001
6002 if (ioc->logging_level & MPT_DEBUG_INIT) {
6003 mfp = (__le32 *)reply;
6004 pr_info("\toffset:data\n");
6005 for (i = 0; i < reply_bytes/4; i++)
6006 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6007 le32_to_cpu(mfp[i]));
6008 }
6009 return 0;
6010}
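/*
 * Editorial sketch of a caller (it mirrors _base_get_port_facts below):
 * the handshake moves whole MPI frames through the single 32-bit doorbell
 * register, so it needs no DMA-able memory, which is why it is used for
 * the start-of-day IOCFacts/PortFacts/IOCInit messages:
 *
 *	Mpi2IOCFactsRequest_t req;
 *	Mpi2IOCFactsReply_t reply;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.Function = MPI2_FUNCTION_IOC_FACTS;
 *	r = _base_handshake_req_reply_wait(ioc, sizeof(req), (u32 *)&req,
 *	    sizeof(reply), (u16 *)&reply, 5);
 */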
6011
6012/**
6013 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
6014 * @ioc: per adapter object
6015 * @mpi_reply: the reply payload from FW
6016 * @mpi_request: the request payload sent to FW
6017 *
6018 * The SAS IO Unit Control Request message allows the host to perform
6019 * low-level operations, such as resets on the PHYs of the IO Unit. It
6020 * also allows the host to obtain the IOC-assigned device handle for a
6021 * device, if it has other identifying information about the device, and
6022 * it allows the host to remove IOC resources associated with the device.
6023 *
6024 * Return: 0 for success, non-zero for failure.
6025 */
6026int
6027mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
6028 Mpi2SasIoUnitControlReply_t *mpi_reply,
6029 Mpi2SasIoUnitControlRequest_t *mpi_request)
6030{
6031 u16 smid;
6032 u8 issue_reset = 0;
6033 int rc;
6034 void *request;
6035
6036 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6037
6038 mutex_lock(&ioc->base_cmds.mutex);
6039
6040 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6041 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6042 rc = -EAGAIN;
6043 goto out;
6044 }
6045
6046 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6047 if (rc)
6048 goto out;
6049
6050 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6051 if (!smid) {
6052 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6053 rc = -EAGAIN;
6054 goto out;
6055 }
6056
6057 rc = 0;
6058 ioc->base_cmds.status = MPT3_CMD_PENDING;
6059 request = mpt3sas_base_get_msg_frame(ioc, smid);
6060 ioc->base_cmds.smid = smid;
6061 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
6062 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6063 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
6064 ioc->ioc_link_reset_in_progress = 1;
6065 init_completion(&ioc->base_cmds.done);
6066 ioc->put_smid_default(ioc, smid);
6067 wait_for_completion_timeout(&ioc->base_cmds.done,
6068 msecs_to_jiffies(10000));
6069 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6070 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
6071 ioc->ioc_link_reset_in_progress)
6072 ioc->ioc_link_reset_in_progress = 0;
6073 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6074 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
6075 mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
6076 issue_reset);
6077 goto issue_host_reset;
6078 }
6079 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6080 memcpy(mpi_reply, ioc->base_cmds.reply,
6081 sizeof(Mpi2SasIoUnitControlReply_t));
6082 else
6083 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
6084 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6085 goto out;
6086
6087 issue_host_reset:
6088 if (issue_reset)
6089 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6090 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6091 rc = -EFAULT;
6092 out:
6093 mutex_unlock(&ioc->base_cmds.mutex);
6094 return rc;
6095}
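/*
 * Editorial sketch of a caller issuing a PHY hard reset; "phy_number" is a
 * placeholder, the field names are from the MPI2 headers:
 *
 *	Mpi2SasIoUnitControlRequest_t req;
 *	Mpi2SasIoUnitControlReply_t reply;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 *	req.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
 *	req.PhyNum = phy_number;
 *	if (mpt3sas_base_sas_iounit_control(ioc, &reply, &req))
 *		...-EAGAIN/-EFAULT handling...
 *
 * Note that the function above tracks PHY reset operations through
 * ioc_link_reset_in_progress while the command is outstanding.
 */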
6096
6097/**
6098 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
6099 * @ioc: per adapter object
6100 * @mpi_reply: the reply payload from FW
6101 * @mpi_request: the request payload sent to FW
6102 *
6103 * The SCSI Enclosure Processor request message causes the IOC to
6104 * communicate with SES devices to control LED status signals.
6105 *
6106 * Return: 0 for success, non-zero for failure.
6107 */
6108int
6109mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
6110 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
6111{
6112 u16 smid;
6113 u8 issue_reset = 0;
6114 int rc;
6115 void *request;
6116
6117 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6118
6119 mutex_lock(&ioc->base_cmds.mutex);
6120
6121 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6122 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6123 rc = -EAGAIN;
6124 goto out;
6125 }
6126
6127 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6128 if (rc)
6129 goto out;
6130
6131 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6132 if (!smid) {
6133 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6134 rc = -EAGAIN;
6135 goto out;
6136 }
6137
6138 rc = 0;
6139 ioc->base_cmds.status = MPT3_CMD_PENDING;
6140 request = mpt3sas_base_get_msg_frame(ioc, smid);
6141 ioc->base_cmds.smid = smid;
6142 memset(request, 0, ioc->request_sz);
6143 memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
6144 init_completion(&ioc->base_cmds.done);
6145 ioc->put_smid_default(ioc, smid);
6146 wait_for_completion_timeout(&ioc->base_cmds.done,
6147 msecs_to_jiffies(10000));
6148 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6149 mpt3sas_check_cmd_timeout(ioc,
6150 ioc->base_cmds.status, mpi_request,
6151 sizeof(Mpi2SepRequest_t)/4, issue_reset);
6152 goto issue_host_reset;
6153 }
6154 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6155 memcpy(mpi_reply, ioc->base_cmds.reply,
6156 sizeof(Mpi2SepReply_t));
6157 else
6158 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
6159 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6160 goto out;
6161
6162 issue_host_reset:
6163 if (issue_reset)
6164 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6165 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6166 rc = -EFAULT;
6167 out:
6168 mutex_unlock(&ioc->base_cmds.mutex);
6169 return rc;
6170}
6171
6172/**
6173 * _base_get_port_facts - obtain port facts reply and save in ioc
6174 * @ioc: per adapter object
6175 * @port: port number
6176 *
6177 * Return: 0 for success, non-zero for failure.
6178 */
6179static int
6180_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
6181{
6182 Mpi2PortFactsRequest_t mpi_request;
6183 Mpi2PortFactsReply_t mpi_reply;
6184 struct mpt3sas_port_facts *pfacts;
6185 int mpi_reply_sz, mpi_request_sz, r;
6186
6187 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6188
6189 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
6190 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
6191 memset(&mpi_request, 0, mpi_request_sz);
6192 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
6193 mpi_request.PortNumber = port;
6194 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6195 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6196
6197 if (r != 0) {
6198 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6199 return r;
6200 }
6201
6202 pfacts = &ioc->pfacts[port];
6203 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
6204 pfacts->PortNumber = mpi_reply.PortNumber;
6205 pfacts->VP_ID = mpi_reply.VP_ID;
6206 pfacts->VF_ID = mpi_reply.VF_ID;
6207 pfacts->MaxPostedCmdBuffers =
6208 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
6209
6210 return 0;
6211}
6212
6213/**
6214 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
6215 * @ioc: per adapter object
6216 * @timeout: timeout in seconds
6217 *
6218 * Return: 0 for success, non-zero for failure.
6219 */
6220static int
6221_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
6222{
6223 u32 ioc_state;
6224 int rc;
6225
6226 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6227
6228 if (ioc->pci_error_recovery) {
6229 dfailprintk(ioc,
6230 ioc_info(ioc, "%s: host in pci error recovery\n",
6231 __func__));
6232 return -EFAULT;
6233 }
6234
6235 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6236 dhsprintk(ioc,
6237 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6238 __func__, ioc_state));
6239
6240 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
6241 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6242 return 0;
6243
6244 if (ioc_state & MPI2_DOORBELL_USED) {
6245 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6246 goto issue_diag_reset;
6247 }
6248
6249 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6250 mpt3sas_print_fault_code(ioc, ioc_state &
6251 MPI2_DOORBELL_DATA_MASK);
6252 goto issue_diag_reset;
6253 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6254 MPI2_IOC_STATE_COREDUMP) {
6255 ioc_info(ioc,
6256 "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
6257 __func__, ioc_state);
6258 return -EFAULT;
6259 }
6260
6261 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6262 if (ioc_state) {
6263 dfailprintk(ioc,
6264 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6265 __func__, ioc_state));
6266 return -EFAULT;
6267 }
6268
6269 issue_diag_reset:
6270 rc = _base_diag_reset(ioc);
6271 return rc;
6272}
6273
6274/**
6275 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
6276 * @ioc: per adapter object
6277 *
6278 * Return: 0 for success, non-zero for failure.
6279 */
6280static int
6281_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
6282{
6283 Mpi2IOCFactsRequest_t mpi_request;
6284 Mpi2IOCFactsReply_t mpi_reply;
6285 struct mpt3sas_facts *facts;
6286 int mpi_reply_sz, mpi_request_sz, r;
6287
6288 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6289
6290 r = _base_wait_for_iocstate(ioc, 10);
6291 if (r) {
6292 dfailprintk(ioc,
6293 ioc_info(ioc, "%s: failed getting to correct state\n",
6294 __func__));
6295 return r;
6296 }
6297 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
6298 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
6299 memset(&mpi_request, 0, mpi_request_sz);
6300 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
6301 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6302 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6303
6304 if (r != 0) {
6305 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6306 return r;
6307 }
6308
6309 facts = &ioc->facts;
6310 memset(facts, 0, sizeof(struct mpt3sas_facts));
6311 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
6312 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
6313 facts->VP_ID = mpi_reply.VP_ID;
6314 facts->VF_ID = mpi_reply.VF_ID;
6315 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
6316 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
6317 facts->WhoInit = mpi_reply.WhoInit;
6318 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
6319 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
6320 if (ioc->msix_enable && (facts->MaxMSIxVectors <=
6321 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
6322 ioc->combined_reply_queue = 0;
6323 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
6324 facts->MaxReplyDescriptorPostQueueDepth =
6325 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
6326 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
6327 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
6328 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
6329 ioc->ir_firmware = 1;
6330 if ((facts->IOCCapabilities &
6331 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
6332 ioc->rdpq_array_capable = 1;
6333 if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
6334 && ioc->is_aero_ioc)
6335 ioc->atomic_desc_capable = 1;
6336 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
6337 facts->IOCRequestFrameSize =
6338 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
6339 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6340 facts->IOCMaxChainSegmentSize =
6341 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
6342 }
6343 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
6344 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
6345 ioc->shost->max_id = -1;
6346 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
6347 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
6348 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
6349 facts->HighPriorityCredit =
6350 le16_to_cpu(mpi_reply.HighPriorityCredit);
6351 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
6352 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
6353 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
6354
6355 /*
6356 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
6357 */
6358 ioc->page_size = 1 << facts->CurrentHostPageSize;
6359 if (ioc->page_size == 1) {
6360 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
6361 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
6362 }
6363 dinitprintk(ioc,
6364 ioc_info(ioc, "CurrentHostPageSize(%d)\n",
6365 facts->CurrentHostPageSize));
6366
6367 dinitprintk(ioc,
6368 ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
6369 facts->RequestCredit, facts->MaxChainDepth));
6370 dinitprintk(ioc,
6371 ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
6372 facts->IOCRequestFrameSize * 4,
6373 facts->ReplyFrameSize * 4));
6374 return 0;
6375}
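/*
 * Worked example (editorial): CurrentHostPageSize is a power-of-two
 * exponent, so a reported value of 12 yields ioc->page_size = 1 << 12 ==
 * 4096 bytes. A reported 0 would yield the nonsensical size 1, which the
 * code above replaces with the 4k default (1 << MPT3SAS_HOST_PAGE_SIZE_4K).
 */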
6376
6377/**
6378 * _base_send_ioc_init - send ioc_init to firmware
6379 * @ioc: per adapter object
6380 *
6381 * Return: 0 for success, non-zero for failure.
6382 */
6383static int
6384_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
6385{
6386 Mpi2IOCInitRequest_t mpi_request;
6387 Mpi2IOCInitReply_t mpi_reply;
6388 int i, r = 0;
6389 ktime_t current_time;
6390 u16 ioc_status;
6391 u32 reply_post_free_array_sz = 0;
6392
6393 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6394
6395 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
6396 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
6397 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
6398 mpi_request.VF_ID = 0;
6399 mpi_request.VP_ID = 0;
6400 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
6401 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
6402 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
6403
6404 if (_base_is_controller_msix_enabled(ioc))
6405 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
6406 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
6407 mpi_request.ReplyDescriptorPostQueueDepth =
6408 cpu_to_le16(ioc->reply_post_queue_depth);
6409 mpi_request.ReplyFreeQueueDepth =
6410 cpu_to_le16(ioc->reply_free_queue_depth);
6411
6412 mpi_request.SenseBufferAddressHigh =
6413 cpu_to_le32((u64)ioc->sense_dma >> 32);
6414 mpi_request.SystemReplyAddressHigh =
6415 cpu_to_le32((u64)ioc->reply_dma >> 32);
6416 mpi_request.SystemRequestFrameBaseAddress =
6417 cpu_to_le64((u64)ioc->request_dma);
6418 mpi_request.ReplyFreeQueueAddress =
6419 cpu_to_le64((u64)ioc->reply_free_dma);
6420
6421 if (ioc->rdpq_array_enable) {
6422 reply_post_free_array_sz = ioc->reply_queue_count *
6423 sizeof(Mpi2IOCInitRDPQArrayEntry);
6424 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
6425 for (i = 0; i < ioc->reply_queue_count; i++)
6426 ioc->reply_post_free_array[i].RDPQBaseAddress =
6427 cpu_to_le64(
6428 (u64)ioc->reply_post[i].reply_post_free_dma);
6429 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
6430 mpi_request.ReplyDescriptorPostQueueAddress =
6431 cpu_to_le64((u64)ioc->reply_post_free_array_dma);
6432 } else {
6433 mpi_request.ReplyDescriptorPostQueueAddress =
6434 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
6435 }
6436
6437 /*
6438 * Set the flag to enable the CoreDump state feature in IOC firmware.
6439 */
6440 mpi_request.ConfigurationFlags |=
6441 cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
6442
6443 /* This time stamp specifies the number of milliseconds
6444 * since epoch ~ midnight January 1, 1970.
6445 */
6446 current_time = ktime_get_real();
6447 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
6448
6449 if (ioc->logging_level & MPT_DEBUG_INIT) {
6450 __le32 *mfp;
6451 int i;
6452
6453 mfp = (__le32 *)&mpi_request;
6454 ioc_info(ioc, "\toffset:data\n");
6455 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
6456 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6457 le32_to_cpu(mfp[i]));
6458 }
6459
6460 r = _base_handshake_req_reply_wait(ioc,
6461 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
6462 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
6463
6464 if (r != 0) {
6465 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6466 return r;
6467 }
6468
6469 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6470 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
6471 mpi_reply.IOCLogInfo) {
6472 ioc_err(ioc, "%s: failed\n", __func__);
6473 r = -EIO;
6474 }
6475
6476 return r;
6477}
6478
6479/**
6480 * mpt3sas_port_enable_done - command completion routine for port enable
6481 * @ioc: per adapter object
6482 * @smid: system request message index
6483 * @msix_index: MSIX table index supplied by the OS
6484 * @reply: reply message frame (lower 32bit addr)
6485 *
6486 * Return: 1 meaning the message frame should be freed from _base_interrupt,
6487 * 0 meaning the message frame is freed from this function.
6488 */
6489u8
6490mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
6491 u32 reply)
6492{
6493 MPI2DefaultReply_t *mpi_reply;
6494 u16 ioc_status;
6495
6496 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
6497 return 1;
6498
6499 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
6500 if (!mpi_reply)
6501 return 1;
6502
6503 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
6504 return 1;
6505
6506 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
6507 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
6508 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
6509 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
6510 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
6511 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6512 ioc->port_enable_failed = 1;
6513
6514 if (ioc->is_driver_loading) {
6515 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
6516 mpt3sas_port_enable_complete(ioc);
6517 return 1;
6518 } else {
6519 ioc->start_scan_failed = ioc_status;
6520 ioc->start_scan = 0;
6521 return 1;
6522 }
6523 }
6524 complete(&ioc->port_enable_cmds.done);
6525 return 1;
6526}
6527
6528/**
6529 * _base_send_port_enable - send port_enable (discovery stuff) to firmware
6530 * @ioc: per adapter object
6531 *
6532 * Return: 0 for success, non-zero for failure.
6533 */
6534static int
6535_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
6536{
6537 Mpi2PortEnableRequest_t *mpi_request;
6538 Mpi2PortEnableReply_t *mpi_reply;
6539 int r = 0;
6540 u16 smid;
6541 u16 ioc_status;
6542
6543 ioc_info(ioc, "sending port enable !!\n");
6544
6545 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6546 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6547 return -EAGAIN;
6548 }
6549
6550 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
6551 if (!smid) {
6552 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6553 return -EAGAIN;
6554 }
6555
6556 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
6557 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6558 ioc->port_enable_cmds.smid = smid;
6559 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
6560 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
6561
6562 init_completion(&ioc->port_enable_cmds.done);
6563 ioc->put_smid_default(ioc, smid);
6564 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
6565 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
6566 ioc_err(ioc, "%s: timeout\n", __func__);
6567 _debug_dump_mf(mpi_request,
6568 sizeof(Mpi2PortEnableRequest_t)/4);
6569 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
6570 r = -EFAULT;
6571 else
6572 r = -ETIME;
6573 goto out;
6574 }
6575
6576 mpi_reply = ioc->port_enable_cmds.reply;
6577 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
6578 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6579 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
6580 __func__, ioc_status);
6581 r = -EFAULT;
6582 goto out;
6583 }
6584
6585 out:
6586 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6587 ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
6588 return r;
6589}
6590
6591/**
6592 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
6593 * @ioc: per adapter object
6594 *
6595 * Return: 0 for success, non-zero for failure.
6596 */
6597int
6598mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
6599{
6600 Mpi2PortEnableRequest_t *mpi_request;
6601 u16 smid;
6602
6603 ioc_info(ioc, "sending port enable !!\n");
6604
6605 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6606 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6607 return -EAGAIN;
6608 }
6609
6610 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
6611 if (!smid) {
6612 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6613 return -EAGAIN;
6614 }
6615
6616 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
6617 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6618 ioc->port_enable_cmds.smid = smid;
6619 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
6620 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
6621
6622 ioc->put_smid_default(ioc, smid);
6623 return 0;
6624}
6625
6626/**
6627 * _base_determine_wait_on_discovery - discovery wait disposition
6628 * @ioc: per adapter object
6629 *
6630 * Decide whether to wait for discovery to complete: used either to
6631 * locate the boot device, or to report volumes ahead of physical devices.
6632 *
6633 * Return: 1 for wait, 0 for don't wait.
6634 */
6635static int
6636_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
6637
6638 /* We wait for discovery to complete when IR firmware is loaded,
6639 * so that RAID volumes can be reported to the upper layers ahead
6640 * of their member physical disks. The BIOS boot-device checks
6641 * below additionally force a wait whenever the BIOS has any boot
6642 * device configured in BIOS Page 2.
6643 */
6644 if (ioc->ir_firmware)
6645 return 1;
6646
6647 /* if there is no BIOS, then we don't need to wait */
6648 if (!ioc->bios_pg3.BiosVersion)
6649 return 0;
6650
6651 /* The BIOS is present, so we drop down here.
6652 *
6653 * If there are any boot-device entries in BIOS Page 2, then we
6654 * wait for discovery to complete.
6655 */
6656
6657 /* Current Boot Device */
6658 if ((ioc->bios_pg2.CurrentBootDeviceForm &
6659 MPI2_BIOSPAGE2_FORM_MASK) ==
6660 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6661 /* Requested Boot Device */
6662 (ioc->bios_pg2.ReqBootDeviceForm &
6663 MPI2_BIOSPAGE2_FORM_MASK) ==
6664 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6665 /* Alternate Requested Boot Device */
6666 (ioc->bios_pg2.ReqAltBootDeviceForm &
6667 MPI2_BIOSPAGE2_FORM_MASK) ==
6668 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
6669 return 0;
6670
6671 return 1;
6672}
6673
6674/**
6675 * _base_unmask_events - turn on notification for this event
6676 * @ioc: per adapter object
6677 * @event: firmware event
6678 *
6679 * The mask is stored in ioc->event_masks.
6680 */
6681static void
6682_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
6683{
6684 u32 desired_event;
6685
6686 if (event >= 128)
6687 return;
6688
6689 desired_event = (1 << (event % 32));
6690
6691 if (event < 32)
6692 ioc->event_masks[0] &= ~desired_event;
6693 else if (event < 64)
6694 ioc->event_masks[1] &= ~desired_event;
6695 else if (event < 96)
6696 ioc->event_masks[2] &= ~desired_event;
6697 else if (event < 128)
6698 ioc->event_masks[3] &= ~desired_event;
6699}
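/*
 * Worked example (editorial): event 0x46 (decimal 70) selects word
 * 70 / 32 == 2 and bit 70 % 32 == 6, so the call clears bit 6 of
 * ioc->event_masks[2]. A cleared bit means "deliver this event" once the
 * masks are sent to the firmware by _base_event_notification() below.
 */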
6700
6701/**
6702 * _base_event_notification - send event notification
6703 * @ioc: per adapter object
6704 *
6705 * Return: 0 for success, non-zero for failure.
6706 */
6707static int
6708_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
6709{
6710 Mpi2EventNotificationRequest_t *mpi_request;
6711 u16 smid;
6712 int r = 0;
6713 int i;
6714
6715 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6716
6717 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6718 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6719 return -EAGAIN;
6720 }
6721
6722 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6723 if (!smid) {
6724 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6725 return -EAGAIN;
6726 }
6727 ioc->base_cmds.status = MPT3_CMD_PENDING;
6728 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6729 ioc->base_cmds.smid = smid;
6730 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
6731 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
6732 mpi_request->VF_ID = 0;
6733 mpi_request->VP_ID = 0;
6734 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6735 mpi_request->EventMasks[i] =
6736 cpu_to_le32(ioc->event_masks[i]);
6737 init_completion(&ioc->base_cmds.done);
6738 ioc->put_smid_default(ioc, smid);
6739 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
6740 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6741 ioc_err(ioc, "%s: timeout\n", __func__);
6742 _debug_dump_mf(mpi_request,
6743 sizeof(Mpi2EventNotificationRequest_t)/4);
6744 if (ioc->base_cmds.status & MPT3_CMD_RESET)
6745 r = -EFAULT;
6746 else
6747 r = -ETIME;
6748 } else
6749 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
6750 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6751 return r;
6752}
6753
6754/**
6755 * mpt3sas_base_validate_event_type - validating event types
6756 * @ioc: per adapter object
6757 * @event_type: firmware event type
6758 *
6759 * This will turn on firmware event notification when an application
6760 * asks for that event. Events that are already enabled are not masked.
6761 */
6762void
6763mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
6764{
6765 int i, j;
6766 u32 event_mask, desired_event;
6767 u8 send_update_to_fw;
6768
6769 for (i = 0, send_update_to_fw = 0; i <
6770 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
6771 event_mask = ~event_type[i];
6772 desired_event = 1;
6773 for (j = 0; j < 32; j++) {
6774 if (!(event_mask & desired_event) &&
6775 (ioc->event_masks[i] & desired_event)) {
6776 ioc->event_masks[i] &= ~desired_event;
6777 send_update_to_fw = 1;
6778 }
6779 desired_event = (desired_event << 1);
6780 }
6781 }
6782
6783 if (!send_update_to_fw)
6784 return;
6785
6786 mutex_lock(&ioc->base_cmds.mutex);
6787 _base_event_notification(ioc);
6788 mutex_unlock(&ioc->base_cmds.mutex);
6789}
6790
6791/**
6792 * _base_diag_reset - the "big hammer" start of day reset
6793 * @ioc: per adapter object
6794 *
6795 * Return: 0 for success, non-zero for failure.
6796 */
6797static int
6798_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6799{
6800 u32 host_diagnostic;
6801 u32 ioc_state;
6802 u32 count;
6803 u32 hcb_size;
6804
6805 ioc_info(ioc, "sending diag reset !!\n");
6806
6807 drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
6808
6809 count = 0;
6810 do {
6811 /* Write the magic sequence to the WriteSequence register;
6812 * loop until in diagnostic mode.
6813 */
6814 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
6815 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6816 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
6817 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
6818 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
6819 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
6820 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
6821 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
6822
6823 /* wait 100 msec */
6824 msleep(100);
6825
6826 if (count++ > 20) {
6827 ioc_info(ioc,
6828 "Stop writing magic sequence after 20 retries\n");
6829 _base_dump_reg_set(ioc);
6830 goto out;
6831 }
6832
6833 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6834 drsprintk(ioc,
6835 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6836 count, host_diagnostic));
6837
6838 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
6839
6840 hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
6841
6842 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
6843 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
6844 &ioc->chip->HostDiagnostic);
6845
6846
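	/* This delay allows the chip PCIe hardware time to finish reset tasks. */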
6847 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
6848
6849
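	/* Approximately 300 seconds of maximum wait. */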
6850 for (count = 0; count < (300000000 /
6851 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
6852
6853 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6854
6855 if (host_diagnostic == 0xFFFFFFFF) {
6856 ioc_info(ioc,
6857 "Invalid host diagnostic register value\n");
6858 _base_dump_reg_set(ioc);
6859 goto out;
6860 }
6861 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
6862 break;
6863
6864 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
6865 }
6866
6867 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
6868
6869 drsprintk(ioc,
6870 ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
6871 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
6872 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
6873 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
6874
6875 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
6876 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
6877 &ioc->chip->HCBSize);
6878 }
6879
6880 drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
6881 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
6882 &ioc->chip->HostDiagnostic);
6883
6884 drsprintk(ioc,
6885 ioc_info(ioc, "disable writes to the diagnostic register\n"));
6886 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6887
6888 drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
6889 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
6890 if (ioc_state) {
6891 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6892 __func__, ioc_state);
6893 _base_dump_reg_set(ioc);
6894 goto out;
6895 }
6896
6897 ioc_info(ioc, "diag reset: SUCCESS\n");
6898 return 0;
6899
6900 out:
6901 ioc_err(ioc, "diag reset: FAILED\n");
6902 return -EFAULT;
6903}
6911
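/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */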
6912static int
6913_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
6914{
6915 u32 ioc_state;
6916 int rc;
6917 int count;
6918
6919 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6920
6921 if (ioc->pci_error_recovery)
6922 return 0;
6923
6924 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6925 dhsprintk(ioc,
6926 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6927 __func__, ioc_state));
6928
6929
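	/* if in RESET state, it should move to READY state shortly */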
6930 count = 0;
6931 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
6932 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
6933 MPI2_IOC_STATE_READY) {
6934 if (count++ == 10) {
6935 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6936 __func__, ioc_state);
6937 return -EFAULT;
6938 }
6939 ssleep(1);
6940 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6941 }
6942 }
6943
6944 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
6945 return 0;
6946
6947 if (ioc_state & MPI2_DOORBELL_USED) {
6948 ioc_info(ioc, "unexpected doorbell active!\n");
6949 goto issue_diag_reset;
6950 }
6951
6952 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6953 mpt3sas_print_fault_code(ioc, ioc_state &
6954 MPI2_DOORBELL_DATA_MASK);
6955 goto issue_diag_reset;
6956 }
6957
6958 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
6965
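		/*
		 * If a host reset is invoked while the watchdog thread is
		 * still waiting on the coredump, wait here for coredump
		 * completion before issuing the reset; otherwise the reset
		 * would reach the firmware before it finishes copying its
		 * logs to the coredump region.
		 */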
6966 if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
6967 mpt3sas_print_coredump_info(ioc, ioc_state &
6968 MPI2_DOORBELL_DATA_MASK);
6969 mpt3sas_base_wait_for_coredump_completion(ioc,
6970 __func__);
6971 }
6972 goto issue_diag_reset;
6973 }
6974
6975 if (type == FORCE_BIG_HAMMER)
6976 goto issue_diag_reset;
6977
6978 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6979 if (!(_base_send_ioc_reset(ioc,
6980 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
6981 return 0;
6982 }
6983
6984 issue_diag_reset:
6985 rc = _base_diag_reset(ioc);
6986 return rc;
6987}
6994
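/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */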
6995static int
6996_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6997{
6998 int r, i, index, rc;
6999 unsigned long flags;
7000 u32 reply_address;
7001 u16 smid;
7002 struct _tr_list *delayed_tr, *delayed_tr_next;
7003 struct _sc_list *delayed_sc, *delayed_sc_next;
7004 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
7005 u8 hide_flag;
7006 struct adapter_reply_queue *reply_q;
7007 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
7008
7009 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7010
7011
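	/* clean the delayed target reset list */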
7012 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
7013 &ioc->delayed_tr_list, list) {
7014 list_del(&delayed_tr->list);
7015 kfree(delayed_tr);
7016 }
7017
7018
7019 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
7020 &ioc->delayed_tr_volume_list, list) {
7021 list_del(&delayed_tr->list);
7022 kfree(delayed_tr);
7023 }
7024
7025 list_for_each_entry_safe(delayed_sc, delayed_sc_next,
7026 &ioc->delayed_sc_list, list) {
7027 list_del(&delayed_sc->list);
7028 kfree(delayed_sc);
7029 }
7030
7031 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
7032 &ioc->delayed_event_ack_list, list) {
7033 list_del(&delayed_event_ack->list);
7034 kfree(delayed_event_ack);
7035 }
7036
7037 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7038
7039
7040 INIT_LIST_HEAD(&ioc->hpr_free_list);
7041 smid = ioc->hi_priority_smid;
7042 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
7043 ioc->hpr_lookup[i].cb_idx = 0xFF;
7044 ioc->hpr_lookup[i].smid = smid;
7045 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
7046 &ioc->hpr_free_list);
7047 }
7048
7049
7050 INIT_LIST_HEAD(&ioc->internal_free_list);
7051 smid = ioc->internal_smid;
7052 for (i = 0; i < ioc->internal_depth; i++, smid++) {
7053 ioc->internal_lookup[i].cb_idx = 0xFF;
7054 ioc->internal_lookup[i].smid = smid;
7055 list_add_tail(&ioc->internal_lookup[i].tracker_list,
7056 &ioc->internal_free_list);
7057 }
7058
7059 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7060
7061
7062 for (i = 0, reply_address = (u32)ioc->reply_dma ;
7063 i < ioc->reply_free_queue_depth ; i++, reply_address +=
7064 ioc->reply_sz) {
7065 ioc->reply_free[i] = cpu_to_le32(reply_address);
7066 if (ioc->is_mcpu_endpoint)
7067 _base_clone_reply_to_sys_mem(ioc,
7068 reply_address, i);
7069 }
7070
7071
7072 if (ioc->is_driver_loading)
7073 _base_assign_reply_queues(ioc);
7074
7075
7076 index = 0;
7077 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
7078 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
7082
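		/*
		 * If RDPQ is enabled, each reply queue gets its own entry
		 * from the reply_post[] array; otherwise the queues are
		 * carved out of one contiguous allocation.
		 */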
7083 if (ioc->rdpq_array_enable) {
7084 reply_q->reply_post_free =
7085 ioc->reply_post[index++].reply_post_free;
7086 } else {
7087 reply_q->reply_post_free = reply_post_free_contig;
7088 reply_post_free_contig += ioc->reply_post_queue_depth;
7089 }
7090
7091 reply_q->reply_post_host_index = 0;
7092 for (i = 0; i < ioc->reply_post_queue_depth; i++)
7093 reply_q->reply_post_free[i].Words =
7094 cpu_to_le64(ULLONG_MAX);
7095 if (!_base_is_controller_msix_enabled(ioc))
7096 goto skip_init_reply_post_free_queue;
7097 }
7098 skip_init_reply_post_free_queue:
7099
7100 r = _base_send_ioc_init(ioc);
7101 if (r) {
7106
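		/*
		 * No need to check the IOC state for a fault and issue a
		 * diag reset during host reset; that check is needed only
		 * at driver load time.
		 */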
7107 if (!ioc->is_driver_loading)
7108 return r;
7109
7110 rc = _base_check_for_fault_and_issue_reset(ioc);
7111 if (rc || (_base_send_ioc_init(ioc)))
7112 return r;
7113 }
7114
7115
7116 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
7117 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
7118
7119
7120 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
7121 if (ioc->combined_reply_queue)
7122 writel((reply_q->msix_index & 7)<<
7123 MPI2_RPHI_MSIX_INDEX_SHIFT,
7124 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
7125 else
7126 writel(reply_q->msix_index <<
7127 MPI2_RPHI_MSIX_INDEX_SHIFT,
7128 &ioc->chip->ReplyPostHostIndex);
7129
7130 if (!_base_is_controller_msix_enabled(ioc))
7131 goto skip_init_reply_post_host_index;
7132 }
7133
7134 skip_init_reply_post_host_index:
7135
7136 mpt3sas_base_unmask_interrupts(ioc);
7137
7138 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7139 r = _base_display_fwpkg_version(ioc);
7140 if (r)
7141 return r;
7142 }
7143
7144 _base_static_config_pages(ioc);
7145 r = _base_event_notification(ioc);
7146 if (r)
7147 return r;
7148
7149 if (ioc->is_driver_loading) {
7150
7151 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
7152 == 0x80) {
7153 hide_flag = (u8) (
7154 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
7155 MFG_PAGE10_HIDE_SSDS_MASK);
7156 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
7157 ioc->mfg_pg10_hide_flag = hide_flag;
7158 }
7159
7160 ioc->wait_for_discovery_to_complete =
7161 _base_determine_wait_on_discovery(ioc);
7162
7163 return r;
7164 }
7165
7166 r = _base_send_port_enable(ioc);
7167 if (r)
7168 return r;
7169
7170 return r;
7171}
7176
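/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 */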
7177void
7178mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
7179{
7180 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7181
7182
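	/* synchronize freeing of resources with the pci_access_mutex lock */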
7183 mutex_lock(&ioc->pci_access_mutex);
7184 if (ioc->chip_phys && ioc->chip) {
7185 mpt3sas_base_mask_interrupts(ioc);
7186 ioc->shost_recovery = 1;
7187 _base_make_ioc_ready(ioc, SOFT_RESET);
7188 ioc->shost_recovery = 0;
7189 }
7190
7191 mpt3sas_base_unmap_resources(ioc);
7192 mutex_unlock(&ioc->pci_access_mutex);
7193 return;
7194}
7201
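/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */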
7202int
7203mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
7204{
7205 int r, i, rc;
7206 int cpu_id, last_cpu_id = 0;
7207
7208 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7209
7210
7211 ioc->cpu_count = num_online_cpus();
7212 for_each_online_cpu(cpu_id)
7213 last_cpu_id = cpu_id;
7214 ioc->cpu_msix_table_sz = last_cpu_id + 1;
7215 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
7216 ioc->reply_queue_count = 1;
7217 if (!ioc->cpu_msix_table) {
7218 ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
7219 r = -ENOMEM;
7220 goto out_free_resources;
7221 }
7222
7223 if (ioc->is_warpdrive) {
7224 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
7225 sizeof(resource_size_t *), GFP_KERNEL);
7226 if (!ioc->reply_post_host_index) {
7227 ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
7228 r = -ENOMEM;
7229 goto out_free_resources;
7230 }
7231 }
7232
7233 ioc->smp_affinity_enable = smp_affinity_enable;
7234
7235 ioc->rdpq_array_enable_assigned = 0;
7236 ioc->use_32bit_dma = false;
7237 if (ioc->is_aero_ioc)
7238 ioc->base_readl = &_base_readl_aero;
7239 else
7240 ioc->base_readl = &_base_readl;
7241 r = mpt3sas_base_map_resources(ioc);
7242 if (r)
7243 goto out_free_resources;
7244
7245 pci_set_drvdata(ioc->pdev, ioc->shost);
7246 r = _base_get_ioc_facts(ioc);
7247 if (r) {
7248 rc = _base_check_for_fault_and_issue_reset(ioc);
7249 if (rc || (_base_get_ioc_facts(ioc)))
7250 goto out_free_resources;
7251 }
7252
7253 switch (ioc->hba_mpi_version_belonged) {
7254 case MPI2_VERSION:
7255 ioc->build_sg_scmd = &_base_build_sg_scmd;
7256 ioc->build_sg = &_base_build_sg;
7257 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
7258 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7259 break;
7260 case MPI25_VERSION:
7261 case MPI26_VERSION:
7267
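		/*
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist and
		 * Target Status all require IEEE-formatted scatter gather
		 * elements on SAS3.0 and newer controllers.
		 */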
7268 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
7269 ioc->build_sg = &_base_build_sg_ieee;
7270 ioc->build_nvme_prp = &_base_build_nvme_prp;
7271 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
7272 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
7273 if (ioc->high_iops_queues)
7274 ioc->get_msix_index_for_smlio =
7275 &_base_get_high_iops_msix_index;
7276 else
7277 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7278 break;
7279 }
7280 if (ioc->atomic_desc_capable) {
7281 ioc->put_smid_default = &_base_put_smid_default_atomic;
7282 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
7283 ioc->put_smid_fast_path =
7284 &_base_put_smid_fast_path_atomic;
7285 ioc->put_smid_hi_priority =
7286 &_base_put_smid_hi_priority_atomic;
7287 } else {
7288 ioc->put_smid_default = &_base_put_smid_default;
7289 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
7290 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
7291 if (ioc->is_mcpu_endpoint)
7292 ioc->put_smid_scsi_io =
7293 &_base_put_smid_mpi_ep_scsi_io;
7294 else
7295 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
7296 }
7302
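	/*
	 * These function pointers are for MPI requests that do not require
	 * IEEE scatter gather elements, such as Configuration Pages and
	 * SAS IOUNIT Control.
	 */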
7303 ioc->build_sg_mpi = &_base_build_sg;
7304 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
7305
7306 r = _base_make_ioc_ready(ioc, SOFT_RESET);
7307 if (r)
7308 goto out_free_resources;
7309
7310 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
7311 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
7312 if (!ioc->pfacts) {
7313 r = -ENOMEM;
7314 goto out_free_resources;
7315 }
7316
7317 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
7318 r = _base_get_port_facts(ioc, i);
7319 if (r) {
7320 rc = _base_check_for_fault_and_issue_reset(ioc);
7321 if (rc || (_base_get_port_facts(ioc, i)))
7322 goto out_free_resources;
7323 }
7324 }
7325
7326 r = _base_allocate_memory_pools(ioc);
7327 if (r)
7328 goto out_free_resources;
7329
7330 if (irqpoll_weight > 0)
7331 ioc->thresh_hold = irqpoll_weight;
7332 else
7333 ioc->thresh_hold = ioc->hba_queue_depth/4;
7334
7335 _base_init_irqpolls(ioc);
7336 init_waitqueue_head(&ioc->reset_wq);
7337
7338
7339 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
7340 if (ioc->facts.MaxDevHandle % 8)
7341 ioc->pd_handles_sz++;
7342 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
7343 GFP_KERNEL);
7344 if (!ioc->pd_handles) {
7345 r = -ENOMEM;
7346 goto out_free_resources;
7347 }
7348 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
7349 GFP_KERNEL);
7350 if (!ioc->blocking_handles) {
7351 r = -ENOMEM;
7352 goto out_free_resources;
7353 }
7354
7355
7356 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
7357 if (ioc->facts.MaxDevHandle % 8)
7358 ioc->pend_os_device_add_sz++;
7359 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
7360 GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}
7363
7364 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
7365 ioc->device_remove_in_progress =
7366 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}
7369
7370 ioc->fwfault_debug = mpt3sas_fwfault_debug;
7371
7372
7373 mutex_init(&ioc->base_cmds.mutex);
7374 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7375 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7376
7377
7378 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7379 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7380
7381
7382 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7383 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
7384 mutex_init(&ioc->transport_cmds.mutex);
7385
7386
7387 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7388 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7389 mutex_init(&ioc->scsih_cmds.mutex);
7390
7391
7392 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7393 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
7394 mutex_init(&ioc->tm_cmds.mutex);
7395
7396
7397 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7398 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
7399 mutex_init(&ioc->config_cmds.mutex);
7400
7401
7402 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
7403 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
7404 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
7405 mutex_init(&ioc->ctl_cmds.mutex);
7406
7407 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
7408 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
7409 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
7410 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
7411 r = -ENOMEM;
7412 goto out_free_resources;
7413 }
7414
7415 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
7416 ioc->event_masks[i] = -1;
7417
7418
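	/* here we enable the events we care about */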
7419 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
7420 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
7421 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
7422 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
7423 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
7424 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
7425 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
7426 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
7427 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
7428 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
7429 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
7430 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
7431 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
7432 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
7433 if (ioc->is_gen35_ioc) {
7434 _base_unmask_events(ioc,
7435 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
7436 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
7437 _base_unmask_events(ioc,
7438 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
7439 }
7440 }
7441 r = _base_make_ioc_operational(ioc);
7442 if (r)
7443 goto out_free_resources;
7448
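	/*
	 * Copy the current IOCFacts into prev_fw_facts; they are compared
	 * after an online firmware upgrade to detect changed attributes.
	 */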
7449 memcpy(&ioc->prev_fw_facts, &ioc->facts,
7450 sizeof(struct mpt3sas_facts));
7451
7452 ioc->non_operational_loop = 0;
7453 ioc->ioc_coredump_loop = 0;
7454 ioc->got_task_abort_from_ioctl = 0;
7455 return 0;
7456
7457 out_free_resources:
7458
7459 ioc->remove_host = 1;
7460
7461 mpt3sas_base_free_resources(ioc);
7462 _base_release_memory_pools(ioc);
7463 pci_set_drvdata(ioc->pdev, NULL);
7464 kfree(ioc->cpu_msix_table);
7465 if (ioc->is_warpdrive)
7466 kfree(ioc->reply_post_host_index);
7467 kfree(ioc->pd_handles);
7468 kfree(ioc->blocking_handles);
7469 kfree(ioc->device_remove_in_progress);
7470 kfree(ioc->pend_os_device_add);
7471 kfree(ioc->tm_cmds.reply);
7472 kfree(ioc->transport_cmds.reply);
7473 kfree(ioc->scsih_cmds.reply);
7474 kfree(ioc->config_cmds.reply);
7475 kfree(ioc->base_cmds.reply);
7476 kfree(ioc->port_enable_cmds.reply);
7477 kfree(ioc->ctl_cmds.reply);
7478 kfree(ioc->ctl_cmds.sense);
7479 kfree(ioc->pfacts);
7480 ioc->ctl_cmds.reply = NULL;
7481 ioc->base_cmds.reply = NULL;
7482 ioc->tm_cmds.reply = NULL;
7483 ioc->scsih_cmds.reply = NULL;
7484 ioc->transport_cmds.reply = NULL;
7485 ioc->config_cmds.reply = NULL;
7486 ioc->pfacts = NULL;
7487 return r;
7488}
7494
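/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 */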
7495void
7496mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
7497{
7498 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7499
7500 mpt3sas_base_stop_watchdog(ioc);
7501 mpt3sas_base_free_resources(ioc);
7502 _base_release_memory_pools(ioc);
7503 mpt3sas_free_enclosure_list(ioc);
7504 pci_set_drvdata(ioc->pdev, NULL);
7505 kfree(ioc->cpu_msix_table);
7506 if (ioc->is_warpdrive)
7507 kfree(ioc->reply_post_host_index);
7508 kfree(ioc->pd_handles);
7509 kfree(ioc->blocking_handles);
7510 kfree(ioc->device_remove_in_progress);
7511 kfree(ioc->pend_os_device_add);
7512 kfree(ioc->pfacts);
7513 kfree(ioc->ctl_cmds.reply);
7514 kfree(ioc->ctl_cmds.sense);
7515 kfree(ioc->base_cmds.reply);
7516 kfree(ioc->port_enable_cmds.reply);
7517 kfree(ioc->tm_cmds.reply);
7518 kfree(ioc->transport_cmds.reply);
7519 kfree(ioc->scsih_cmds.reply);
7520 kfree(ioc->config_cmds.reply);
7521}
7526
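/**
 * _base_pre_reset_handler - pre reset handler
 * @ioc: per adapter object
 */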
7527static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
7528{
7529 mpt3sas_scsih_pre_reset_handler(ioc);
7530 mpt3sas_ctl_pre_reset_handler(ioc);
7531 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
7532}
7537
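/**
 * _base_clear_outstanding_mpt_commands - clear outstanding mpt commands
 * @ioc: per adapter object
 */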
7538static void
7539_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
7540{
7541 dtmprintk(ioc,
7542 ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
7543 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
7544 ioc->transport_cmds.status |= MPT3_CMD_RESET;
7545 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
7546 complete(&ioc->transport_cmds.done);
7547 }
7548 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7549 ioc->base_cmds.status |= MPT3_CMD_RESET;
7550 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
7551 complete(&ioc->base_cmds.done);
7552 }
7553 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7554 ioc->port_enable_failed = 1;
7555 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
7556 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
7557 if (ioc->is_driver_loading) {
7558 ioc->start_scan_failed =
7559 MPI2_IOCSTATUS_INTERNAL_ERROR;
7560 ioc->start_scan = 0;
7561 ioc->port_enable_cmds.status =
7562 MPT3_CMD_NOT_USED;
7563 } else {
7564 complete(&ioc->port_enable_cmds.done);
7565 }
7566 }
7567 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
7568 ioc->config_cmds.status |= MPT3_CMD_RESET;
7569 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
7570 ioc->config_cmds.smid = USHRT_MAX;
7571 complete(&ioc->config_cmds.done);
7572 }
7573}
7578
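/**
 * _base_clear_outstanding_commands - clear all outstanding commands
 * @ioc: per adapter object
 */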
7579static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
7580{
7581 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
7582 mpt3sas_ctl_clear_outstanding_ioctls(ioc);
7583 _base_clear_outstanding_mpt_commands(ioc);
7584}
7589
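/**
 * _base_reset_done_handler - reset done handler
 * @ioc: per adapter object
 */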
7590static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
7591{
7592 mpt3sas_scsih_reset_done_handler(ioc);
7593 mpt3sas_ctl_reset_done_handler(ioc);
7594 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
7595}
7603
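/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
 * @ioc: per adapter object
 *
 * Waits up to ten seconds for all outstanding SCSI I/O to complete,
 * typically prior to putting the controller into reset.
 */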
7604void
7605mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
7606{
7607 u32 ioc_state;
7608
7609 ioc->pending_io_count = 0;
7610
7611 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7612 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
7613 return;
7614
7615
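	/* pending command count */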
7616 ioc->pending_io_count = scsi_host_busy(ioc->shost);
7617
7618 if (!ioc->pending_io_count)
7619 return;
7620
7621
7622 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
7623}
7631
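/**
 * _base_check_ioc_facts_changes - look for increases/decreases of IOCFacts
 *	attributes after an online firmware upgrade and update the IOC
 *	variables accordingly.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */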
7632static int
7633_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
7634{
7635 u16 pd_handles_sz;
7636 void *pd_handles = NULL, *blocking_handles = NULL;
7637 void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
7638 struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
7639
7640 if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
7641 pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
7642 if (ioc->facts.MaxDevHandle % 8)
7643 pd_handles_sz++;
7644
7645 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
7646 GFP_KERNEL);
7647 if (!pd_handles) {
7648 ioc_info(ioc,
7649 "Unable to allocate the memory for pd_handles of sz: %d\n",
7650 pd_handles_sz);
7651 return -ENOMEM;
7652 }
7653 memset(pd_handles + ioc->pd_handles_sz, 0,
7654 (pd_handles_sz - ioc->pd_handles_sz));
7655 ioc->pd_handles = pd_handles;
7656
7657 blocking_handles = krealloc(ioc->blocking_handles,
7658 pd_handles_sz, GFP_KERNEL);
7659 if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for blocking_handles of sz: %d\n",
			    pd_handles_sz);
7664 return -ENOMEM;
7665 }
7666 memset(blocking_handles + ioc->pd_handles_sz, 0,
7667 (pd_handles_sz - ioc->pd_handles_sz));
7668 ioc->blocking_handles = blocking_handles;
7669 ioc->pd_handles_sz = pd_handles_sz;
7670
7671 pend_os_device_add = krealloc(ioc->pend_os_device_add,
7672 pd_handles_sz, GFP_KERNEL);
7673 if (!pend_os_device_add) {
7674 ioc_info(ioc,
7675 "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
7676 pd_handles_sz);
7677 return -ENOMEM;
7678 }
7679 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
7680 (pd_handles_sz - ioc->pend_os_device_add_sz));
7681 ioc->pend_os_device_add = pend_os_device_add;
7682 ioc->pend_os_device_add_sz = pd_handles_sz;
7683
7684 device_remove_in_progress = krealloc(
7685 ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
7686 if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
			    pd_handles_sz);
7691 return -ENOMEM;
7692 }
7693 memset(device_remove_in_progress +
7694 ioc->device_remove_in_progress_sz, 0,
7695 (pd_handles_sz - ioc->device_remove_in_progress_sz));
7696 ioc->device_remove_in_progress = device_remove_in_progress;
7697 ioc->device_remove_in_progress_sz = pd_handles_sz;
7698 }
7699
7700 memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
7701 return 0;
7702}
7710
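/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */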
7711int
7712mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
7713 enum reset_type type)
7714{
7715 int r;
7716 unsigned long flags;
7717 u32 ioc_state;
7718 u8 is_fault = 0, is_trigger = 0;
7719
7720 dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
7721
7722 if (ioc->pci_error_recovery) {
7723 ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
7724 r = 0;
7725 goto out_unlocked;
7726 }
7727
7728 if (mpt3sas_fwfault_debug)
7729 mpt3sas_halt_firmware(ioc);
7730
7731
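	/* wait for an active reset in progress to complete */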
7732 mutex_lock(&ioc->reset_in_progress_mutex);
7733
7734 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
7735 ioc->shost_recovery = 1;
7736 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
7737
7738 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
7739 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
7740 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
7741 MPT3_DIAG_BUFFER_IS_RELEASED))) {
7742 is_trigger = 1;
7743 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7744 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
7745 (ioc_state & MPI2_IOC_STATE_MASK) ==
7746 MPI2_IOC_STATE_COREDUMP)
7747 is_fault = 1;
7748 }
7749 _base_pre_reset_handler(ioc);
7750 mpt3sas_wait_for_commands_to_complete(ioc);
7751 mpt3sas_base_mask_interrupts(ioc);
7752 r = _base_make_ioc_ready(ioc, type);
7753 if (r)
7754 goto out;
7755 _base_clear_outstanding_commands(ioc);
7759
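	/*
	 * If this hard reset is called while port enable is active,
	 * there is no reason to call make_ioc_operational.
	 */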
7760 if (ioc->is_driver_loading && ioc->port_enable_failed) {
7761 ioc->remove_host = 1;
7762 r = -EFAULT;
7763 goto out;
7764 }
7765 r = _base_get_ioc_facts(ioc);
7766 if (r)
7767 goto out;
7768
7769 r = _base_check_ioc_facts_changes(ioc);
7770 if (r) {
		ioc_info(ioc,
		    "Some parameters changed in the new firmware image; a system reboot is required\n");
7774 goto out;
7775 }
7776 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. "
		      "Please reboot the system and ensure that the correct "
		      "firmware version is running\n", ioc->name);
7780
7781 r = _base_make_ioc_operational(ioc);
7782 if (!r)
7783 _base_reset_done_handler(ioc);
7784
7785 out:
7786 ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
7787
7788 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
7789 ioc->shost_recovery = 0;
7790 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
7791 ioc->ioc_reset_count++;
7792 mutex_unlock(&ioc->reset_in_progress_mutex);
7793
7794 out_unlocked:
7795 if ((r == 0) && is_trigger) {
7796 if (is_fault)
7797 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
7798 else
7799 mpt3sas_trigger_master(ioc,
7800 MASTER_TRIGGER_ADAPTER_RESET);
7801 }
7802 dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
7803 return r;
7804}
7805