1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/io.h>
59#include <linux/time.h>
60#include <linux/ktime.h>
61#include <linux/kthread.h>
62#include <asm/page.h>
63#include <linux/aer.h>
64
65
66#include "mpt3sas_base.h"
67
/* callback handlers registered by upper layers, indexed by cb_idx */
static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];


/* interval, in milliseconds, between firmware fault-state polls */
#define FAULT_POLLING_INTERVAL 1000


/* upper bounds applied when sizing the HBA queue and chain pools */
#define MAX_HBA_QUEUE_DEPTH 30000
#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
	"irq poll weight (default= one fourth of HBA queue depth)");

/* exposed via module_param_call() further below so that updates are
 * propagated to every registered adapter, not just stored here
 */
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
	"0 - balanced: high iops mode is enabled &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
	"1 - iops: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues,\n\t\t"
	"2 - latency: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
	"\t\tdefault - default perf_mode is 'balanced'"
	);

/* decoded values of the perf_mode module parameter above */
enum mpt3sas_perf_mode {
	MPT_PERF_MODE_DEFAULT	= -1,
	MPT_PERF_MODE_BALANCED	= 0,
	MPT_PERF_MODE_IOPS	= 1,
	MPT_PERF_MODE_LATENCY	= 2,
};

/* forward declarations for helpers defined later in this file */
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
	u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
133
134
135
136
137
138
139
140
141
142
143
144
145
146u8
147mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
148 u8 status, void *mpi_request, int sz)
149{
150 u8 issue_reset = 0;
151
152 if (!(status & MPT3_CMD_RESET))
153 issue_reset = 1;
154
155 ioc_err(ioc, "Command %s\n",
156 issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
157 _debug_dump_mf(mpi_request, sz);
158
159 return issue_reset;
160}
161
162
163
164
165
166
167
168
169static int
170_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
171{
172 int ret = param_set_int(val, kp);
173 struct MPT3SAS_ADAPTER *ioc;
174
175 if (ret)
176 return ret;
177
178
179 pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
180 spin_lock(&gioc_lock);
181 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
182 ioc->fwfault_debug = mpt3sas_fwfault_debug;
183 spin_unlock(&gioc_lock);
184 return 0;
185}
186module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
187 param_get_int, &mpt3sas_fwfault_debug, 0644);
188
189
190
191
192
193
194
195
196static inline u32
197_base_readl_aero(const volatile void __iomem *addr)
198{
199 u32 i = 0, ret_val;
200
201 do {
202 ret_val = readl(addr);
203 i++;
204 } while (ret_val == 0 && i < 3);
205
206 return ret_val;
207}
208
/**
 * _base_readl - plain register read (non-Aero controllers)
 * @addr: register address to read
 *
 * Return: the register value.
 */
static inline u32
_base_readl(const volatile void __iomem *addr)
{
	return readl(addr);
}
214
215
216
217
218
219
220
221
222
/**
 * _base_clone_reply_to_sys_mem - copy a reply-free entry into the
 *	controller's BAR0 system-interface memory
 * @ioc: per adapter object
 * @reply: reply-frame address to post
 * @index: slot index within the reply free pool
 *
 * The reply free pool lives in BAR0 space directly after the request
 * frames: chip base + MPI_FRAME_START_OFFSET + (credits * request_sz).
 * NOTE(review): presumably only used on MCPU/warpdrive-style parts that
 * expose host memory through BAR0 — confirm against callers.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
	u32 index)
{
	/*
	 * 256 is offset within sys register.
	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
	 */
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
			MPI_FRAME_START_OFFSET +
			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));

	writel(reply, reply_free_iomem);
}
239
240
241
242
243
244
245
246
247
248static void
249_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
250{
251 int i;
252 u32 *src_virt_mem = (u32 *)src;
253
254 for (i = 0; i < size/4; i++)
255 writel((u32)src_virt_mem[i],
256 (void __iomem *)dst_iomem + (i * 4));
257}
258
259
260
261
262
263
264
265
266static void
267_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
268{
269 int i;
270 u32 *src_virt_mem = (u32 *)(src);
271
272 for (i = 0; i < size/4; i++)
273 writel((u32)src_virt_mem[i],
274 (void __iomem *)dst_iomem + (i * 4));
275}
276
277
278
279
280
281
282
283
284
285
286
/**
 * _base_get_chain - virtual (ioremapped) address of a chain buffer in
 *	BAR0 space
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: chain index (0-based) within this smid's chain region
 *
 * Chain buffers start after the request frames and the reply free pool.
 * Each smid owns MaxChainDepth chain slots of request_sz bytes each.
 *
 * Return: iomem address of the requested chain slot.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 sge_chain_count)
{
	void __iomem *base_chain, *chain_virt;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_virt;
}
301
302
303
304
305
306
307
308
309
310
311
312
/**
 * _base_get_chain_phys - physical address of a chain buffer in BAR0 space
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: chain index (0-based) within this smid's chain region
 *
 * Physical-address counterpart of _base_get_chain(); uses the same layout
 * (request frames, then reply free pool, then per-smid chain slots) but
 * based at ioc->chip_phys instead of the ioremapped ioc->chip.
 *
 * Return: physical address of the requested chain slot.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 sge_chain_count)
{
	phys_addr_t base_chain_phys, chain_phys;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_phys;
}
327
328
329
330
331
332
333
334
335
336
337
338
/**
 * _base_get_buffer_bar0 - virtual (ioremapped) address of the per-smid
 *	data buffer in BAR0 space
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Data buffers start after the last chain region (computed by asking
 * _base_get_chain() for one slot past the final credit); each smid then
 * owns a 64 KiB buffer.
 *
 * Return: iomem address of the smid's data buffer.
 */
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	/* end of the chain region == start of the data-buffer region */
	void __iomem *chain_end = _base_get_chain(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end + (smid * 64 * 1024);
}
349
350
351
352
353
354
355
356
357
358
359
/**
 * _base_get_buffer_phys_bar0 - physical address of the per-smid data
 *	buffer in BAR0 space
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Physical-address counterpart of _base_get_buffer_bar0(): same layout,
 * based on _base_get_chain_phys().
 *
 * Return: physical address of the smid's data buffer.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end_phys + (smid * 64 * 1024);
}
369
370
371
372
373
374
375
376
377
378
379
380
381static void *
382_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
383 dma_addr_t chain_buffer_dma)
384{
385 u16 index, j;
386 struct chain_tracker *ct;
387
388 for (index = 0; index < ioc->scsiio_depth; index++) {
389 for (j = 0; j < ioc->chains_needed_per_io; j++) {
390 ct = &ioc->chain_lookup[index].chains_per_smid[j];
391 if (ct && ct->chain_buffer_dma == chain_buffer_dma)
392 return ct->chain_buffer;
393 }
394 }
395 ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
396 return NULL;
397}
398
399
400
401
402
403
404
405
406
407
408
/**
 * _clone_sg_entries - clone a request's SG entries (and the data they
 *	describe) into the controller's BAR0 system-interface memory
 * @ioc: per adapter object
 * @mpi_request: the MPI request frame whose SGL is walked
 * @smid: system request message index owning the BAR0 buffer/chain slots
 *
 * Only SCSI_IO and CONFIG requests are handled; anything else returns
 * immediately. For host-to-IOC (write) transfers the payload behind each
 * simple SGE is copied into the smid's BAR0 data buffer and the SGE's
 * Address is rewritten to the BAR0 physical address. Chain SGEs are
 * followed through host memory and, at the end, each chain frame is
 * cloned into its BAR0 chain slot.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
	void *mpi_request, u16 smid)
{
	Mpi2SGESimple32_t *sgel, *sgel_next;
	u32  sgl_flags, sge_chain_count = 0;
	bool is_write = false;
	u16 i = 0;
	void __iomem *buffer_iomem;
	phys_addr_t buffer_iomem_phys;
	void __iomem *buff_ptr;
	phys_addr_t buff_ptr_phys;
	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	phys_addr_t dst_addr_phys;
	MPI2RequestHeader_t *request_hdr;
	struct scsi_cmnd *scmd;
	struct scatterlist *sg_scmd = NULL;
	int is_scsiio_req = 0;

	request_hdr = (MPI2RequestHeader_t *) mpi_request;

	/* locate the first SGE; only SCSI_IO and CONFIG carry one we clone */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
		Mpi25SCSIIORequest_t *scsiio_request =
			(Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t  *config_req =
			(Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;

	/*
	 * For a SCSI_IO request we need the scatterlist to find the source
	 * data for each simple SGE; a missing scmd means the request has
	 * already completed/been freed, so bail out.
	 */
	if (is_scsiio_req) {
		/* get scsi_cmnd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			ioc_err(ioc, "scmd is NULL\n");
			return;
		}

		/* get scsi_sglist using scmd */
		sg_scmd = scsi_sglist(scmd);
	}

	/*
	 * The smid's dedicated 64 KiB BAR0 buffer is consumed sequentially:
	 * buff_ptr / buff_ptr_phys advance past each cloned simple SGE's
	 * payload. The controller only accepts 32-bit addresses here, hence
	 * the WARN_ON when a physical address exceeds U32_MAX.
	 */
	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

	buff_ptr = buffer_iomem;
	buff_ptr_phys = buffer_iomem_phys;
	WARN_ON(buff_ptr_phys > U32_MAX);

	/* direction: HOST_TO_IOC means the host payload must be cloned */
	if (le32_to_cpu(sgel->FlagsLength) &
			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = true;

	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

		sgl_flags =
		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
			/*
			 * Chain SGE: resolve its DMA address back to the
			 * host-virtual chain frame, remember both sides of
			 * the clone for the final pass, then point the SGE
			 * at the BAR0 copy and continue walking the chain.
			 */
			sgel_next =
			    _base_get_chain_buffer_dma_to_chain_buffer(ioc,
			    le32_to_cpu(sgel->Address));
			if (sgel_next == NULL)
				return;
			/*
			 * This is coping 128 byte chain
			 * frame (not a host buffer)
			 */
			dst_chain_addr[sge_chain_count] =
				_base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
						(void *) sgel_next;
			dst_addr_phys = _base_get_chain_phys(ioc,
						smid, sge_chain_count);
			WARN_ON(dst_addr_phys > U32_MAX);
			sgel->Address =
				cpu_to_le32(lower_32_bits(dst_addr_phys));
			sgel = sgel_next;
			sge_chain_count++;
			break;
		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
			if (is_write) {
				if (is_scsiio_req) {
					/* clone the scatterlist segment's
					 * data into the BAR0 buffer and
					 * retarget the SGE at it
					 */
					_base_clone_to_sys_mem(buff_ptr,
					    sg_virt(sg_scmd),
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					/*
					 * FIXME: this relies on a a zero
					 * PCI mem_offset.
					 */
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				} else {
					/* CONFIG request: source is the
					 * driver's config page buffer
					 */
					_base_clone_to_sys_mem(buff_ptr,
					    ioc->config_vaddr,
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				}
			}
			/* low 24 bits of FlagsLength = segment length */
			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			if ((le32_to_cpu(sgel->FlagsLength) &
			    (MPI2_SGE_FLAGS_END_OF_BUFFER
					<< MPI2_SGE_FLAGS_SHIFT)))
				goto eob_clone_chain;
			else {
				/*
				 * Every single element in MPT will have
				 * associated sg_next. Better to sanity that
				 * sg_next is not NULL, but it will be a bug
				 * if it is null.
				 */
				if (is_scsiio_req) {
					sg_scmd = sg_next(sg_scmd);
					if (sg_scmd)
						sgel++;
					else
						goto eob_clone_chain;
				}
			}
			break;
		}
	}

eob_clone_chain:
	/* final pass: copy each recorded chain frame into its BAR0 slot */
	for (i = 0; i < sge_chain_count; i++) {
		if (is_scsiio_req)
			_base_clone_to_sys_mem(dst_chain_addr[i],
				src_chain_addr[i], ioc->request_sz);
	}
}
575
576
577
578
579
580
581
582
583
584static int mpt3sas_remove_dead_ioc_func(void *arg)
585{
586 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
587 struct pci_dev *pdev;
588
589 if (!ioc)
590 return -1;
591
592 pdev = ioc->pdev;
593 if (!pdev)
594 return -1;
595 pci_stop_and_remove_bus_device_locked(pdev);
596 return 0;
597}
598
599
600
601
602
603
604
/**
 * _base_sync_drv_fw_timestamp - push the host's current wall-clock time
 *	to the controller firmware
 * @ioc: per adapter object
 *
 * Sends an IO_UNIT_CONTROL (SET_IOC_PARAMETER / SYNC_TIMESTAMP) request
 * carrying the current time in milliseconds, split across Reserved7
 * (low 32 bits) and IOCParameterValue (high 32 bits), and waits for the
 * completion. On timeout, mpt3sas_check_cmd_timeout() decides whether a
 * hard reset is issued. Serialized with other internal scsih commands
 * via ioc->scsih_cmds.mutex.
 */
static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;
	ktime_t current_time;
	u64 TimeStamp = 0;
	u8 issue_reset = 0;

	mutex_lock(&ioc->scsih_cmds.mutex);
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
	mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
	current_time = ktime_get_real();
	TimeStamp = ktime_to_ms(current_time);
	/* timestamp is split: low half in Reserved7, high half in
	 * IOCParameterValue (firmware-defined layout for SYNC_TIMESTAMP)
	 */
	mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32);
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	dinitprintk(ioc, ioc_info(ioc,
	    "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
	    TimeStamp));
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
		MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/* may set issue_reset; size argument is in 32-bit words */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		dinitprintk(ioc, ioc_info(ioc,
		    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo)));
	}
issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
out:
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
663
664
665
666
667
668
669
/**
 * _base_fault_reset_work - periodic firmware fault-state poller
 * @work: delayed work item embedded in the adapter (fault_reset_work)
 *
 * Runs every FAULT_POLLING_INTERVAL ms. Reads the IOC doorbell state and:
 *  - non-operational for >5 polls: flushes outstanding commands, marks the
 *    host for removal, and spawns a kthread to remove the dead PCI device;
 *  - COREDUMP: masks interrupts, clears outstanding commands, then waits
 *    up to the manufacturing-page (or default) coredump timeout before
 *    resetting;
 *  - any other non-OPERATIONAL state: issues a hard reset;
 *  - periodically re-syncs the driver/firmware timestamp.
 *
 * NOTE: the "goto rearm_timer" paths jump to the label while holding
 * ioc_reset_in_progress_lock; the matching unlock is after the label.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long	 flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	/* skip the poll while a reset is already in progress (unless we are
	 * mid-coredump handling) or PCI error recovery is underway
	 */
	if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
			ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		ioc_err(ioc, "SAS host is non-operational !!!!\n");

		/* It may be possible that EEH recovery can resolve some of
		 * pci bus failure issues rather removing the dead ioc function
		 * by considering controller is in a non-operational state. So
		 * here priority is given to the EEH recovery. If it doesn't
		 * not resolve this issue, mpt3sas driver will consider this
		 * controller to non-operational state and remove the dead ioc
		 * function.
		 *
		 * In this section, max 5 seconds of waiting time (polling time)
		 * is given for EEH recovery.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
							 flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead hba from a separate thread; the removal
		 * cannot run from this (workqueue) context.
		 */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
				__func__);
		else
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
				__func__);
		return; /* don't rearm timer */
	}

	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
		u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
		    ioc->manu_pg11.CoreDumpTOSec :
		    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

		/* convert the timeout from seconds into poll iterations */
		timeout /= (FAULT_POLLING_INTERVAL/1000);

		if (ioc->ioc_coredump_loop == 0) {
			mpt3sas_print_coredump_info(ioc,
			    doorbell & MPI2_DOORBELL_DATA_MASK);
			/* do not accept any IOs and disable the interrupts */
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			ioc->shost_recovery = 1;
			spin_unlock_irqrestore(
			    &ioc->ioc_reset_in_progress_lock, flags);
			mpt3sas_base_mask_interrupts(ioc);
			_base_clear_outstanding_commands(ioc);
		}

		ioc_info(ioc, "%s: CoreDump loop %d.",
		    __func__, ioc->ioc_coredump_loop);

		/* Wait until CoreDump completes or times out */
		if (ioc->ioc_coredump_loop++ < timeout) {
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			goto rearm_timer;
		}
	}

	if (ioc->ioc_coredump_loop) {
		if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
			ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
			    __func__, ioc->ioc_coredump_loop);
		else
			ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
			    __func__, ioc->ioc_coredump_loop);
		ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
	}
	ioc->non_operational_loop = 0;
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		ioc_warn(ioc, "%s: hard reset: %s\n",
			 __func__, rc == 0 ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP)
			mpt3sas_print_coredump_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		/* reset failed and the IOC never came back: stop polling */
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}
	ioc->ioc_coredump_loop = 0;
	if (ioc->time_sync_interval &&
	    ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
		ioc->timestamp_update_count = 0;
		_base_sync_drv_fw_timestamp(ioc);
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
801
802
803
804
805
806
807
/**
 * mpt3sas_base_start_watchdog - start the fault-state polling watchdog
 * @ioc: per adapter object
 *
 * Creates a single-threaded workqueue and queues _base_fault_reset_work()
 * to run every FAULT_POLLING_INTERVAL ms. No-op if the watchdog queue
 * already exists.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long	 flags;

	if (ioc->fault_reset_work_q)
		return;

	ioc->timestamp_update_count = 0;
	/* initialize fault polling */

	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
	    ioc->driver_name, ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
		return;
	}
	/* recheck under the lock: stop_watchdog may have cleared the queue */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
836
837
838
839
840
841
842
843void
844mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
845{
846 unsigned long flags;
847 struct workqueue_struct *wq;
848
849 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
850 wq = ioc->fault_reset_work_q;
851 ioc->fault_reset_work_q = NULL;
852 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
853 if (wq) {
854 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
855 flush_workqueue(wq);
856 destroy_workqueue(wq);
857 }
858}
859
860
861
862
863
864
/**
 * mpt3sas_base_fault_info - log an IOC fault code
 * @ioc: per adapter object
 * @fault_code: fault code (doorbell data) reported by the firmware
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
{
	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
870
871
872
873
874
875
876
877
/**
 * mpt3sas_base_coredump_info - log an IOC coredump state code
 * @ioc: per adapter object
 * @fault_code: coredump code (doorbell data) reported by the firmware
 */
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}
883
884
885
886
887
888
889
890
891
892int
893mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
894 const char *caller)
895{
896 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
897 ioc->manu_pg11.CoreDumpTOSec :
898 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
899
900 int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
901 timeout);
902
903 if (ioc_state)
904 ioc_err(ioc,
905 "%s: CoreDump timed out. (ioc_state=0x%x)\n",
906 caller, ioc_state);
907 else
908 ioc_info(ioc,
909 "%s: CoreDump completed. (ioc_state=0x%x)\n",
910 caller, ioc_state);
911
912 return ioc_state;
913}
914
915
916
917
918
919
920
921
922
923
/**
 * mpt3sas_halt_firmware - halt the firmware for debugging
 * @ioc: per adapter object
 *
 * Only active when the fwfault_debug module parameter is set. Dumps the
 * kernel stack, logs the current fault/coredump state (or forces a halt
 * by writing the 0xC0FFEE00 debug doorbell when the IOC is healthy), and
 * then either spins forever (fwfault_debug == 2, preserving state for a
 * debugger) or panics the kernel.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else {
		/* debug doorbell write that halts the firmware */
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		ioc_err(ioc, "Firmware is halted due to command timeout\n");
	}

	if (ioc->fwfault_debug == 2)
		for (;;)
			; /* deliberate spin: keep state intact for debugger */
	else
		panic("panic in %s\n", __func__);
}
953
954
955
956
957
958
959
/**
 * _base_sas_ioc_info - decode and log a non-success IOCStatus
 * @ioc: per adapter object
 * @mpi_reply: the reply frame carrying the IOCStatus
 * @request_hdr: the request frame that produced the reply, dumped for
 *	debugging when the status is noteworthy
 *
 * Translates the IOCStatus field into a human-readable string and logs
 * it together with a hex dump of the request frame. SCSI I/O, RAID
 * passthrough and event-notification replies are skipped (they have
 * their own reporting paths), as are expected config-page misses.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID SCSI_IO_PASSTHROUGH, Event Notification */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	/*
	 * Older Firmware version doesn't support driver trigger pages.
	 * So, skip displaying 'config invalid type' type
	 * of error message.
	 */
	if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;

		if ((rqst->ExtPageType ==
		    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
		    !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
			return;
		}
	}

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply — handled elsewhere, so no desc (nothing is logged)
****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* pick a frame size for the hex dump based on the request type */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
		    ioc->sge_size;
		func_str = "nvme_encapsulated";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		 desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}
1190
1191
1192
1193
1194
1195
/**
 * _base_display_event_data - debug-log a firmware event notification
 * @ioc: per adapter object
 * @mpi_reply: the event notification reply frame
 *
 * Only active when MPT_DEBUG_EVENTS logging is enabled. Translates the
 * event code into a short description; SAS discovery and PCIe
 * enumeration events additionally log their status words inline.
 * IR-related events are suppressed on adapters that hide IR messages.
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		if (!ioc->hide_ir_msg)
			desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		/* logged inline with its discovery status, then done */
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		ioc_info(ioc, "Discovery: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	case MPI2_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		desc = "Cable Event";
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		desc = "PCIE Device Status Change";
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
	{
		/* logged inline with its enumeration status, then done */
		Mpi26EventDataPCIeEnumeration_t *event_data =
			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
		ioc_info(ioc, "PCIE Enumeration: (%s)",
			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
			 "start" : "stop");
		if (event_data->EnumerationStatus)
			pr_cont("enumeration_status(0x%08x)",
				le32_to_cpu(event_data->EnumerationStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	}

	if (!desc)
		return;

	ioc_info(ioc, "%s\n", desc);
}
1307
1308
1309
1310
1311
1312
/**
 * _base_sas_log_info - decode and log a firmware log_info word
 * @ioc: per adapter object
 * @log_info: 32-bit log info from the reply's IOCLogInfo field
 *
 * Splits the word into bus_type / originator / code / subcode via a
 * bitfield union (NOTE(review): field order assumes the compiler packs
 * these bitfields LSB-first, as on the architectures this driver
 * targets — confirm if porting). Only SAS (bus_type 3) entries are
 * logged; several known-noisy codes are filtered out.
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
{
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
		 log_info,
		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
1360
1361
1362
1363
1364
1365
1366
1367
/**
 * _base_display_reply_info - debug-log reply status and fire MPI triggers
 * @ioc: per adapter object
 * @smid: system request message index of the completed request
 * @msix_index: MSIX table index the reply arrived on (unused here)
 * @reply: reply frame address (low 32-bit dma address)
 *
 * Looks up the reply frame, logs the IOCStatus (when MPT_DEBUG_REPLY
 * logging is enabled) and any attached log_info word, then feeds both to
 * the MPI diagnostic-trigger machinery.
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc , mpi_reply,
		     mpt3sas_base_get_msg_frame(ioc, smid));
	}

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: lower 32bit physical address of the reply frame
 *
 * Completes ioc->base_cmds: copies the reply frame (when present),
 * updates the command status flags and wakes the waiter.
 *
 * Return: 1 meaning the caller (reply loop) should free the smid,
 * except for EVENT_ACK replies which are routed to the pending
 * internal-commands handler.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	/* EVENT_ACK replies may unblock queued (delayed) internal cmds */
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	/* stale completion: nobody is waiting on base_cmds */
	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		/* MsgLength is in 32bit dwords */
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
/**
 * _base_async_event - main callback handler for firmware asyn events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: lower 32bit physical address of the reply frame
 *
 * Displays the event, sends an EVENT_ACK back to the firmware when the
 * event requires one (queueing it on delayed_event_ack_list if no smid
 * is currently available), then forwards the event to the scsih and
 * ctl modules.
 *
 * Return: always 1 (reply frame is returned to the free queue by the
 * caller).
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		/*
		 * No free smid right now; remember the event so the ack
		 * can be sent once an internal command completes.
		 */
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
					GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
				&ioc->delayed_event_ack_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
				    le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	/* build and fire the EVENT_ACK request */
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}
1500
1501static struct scsiio_tracker *
1502_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1503{
1504 struct scsi_cmnd *cmd;
1505
1506 if (WARN_ON(!smid) ||
1507 WARN_ON(smid >= ioc->hi_priority_smid))
1508 return NULL;
1509
1510 cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1511 if (cmd)
1512 return scsi_cmd_priv(cmd);
1513
1514 return NULL;
1515}
1516
1517
1518
1519
1520
1521
1522
1523
/**
 * _base_get_cb_idx - obtain the callback index registered for a smid
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * The smid space is partitioned into scsiio, ctl, high-priority and
 * internal regions; each region has its own way of finding the cb_idx
 * that was registered by the owning module.
 *
 * Return: callback index, or 0xFF when the smid is out of range or no
 * tracker can be resolved for it.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	/* topmost smid of the scsiio region is owned by the ctl module */
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
		} else if (smid == ctl_smid)
			cb_idx = ioc->ctl_cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}
	return cb_idx;
}
1549
1550
1551
1552
1553
1554
1555
1556void
1557mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1558{
1559 u32 him_register;
1560
1561 ioc->mask_interrupts = 1;
1562 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1563 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1564 writel(him_register, &ioc->chip->HostInterruptMask);
1565 ioc->base_readl(&ioc->chip->HostInterruptMask);
1566}
1567
1568
1569
1570
1571
1572
1573
1574void
1575mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1576{
1577 u32 him_register;
1578
1579 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1580 him_register &= ~MPI2_HIM_RIM;
1581 writel(him_register, &ioc->chip->HostInterruptMask);
1582 ioc->mask_interrupts = 0;
1583}
1584
/*
 * 64bit reply descriptor as posted by the IOC to the reply post queue,
 * accessible either as one 64bit word or as low/high 32bit halves.
 */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
1592
1593static u32 base_mod64(u64 dividend, u32 divisor)
1594{
1595 u32 remainder;
1596
1597 if (!divisor)
1598 pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
1599 remainder = do_div(dividend, divisor);
1600 return remainder;
1601}
1602
1603
1604
1605
1606
1607
1608
1609
1610
/**
 * _base_process_reply_queue - process completed descriptors on a queue
 * @reply_q: per IRQ's reply queue object
 *
 * Walks the reply post free queue, dispatching each descriptor to the
 * registered callback (SCSI IO success types), to the address-reply
 * path (with reply frame decode), or to the async event handler, until
 * an unused descriptor is seen.  When the completion count reaches
 * ioc->thresh_hold the remaining work is deferred to irq_poll.
 *
 * Return: number of reply descriptors processed (0 when the queue was
 * busy or empty).
 */
static int
_base_process_reply_queue(struct adapter_reply_queue *reply_q)
{
	union reply_descriptor rd;
	u64 completed_cmds;
	u8 request_descript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	completed_cmds = 0;
	/* single-entry semaphore: bail if this queue is already busy */
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return completed_cmds;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_descript_type = rpf->Default.ReplyFlags
		& MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		/* all-ones means the descriptor has not been written yet */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_descript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
			/* success descriptor: no reply frame to decode */
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			/* sanity check the reply frame address range */
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				/* smid 0: unsolicited async event */
				_base_async_event(ioc, msix_index, reply);
			}

			/* return the reply frame to the free queue */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				if (ioc->is_mcpu_endpoint)
					_base_clone_reply_to_sys_mem(ioc,
						reply,
						ioc->reply_free_host_index);
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		/* mark descriptor consumed and advance the host index */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_descript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;

		/*
		 * When the threshold is hit, update the hardware host
		 * index and hand the rest of the work to irq_poll so a
		 * single queue cannot monopolize the CPU.
		 */
		if (completed_cmds >= ioc->thresh_hold) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
						((msix_index  & 7) <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
						(msix_index <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
						&ioc->chip->ReplyPostHostIndex);
			}
			if (!reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = true;
				irq_poll_sched(&reply_q->irqpoll);
			}
			atomic_dec(&reply_q->busy);
			return completed_cmds;
		}
		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	/* warpdrive uses a per-queue host index register in system memory */
	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	/*
	 * Update the hardware reply post host index; combined reply
	 * queue mode uses one register per group of 8 MSIX vectors.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			&ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return completed_cmds;
}
1780
1781
1782
1783
1784
1785
1786
1787
1788static irqreturn_t
1789_base_interrupt(int irq, void *bus_id)
1790{
1791 struct adapter_reply_queue *reply_q = bus_id;
1792 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1793
1794 if (ioc->mask_interrupts)
1795 return IRQ_NONE;
1796 if (reply_q->irq_poll_scheduled)
1797 return IRQ_HANDLED;
1798 return ((_base_process_reply_queue(reply_q) > 0) ?
1799 IRQ_HANDLED : IRQ_NONE);
1800}
1801
1802
1803
1804
1805
1806
1807
1808
/**
 * _base_irqpoll - irq_poll handler for a reply queue
 * @irqpoll: irq_poll object embedded in the reply queue
 * @budget: irq poll weight
 *
 * Processes the reply queue with the hard interrupt line disabled;
 * once fewer entries than @budget are found, completes the poll,
 * re-enables the interrupt line and drains any entries that raced in.
 *
 * Return: number of reply descriptors processed.
 */
static int
_base_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct adapter_reply_queue *reply_q;
	int num_entries = 0;

	reply_q = container_of(irqpoll, struct adapter_reply_queue,
			irqpoll);
	/* keep the hard irq line off while polling */
	if (reply_q->irq_line_enable) {
		disable_irq_nosync(reply_q->os_irq);
		reply_q->irq_line_enable = false;
	}
	num_entries = _base_process_reply_queue(reply_q);
	if (num_entries < budget) {
		irq_poll_complete(irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		enable_irq(reply_q->os_irq);
		/*
		 * Descriptors posted between the last poll and the irq
		 * re-enable would otherwise be missed (the interrupt for
		 * them may already have fired); drain them now.
		 */
		_base_process_reply_queue(reply_q);
	}

	return num_entries;
}
1838
1839
1840
1841
1842
1843
1844
/**
 * _base_init_irqpolls - initialize the irq_poll object per reply queue
 * @ioc: per adapter object
 *
 * The irq poll weight is one fourth of the HBA queue depth; each reply
 * queue records the OS irq number of its MSIX vector for later
 * disable/enable from the poll handler.
 */
static void
_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		irq_poll_init(&reply_q->irqpoll,
			ioc->hba_queue_depth/4, _base_irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		reply_q->os_irq = pci_irq_vector(ioc->pdev,
		    reply_q->msix_index);
	}
}
1862
1863
1864
1865
1866
1867
1868
1869static inline int
1870_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1871{
1872 return (ioc->facts.IOCCapabilities &
1873 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1874}
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * @poll: poll over reply descriptor pools incase interrupt for
 *		timed-out SCSI command got delayed
 *
 * Synchronizes each non-zero MSIX vector and, where irq_poll is still
 * scheduled, flips it off/on to flush it, restoring the irq line state.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
{
	struct adapter_reply_queue *reply_q;

	/* nothing to sync in legacy (non MSI-X) interrupt mode */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
				ioc->pci_error_recovery)
			return;

		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
		if (reply_q->irq_poll_scheduled) {
			/*
			 * Calling irq_poll_disable will wait for any pending
			 * callbacks to have completed.
			 */
			irq_poll_disable(&reply_q->irqpoll);
			irq_poll_enable(&reply_q->irqpoll);
			/* re-enable the hard irq line if poll was pending */
			if (reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = false;
				reply_q->irq_line_enable = true;
				enable_irq(reply_q->os_irq);
			}
		}
	}
	/*
	 * NOTE(review): if the list walk above runs to completion,
	 * reply_q here is the loop cursor past the last element (it does
	 * not point at a valid queue) -- confirm poll is only used in
	 * contexts where this is safe.
	 */
	if (poll)
		_base_process_reply_queue(reply_q);
}
1923
1924
1925
1926
1927
/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index to release
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}
1933
1934
1935
1936
1937
1938
1939
/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt
 * callback handler
 * @cb_func: callback function
 *
 * Scans the callback table top-down for a free slot and installs
 * @cb_func there.
 *
 * Return: the assigned callback index.
 *
 * NOTE(review): the search stops at index 1; if the table is full the
 * loop exits with cb_idx == 0 and slot 0 is silently overwritten --
 * confirm MPT_MAX_CALLBACKS is always large enough for all modules.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}
1952
1953
1954
1955
1956void
1957mpt3sas_base_initialize_callback_handler(void)
1958{
1959 u8 cb_idx;
1960
1961 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1962 mpt3sas_base_release_callback_handler(cb_idx);
1963}
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Creates a zero length scatter gather entry to insure the IOC hardware
 * has something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);
	ioc->base_add_sg_single(paddr, flags_length, -1);
}
1984
1985
1986
1987
1988
1989
1990
1991static void
1992_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1993{
1994 Mpi2SGESimple32_t *sgel = paddr;
1995
1996 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1997 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1998 sgel->FlagsLength = cpu_to_le32(flags_length);
1999 sgel->Address = cpu_to_le32(dma_addr);
2000}
2001
2002
2003
2004
2005
2006
2007
2008
2009static void
2010_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
2011{
2012 Mpi2SGESimple64_t *sgel = paddr;
2013
2014 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
2015 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
2016 sgel->FlagsLength = cpu_to_le32(flags_length);
2017 sgel->Address = cpu_to_le64(dma_addr);
2018}
2019
2020
2021
2022
2023
2024
2025
2026
2027
/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command whose scsiio tracker supplies the smid
 *
 * Hands out the next pre-allocated chain buffer for this smid by
 * bumping the per-smid chain_offset counter.
 *
 * Return: chain tracker from the chain pool, or NULL when all
 * chains_needed_per_io buffers for the smid are in use.
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
			       struct scsi_cmnd *scmd)
{
	struct chain_tracker *chain_req;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 smid = st->smid;
	u8 chain_offset =
	   atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

	if (chain_offset == ioc->chains_needed_per_io)
		return NULL;

	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
	return chain_req;
}
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056static void
2057_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
2058 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2059 size_t data_in_sz)
2060{
2061 u32 sgl_flags;
2062
2063 if (!data_out_sz && !data_in_sz) {
2064 _base_build_zero_len_sge(ioc, psge);
2065 return;
2066 }
2067
2068 if (data_out_sz && data_in_sz) {
2069
2070 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2071 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
2072 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2073 ioc->base_add_sg_single(psge, sgl_flags |
2074 data_out_sz, data_out_dma);
2075
2076
2077 psge += ioc->sge_size;
2078
2079
2080 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2081 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2082 MPI2_SGE_FLAGS_END_OF_LIST);
2083 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2084 ioc->base_add_sg_single(psge, sgl_flags |
2085 data_in_sz, data_in_dma);
2086 } else if (data_out_sz) {
2087 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2088 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2089 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
2090 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2091 ioc->base_add_sg_single(psge, sgl_flags |
2092 data_out_sz, data_out_dma);
2093 } else if (data_in_sz) {
2094 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2095 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2096 MPI2_SGE_FLAGS_END_OF_LIST);
2097 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2098 ioc->base_add_sg_single(psge, sgl_flags |
2099 data_in_sz, data_in_dma);
2100 }
2101}
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
/**
 * _base_build_nvme_prp - build PRP entries for an NVMe encapsulated cmd
 * @ioc: per adapter object
 * @smid: system request message index whose PRP page pool is used
 * @nvme_encap_request: NVMe encapsulated request containing the NVMe
 *	command whose PRP1/PRP2 fields are filled in
 * @data_out_dma: physical address of host-to-device transfer
 * @data_out_sz: host-to-device transfer length
 * @data_in_dma: physical address of device-to-host transfer
 * @data_in_sz: device-to-host transfer length
 *
 * Fills PRP1 (and PRP2 when needed) of the embedded NVMe command, and
 * builds a PRP list in the per-smid PRP page when the transfer spans
 * more than two ioc pages.  Only one direction is expected to be
 * non-zero for an NVMe encapsulated request.
 */
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	int prp_size = NVME_PRP_SIZE;
	__le64 *prp_entry, *prp1_entry, *prp2_entry;
	__le64 *prp_page;
	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
	u32 offset, entry_len;
	u32 page_mask_result, page_mask;
	size_t length;
	struct mpt3sas_nvme_cmd *nvme_cmd =
		(void *)nvme_encap_request->NVMe_Command;

	/*
	 * Not all commands require a data transfer. If no data, just
	 * return without constructing any PRP.
	 */
	if (!data_in_sz && !data_out_sz)
		return;
	prp1_entry = &nvme_cmd->prp1;
	prp2_entry = &nvme_cmd->prp2;
	prp_entry = prp1_entry;
	/*
	 * For EEDP, the PRP page is allocated per smid; get the
	 * virtual/physical address of that page.
	 */
	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	/*
	 * Check if we are within 1 entry of a page boundary we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask = ioc->page_size - 1;
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		/* Bump up to next page boundary. */
		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
		prp_page_dma = prp_page_dma + prp_size;
	}

	/*
	 * Set PRP physical pointer, which initially points to the current
	 * PRP DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;

	/* Get physical address and length of the data buffer. */
	if (data_in_sz) {
		dma_addr = data_in_dma;
		length = data_in_sz;
	} else {
		dma_addr = data_out_dma;
		length = data_out_sz;
	}

	/* Loop while the length is not zero. */
	while (length) {
		/*
		 * Check if we need to put a list pointer here if we are
		 * at page boundary - prp_size (8 bytes).
		 */
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result) {
			/*
			 * This is the last entry in a PRP List, so we need
			 * to put a PRP list pointer here.  What this does is:
			 *   - bump the current memory pointer to the next
			 *     address, which will be the next full page.
			 *   - set the PRP Entry to point to that page.  This
			 *     is now the PRP List pointer.
			 * NOTE(review): prp_entry_dma is advanced by 1 (one
			 * byte), not by prp_size -- confirm against the
			 * intended PRP list layout.
			 */
			prp_entry_dma++;
			*prp_entry = cpu_to_le64(prp_entry_dma);
			prp_entry++;
		}

		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = ioc->page_size - offset;

		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer?  If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > ioc->page_size) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill
			 * in all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			prp_entry++;
			prp_entry_dma++;
		}

		/*
		 * Bump the phys address of the command's data buffer by the
		 * entry_len.
		 */
		dma_addr += entry_len;

		/* decrement length accounting for last partial page. */
		if (entry_len > length)
			length = 0;
		else
			length -= entry_len;
	}
}
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
/**
 * base_make_prp_nvme - prepare PRPs (Physical Region Pages) for an
 *	NVMe-native SCSI IO request from its scatter gather list
 * @ioc: per adapter object
 * @scmd: SCSI command being translated
 * @mpi_request: MPI25 SCSI IO request whose SGL area is rewritten as a
 *	first PRP followed by a chain element pointing at the PRP list
 * @smid: system request message index whose PRP page pool holds the list
 * @sge_count: number of DMA-mapped scatter gather entries (unused here;
 *	the loop is driven by scsi_bufflen)
 */
static void
base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
		struct scsi_cmnd *scmd,
		Mpi25SCSIIORequest_t *mpi_request,
		u16 smid, int sge_count)
{
	int sge_len, num_prp_in_chain = 0;
	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
	__le64 *curr_buff;
	dma_addr_t msg_dma, sge_addr, offset;
	u32 page_mask, page_mask_result;
	struct scatterlist *sg_scmd;
	u32 first_prp_len;
	int data_len = scsi_bufflen(scmd);
	u32 nvme_pg_size;

	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
	/*
	 * Nvme has a very convoluted prp format.  One prp is required
	 * for each page or partial page. Driver need to split up OS sg_list
	 * entries if it is longer than one page or cross a page
	 * boundary.  Driver also have to insert a PRP list pointer entry as
	 * the last entry in each physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first
	 * SGL entry in the main message as IEEE 64 format.  The 2nd
	 * entry in the main message is the chain element, and the rest
	 * of the PRP entries are built in the contiguous pcie buffer.
	 */
	page_mask = nvme_pg_size - 1;

	/*
	 * Native SGL is needed.
	 * Put a chain element in main message frame that points to the
	 * first PRP.
	 *
	 * NOTE:  The ChainOffset field must be 0 when using a chain
	 * pointer to a native SGL.
	 */

	/* Set main message chain element pointer */
	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
	/*
	 * For NVMe the chain element needs to be the 2nd SG entry in the
	 * main message.
	 */
	main_chain_element = (Mpi25IeeeSgeChain64_t *)
		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));

	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.  Normal chain buffers can't be used
	 * because each chain buffer would need to be the size of an OS
	 * page (4k).
	 */
	curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
	msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	main_chain_element->Address = cpu_to_le64(msg_dma);
	main_chain_element->NextChainOffset = 0;
	main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
			MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

	/* Build first prp, sge need not to be page aligned*/
	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
	sg_scmd = scsi_sglist(scmd);
	sge_addr = sg_dma_address(sg_scmd);
	sge_len = sg_dma_len(sg_scmd);

	offset = sge_addr & page_mask;
	first_prp_len = nvme_pg_size - offset;

	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);

	data_len -= first_prp_len;

	if (sge_len > first_prp_len) {
		sge_addr += first_prp_len;
		sge_len -= first_prp_len;
	} else if (data_len && (sge_len == first_prp_len)) {
		sg_scmd = sg_next(sg_scmd);
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}

	for (;;) {
		offset = sge_addr & page_mask;

		/* Put PRP pointer due to page boundary*/
		page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
		if (unlikely(!page_mask_result)) {
			scmd_printk(KERN_NOTICE,
				scmd, "page boundary curr_buff: 0x%p\n",
				curr_buff);
			msg_dma += 8;
			*curr_buff = cpu_to_le64(msg_dma);
			curr_buff++;
			num_prp_in_chain++;
		}

		*curr_buff = cpu_to_le64(sge_addr);
		curr_buff++;
		msg_dma += 8;
		num_prp_in_chain++;

		sge_addr += nvme_pg_size;
		sge_len -= nvme_pg_size;
		data_len -= nvme_pg_size;

		if (data_len <= 0)
			break;

		if (sge_len > 0)
			continue;

		/* current sg element exhausted; advance to the next */
		sg_scmd = sg_next(sg_scmd);
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}

	main_chain_element->Length =
		cpu_to_le32(num_prp_in_chain * sizeof(u64));
	return;
}
2453
2454static bool
2455base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2456 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2457{
2458 u32 data_length = 0;
2459 bool build_prp = true;
2460
2461 data_length = scsi_bufflen(scmd);
2462 if (pcie_device &&
2463 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2464 build_prp = false;
2465 return build_prp;
2466 }
2467
2468
2469
2470
2471 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2472 build_prp = false;
2473
2474 return build_prp;
2475}
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492static int
2493_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2494 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2495 struct _pcie_device *pcie_device)
2496{
2497 int sges_left;
2498
2499
2500 sges_left = scsi_dma_map(scmd);
2501 if (sges_left < 0) {
2502 sdev_printk(KERN_ERR, scmd->device,
2503 "scsi_dma_map failed: request for %d bytes!\n",
2504 scsi_bufflen(scmd));
2505 return 1;
2506 }
2507
2508
2509 if (!base_is_prp_possible(ioc, pcie_device,
2510 scmd, sges_left)) {
2511
2512 goto out;
2513 }
2514
2515
2516
2517
2518 base_make_prp_nvme(ioc, scmd, mpi_request,
2519 smid, sges_left);
2520
2521 return 0;
2522out:
2523 scsi_dma_unmap(scmd);
2524 return 1;
2525}
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535static void
2536_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2537 dma_addr_t dma_addr)
2538{
2539 Mpi25IeeeSgeChain64_t *sgel = paddr;
2540
2541 sgel->Flags = flags;
2542 sgel->NextChainOffset = chain_offset;
2543 sgel->Length = cpu_to_le32(length);
2544 sgel->Address = cpu_to_le64(dma_addr);
2545}
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
/**
 * _base_build_zero_len_sge_ieee - build zero length IEEE sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Creates a zero length IEEE scatter gather entry (length 0, address
 * -1) to terminate a request that moves no data.
 */
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);

	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
/**
 * _base_build_sg_scmd - main sg creation routine (MPI SGE format)
 * @ioc: per adapter object
 * @scmd: SCSI command
 * @smid: system request message index
 * @unused: unused pcie_device pointer (kept for signature parity with
 *	the IEEE variant)
 *
 * DMA-maps the command's scatter gather list into the message frame's
 * SGL area, spilling into chained segments (from the per-smid chain
 * buffer pool) when the list does not fit in the main message.
 *
 * Return: 0 on success, -ENOMEM when DMA mapping fails, -1 when no
 * chain buffer is available.
 */
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
{
	Mpi2SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	u32 chain_flags;
	int sges_left;
	u32 sges_in_segment;
	u32 sgl_flags;
	u32 sgl_flags_last_element;
	u32 sgl_flags_end_buffer;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		 "scsi_dma_map failed: request for %d bytes!\n",
		 scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = ioc->max_sges_in_main_message;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* ChainOffset is in 32bit dwords */
	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
	    (sges_in_segment * ioc->sge_size))/4;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment) {
		if (sges_in_segment == 1)
			ioc->base_add_sg_single(sg_local,
			    sgl_flags_last_element | sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the chain flags and pointers */
	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : (sges_in_segment * ioc->sge_size)/4;
		chain_length = sges_in_segment * ioc->sge_size;
		if (chain_offset) {
			chain_offset = chain_offset <<
			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
			chain_length += ioc->sge_size;
		}
		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
		    chain_length, chain_dma);
		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			if (sges_in_segment == 1)
				ioc->base_add_sg_single(sg_local,
				    sgl_flags_last_element |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			else
				ioc->base_add_sg_single(sg_local, sgl_flags |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);


 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left) {
		if (sges_left == 1)
			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
	}

	return 0;
}
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: SCSI command
 * @smid: system request message index
 * @pcie_device: pointer to the PCIe device object (non-NULL enables the
 *	NVMe native SGL/PRP fast path)
 *
 * DMA-maps the command's scatter gather list into the message frame's
 * SGL area using IEEE 64bit SGEs, spilling into chained segments when
 * needed.  For NVMe devices a PRP-based native SGL is attempted first.
 *
 * Return: 0 on success, -ENOMEM when DMA mapping fails, -1 when no
 * chain buffer is available.
 */
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
{
	Mpi25SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	int sges_left;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 chain_sgl_flags;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;

	/* Check if we need to build a native SG list. */
	if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
			smid, scmd, pcie_device) == 0)) {
		/* We built a native SG list, just return. */
		return 0;
	}

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"scsi_dma_map failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = (ioc->request_sz -
		   offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* ChainOffset is in units of sge_size_ieee (16 byte) elements */
	mpi_request->ChainOffset = (sges_in_segment - 1 ) +
	   (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the pointers */
	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : sges_in_segment;
		chain_length = sges_in_segment * ioc->sge_size_ieee;
		if (chain_offset)
			chain_length += ioc->sge_size_ieee;
		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
		    chain_offset, chain_length, chain_dma);

		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size_ieee;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);


 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left > 0) {
		if (sges_left == 1)
			_base_add_sg_single_ieee(sg_local,
			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
	}

	return 0;
}
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855static void
2856_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2857 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2858 size_t data_in_sz)
2859{
2860 u8 sgl_flags;
2861
2862 if (!data_out_sz && !data_in_sz) {
2863 _base_build_zero_len_sge_ieee(ioc, psge);
2864 return;
2865 }
2866
2867 if (data_out_sz && data_in_sz) {
2868
2869 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2870 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2871 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2872 data_out_dma);
2873
2874
2875 psge += ioc->sge_size_ieee;
2876
2877
2878 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2879 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2880 data_in_dma);
2881 } else if (data_out_sz) {
2882 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2883 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2884 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2885 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2886 data_out_dma);
2887 } else if (data_in_sz) {
2888 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2889 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2890 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2891 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2892 data_in_dma);
2893 }
2894}
2895
2896#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2897
2898
2899
2900
2901
2902
2903
2904
/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Picks a 32/63/64-bit DMA mask, applies it to both streaming and
 * coherent mappings, and selects the matching 32- or 64-bit MPI SGE
 * helper and element size.
 *
 * Return: 0 for success, -ENODEV if neither mask could be set.
 */
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;

	/* force 32-bit DMA for mCPU endpoints, 32-bit dma_addr_t builds,
	 * an explicit 32-bit request, or when the platform needs <= 32 bits
	 */
	if (ioc->is_mcpu_endpoint ||
	    sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
	    dma_get_required_mask(&pdev->dev) <= 32)
		ioc->dma_mask = 32;
	/* 63-bit mask for controllers newer than MPI2 */
	else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
		ioc->dma_mask = 63;
	else
		ioc->dma_mask = 64;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
		return -ENODEV;

	/* SGE helper must match the chosen address width */
	if (ioc->dma_mask > 32) {
		ioc->base_add_sg_single = &_base_add_sg_single_64;
		ioc->sge_size = sizeof(Mpi2SGESimple64_t);
	} else {
		ioc->base_add_sg_single = &_base_add_sg_single_32;
		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
	}

	si_meminfo(&s);
	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		ioc->dma_mask, convert_to_kb(s.totalram));

	return 0;
}
2938
2939
2940
2941
2942
2943
2944
2945
/**
 * _base_check_enable_msix - checks whether MSI-X is usable and reads
 * the supported vector count into ioc->msix_vector_count.
 * @ioc: per adapter object
 *
 * Return: 0 when MSI-X can be used, -EINVAL otherwise.
 */
static int
_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int base;
	u16 message_control;

	/* SAS2008 B0 silicon is excluded from MSI-X here; the caller then
	 * falls back to IO-APIC routing
	 */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
		return -EINVAL;
	}

	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
	if (!base) {
		dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
		return -EINVAL;
	}

	/* older SAS2 controllers are limited to a single vector; for all
	 * others read the table size from the MSI-X Message Control word
	 */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
		ioc->msix_vector_count = 1;
	else {
		pci_read_config_word(ioc->pdev, base + 2, &message_control);
		/* low 11 bits hold (table size - 1) */
		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
	}
	dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
		ioc->msix_vector_count));
	return 0;
}
2984
2985
2986
2987
2988
2989
2990
/**
 * mpt3sas_base_free_irq - free all registered reply-queue interrupts
 * @ioc: per adapter object
 *
 * Unlinks each reply queue from ioc->reply_queue_list, drops its CPU
 * affinity hint (when SMP affinity was enabled), releases the IRQ and
 * frees the queue structure.
 */
void
mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		list_del(&reply_q->list);
		/* clear affinity hint before releasing the vector */
		if (ioc->smp_affinity_enable)
			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
			    reply_q->msix_index), NULL);
		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
			 reply_q);
		kfree(reply_q);
	}
}
3009
3010
3011
3012
3013
3014
3015
3016
/**
 * _base_request_irq - request irq for one reply queue
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Allocates an adapter_reply_queue, names it after the driver instance
 * (and msix index when MSI-X is enabled), registers _base_interrupt as
 * the handler and appends the queue to ioc->reply_queue_list.
 *
 * Return: 0 on success, -ENOMEM or -EBUSY on failure.
 */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
	struct pci_dev *pdev = ioc->pdev;
	struct adapter_reply_queue *reply_q;
	int r;

	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		ioc_err(ioc, "unable to allocate memory %zu!\n",
			sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;

	atomic_set(&reply_q->busy, 0);
	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    ioc->driver_name, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    ioc->driver_name, ioc->id);
	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
			IRQF_SHARED, reply_q->name, reply_q);
	if (r) {
		pr_err("%s: unable to allocate interrupt %d!\n",
		       reply_q->name, pci_irq_vector(pdev, index));
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * Fills ioc->cpu_msix_table so that each online CPU maps to a reply
 * queue.  With SMP affinity enabled, high-iops queues are pinned to the
 * controller's NUMA node and the remaining queues use the affinity
 * masks computed by the PCI layer; otherwise CPUs are distributed
 * round-robin across the non-high-iops queues.
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;
	struct adapter_reply_queue *reply_q;
	int local_numa_node;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	/* with load balancing the msix index is chosen per-IO, so a static
	 * cpu table is not used
	 */
	if (ioc->msix_load_balance)
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
					       ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	if (ioc->smp_affinity_enable) {

		/* pin the high-iops queues to the controller's local NUMA
		 * node; they are excluded from the per-cpu table below
		 */
		if (ioc->high_iops_queues) {
			local_numa_node = dev_to_node(&ioc->pdev->dev);
			for (index = 0; index < ioc->high_iops_queues;
			    index++) {
				irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
				    index), cpumask_of_node(local_numa_node));
			}
		}

		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
			const cpumask_t *mask;

			if (reply_q->msix_index < ioc->high_iops_queues)
				continue;

			mask = pci_irq_get_affinity(ioc->pdev,
			    reply_q->msix_index);
			if (!mask) {
				ioc_warn(ioc, "no affinity for msi %x\n",
					 reply_q->msix_index);
				goto fall_back;
			}

			for_each_cpu_and(cpu, mask, cpu_online_mask) {
				if (cpu >= ioc->cpu_msix_table_sz)
					break;
				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			}
		}
		return;
	}

fall_back:
	/* manual round-robin: split the online CPUs into nr_msix groups,
	 * the first (nr_cpus % nr_msix) groups get one extra CPU
	 */
	cpu = cpumask_first(cpu_online_mask);
	nr_msix -= ioc->high_iops_queues;
	index = 0;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		unsigned int i, group = nr_cpus / nr_msix;

		if (reply_q->msix_index < ioc->high_iops_queues)
			continue;

		if (cpu >= nr_cpus)
			break;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
		index++;
	}
}
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
/**
 * _base_check_and_enable_high_iops_queues - enable high iops mode
 * @ioc: per adapter object
 * @hba_msix_vector_count: msix vectors supported by HBA
 *
 * Enables the dedicated high-iops reply queues only in balanced/default
 * perf mode on Aero controllers with full MSI-X support, enough online
 * CPUs, no max_msix_vectors override and a fast enough PCIe link;
 * otherwise ioc->high_iops_queues is set to 0.
 */
static void
_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
		int hba_msix_vector_count)
{
	u16 lnksta, speed;

	if (perf_mode == MPT_PERF_MODE_IOPS ||
	    perf_mode == MPT_PERF_MODE_LATENCY) {
		ioc->high_iops_queues = 0;
		return;
	}

	if (perf_mode == MPT_PERF_MODE_DEFAULT) {

		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
		speed = lnksta & PCI_EXP_LNKSTA_CLS;

		/* link speed encoding below 0x4 (16 GT/s) disables the
		 * high-iops queues in default mode
		 */
		if (speed < 0x4) {
			ioc->high_iops_queues = 0;
			return;
		}
	}

	if (!reset_devices && ioc->is_aero_ioc &&
	    hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
	    num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
	    max_msix_vectors == -1)
		ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
	else
		ioc->high_iops_queues = 0;
}
3192
3193
3194
3195
3196
3197
3198void
3199mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3200{
3201 if (!ioc->msix_enable)
3202 return;
3203 pci_free_irq_vectors(ioc->pdev);
3204 ioc->msix_enable = 0;
3205}
3206
3207
3208
3209
3210
3211
3212static int
3213_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3214{
3215 int i, irq_flags = PCI_IRQ_MSIX;
3216 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3217 struct irq_affinity *descp = &desc;
3218
3219 if (ioc->smp_affinity_enable)
3220 irq_flags |= PCI_IRQ_AFFINITY;
3221 else
3222 descp = NULL;
3223
3224 ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
3225 ioc->reply_queue_count);
3226
3227 i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3228 ioc->high_iops_queues,
3229 ioc->reply_queue_count, irq_flags, descp);
3230
3231 return i;
3232}
3233
3234
3235
3236
3237
3238
/**
 * _base_enable_msix - enable MSI-X interrupt routing, falling back to
 * legacy IO-APIC routing if MSI-X is unavailable or fails
 * @ioc: per adapter object
 *
 * Sizes ioc->reply_queue_count from CPU count, module parameters and
 * controller capabilities, allocates the vectors and requests one irq
 * per reply queue.
 *
 * Return: 0 on success, otherwise the last failing call's error code.
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int r;
	int i, local_max_msix_vectors;
	u8 try_msix = 0;

	ioc->msix_load_balance = false;

	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
		ioc->cpu_count, max_msix_vectors);
	if (ioc->is_aero_ioc)
		_base_check_and_enable_high_iops_queues(ioc,
			ioc->msix_vector_count);
	ioc->reply_queue_count =
		min_t(int, ioc->cpu_count + ioc->high_iops_queues,
		ioc->msix_vector_count);

	/* without RDPQ and no explicit override, limit to 8 vectors
	 * (1 in kdump/reset_devices mode)
	 */
	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
		local_max_msix_vectors = (reset_devices) ? 1 : 8;
	else
		local_max_msix_vectors = max_msix_vectors;

	if (local_max_msix_vectors > 0)
		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
			ioc->reply_queue_count);
	else if (local_max_msix_vectors == 0)
		goto try_ioapic;

	/* when combined reply queues are off on newer (non-MPI2) HBAs,
	 * balance IO over the vectors per-request instead of per-cpu
	 */
	if (!ioc->combined_reply_queue &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		ioc_info(ioc,
		    "combined ReplyQueue is off, Enabling msix load balance\n");
		ioc->msix_load_balance = true;
	}

	/* load balancing supersedes static cpu affinity */
	if (ioc->msix_load_balance)
		ioc->smp_affinity_enable = 0;

	r = _base_alloc_irq_vectors(ioc);
	if (r < 0) {
		ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
		goto try_ioapic;
	}

	ioc->msix_enable = 1;
	/* may be fewer vectors than requested */
	ioc->reply_queue_count = r;
	for (i = 0; i < ioc->reply_queue_count; i++) {
		r = _base_request_irq(ioc, i);
		if (r) {
			mpt3sas_base_free_irq(ioc);
			mpt3sas_base_disable_msix(ioc);
			goto try_ioapic;
		}
	}

	ioc_info(ioc, "High IOPs queues : %s\n",
	    ioc->high_iops_queues ? "enabled" : "disabled");

	return 0;

/* failback to io_apic interrupt routing */
 try_ioapic:
	ioc->high_iops_queues = 0;
	ioc_info(ioc, "High IOPs queues : disabled\n");
	ioc->reply_queue_count = 1;
	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
	if (r < 0) {
		dfailprintk(ioc,
			    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
				     r));
	} else
		r = _base_request_irq(ioc, 0);

	return r;
}
3333
3334
3335
3336
3337
/**
 * mpt3sas_base_unmap_resources - free controller resources
 * @ioc: per adapter object
 *
 * Reverse of mpt3sas_base_map_resources(): releases irqs and MSI-X
 * vectors, frees the reply-post register index table, unmaps the chip
 * BAR and disables the PCI device.
 */
static void
mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;

	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_free_irq(ioc);
	mpt3sas_base_disable_msix(ioc);

	kfree(ioc->replyPostRegisterIndex);
	ioc->replyPostRegisterIndex = NULL;

	/* chip_phys non-zero implies the BAR was successfully ioremapped */
	if (ioc->chip_phys) {
		iounmap(ioc->chip);
		ioc->chip_phys = 0;
	}

	if (pci_is_enabled(pdev)) {
		pci_release_selected_regions(ioc->pdev, ioc->bars);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}
3363
3364static int
3365_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3366
3367
3368
3369
3370
3371
3372
3373
/**
 * mpt3sas_base_check_for_fault_and_issue_reset - check IOC for fault
 * or coredump state and issue a diag reset if so
 * @ioc: per adapter object
 *
 * For the COREDUMP state the routine first waits for the firmware
 * coredump to complete before resetting.  Skipped entirely during PCI
 * error recovery.
 *
 * Return: 0 when no fault was found (or recovery is in progress),
 * otherwise the _base_diag_reset() result (initialised to -EFAULT).
 */
int
mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;
	int rc = -EFAULT;

	dinitprintk(ioc, pr_info("%s\n", __func__));
	if (ioc->pci_error_recovery)
		return 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		mpt3sas_base_mask_interrupts(ioc);
		rc = _base_diag_reset(ioc);
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		/* let the firmware finish dumping before resetting it */
		mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
		mpt3sas_base_mask_interrupts(ioc);
		rc = _base_diag_reset(ioc);
	}

	return rc;
}
3402
3403
3404
3405
3406
3407
3408
/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Enables the PCI device, claims its BARs, configures DMA masks, maps
 * the memory BAR, reads IOC facts, enables MSI-X (or legacy irq) and
 * sets up the reply-post host index registers.  On any failure the
 * partially acquired resources are released via
 * mpt3sas_base_unmap_resources().
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0, rc;
	u64 pio_chip = 0;
	phys_addr_t chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
		ioc->bars = 0;
		return -ENODEV;
	}


	if (pci_request_selected_regions(pdev, ioc->bars,
	    ioc->driver_name)) {
		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);


	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	/* find the first IO BAR and the first MEM BAR; the MEM BAR holds
	 * the chip register window
	 */
	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
	     (!memap_sz || !pio_sz); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
		}
	}

	if (ioc->chip == NULL) {
		ioc_err(ioc,
		    "unable to map adapter memory! or resource not found\n");
		r = -EINVAL;
		goto out_fail;
	}

	mpt3sas_base_mask_interrupts(ioc);

	r = _base_get_ioc_facts(ioc);
	if (r) {
		/* one retry after attempting to recover a faulted IOC */
		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_fail;
	}

	if (!ioc->rdpq_array_enable_assigned) {
		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
		ioc->rdpq_array_enable_assigned = 1;
	}

	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	if (!ioc->is_driver_loading)
		_base_init_irqpolls(ioc);


	if (ioc->combined_reply_queue) {
		/* one supplemental reply-post host index register per group
		 * of reply queues; cache their mapped addresses
		 */
		ioc->replyPostRegisterIndex = kcalloc(
		     ioc->combined_reply_index_count,
		     sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->replyPostRegisterIndex) {
			ioc_err(ioc,
			    "allocation for replyPostRegisterIndex failed!\n");
			r = -ENOMEM;
			goto out_fail;
		}

		for (i = 0; i < ioc->combined_reply_index_count; i++) {
			ioc->replyPostRegisterIndex[i] = (resource_size_t *)
			     ((u8 __force *)&ioc->chip->Doorbell +
			     MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
			     (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
		}
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
		    &ioc->chip->ReplyPostHostIndex;

		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
			ioc->reply_post_host_index[i] =
			(resource_size_t __iomem *)
			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
			* 4)));
	}

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
		pr_info("%s: %s enabled: IRQ %d\n",
			reply_q->name,
			ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
			pci_irq_vector(ioc->pdev, reply_q->msix_index));

	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
		 &chip_phys, ioc->chip, memap_sz);
	ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
		 (unsigned long long)pio_chip, pio_sz);

	/* save state so it can be restored after a PCI-level reset */
	pci_save_state(pdev);
	return 0;

 out_fail:
	mpt3sas_base_unmap_resources(ioc);
	return r;
}
3552
3553
3554
3555
3556
3557
3558
3559
3560void *
3561mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3562{
3563 return (void *)(ioc->request + (smid * ioc->request_sz));
3564}
3565
3566
3567
3568
3569
3570
3571
3572
3573void *
3574mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3575{
3576 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3577}
3578
3579
3580
3581
3582
3583
3584
3585
3586__le32
3587mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3588{
3589 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3590 SCSI_SENSE_BUFFERSIZE));
3591}
3592
3593
3594
3595
3596
3597
3598
3599
/**
 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: virtual address of the NVMe PRP/SGL buffer for @smid
 * (lookup table is zero-based while SMIDs start at 1).
 */
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}
3605
3606
3607
3608
3609
3610
3611
3612
/**
 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: physical address of the NVMe PRP/SGL buffer for @smid
 * (lookup table is zero-based while SMIDs start at 1).
 */
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}
3618
3619
3620
3621
3622
3623
3624
3625
3626void *
3627mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3628{
3629 if (!phys_addr)
3630 return NULL;
3631 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3632}
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
/**
 * _base_get_msix_index - choose the msix index for a request
 * @ioc: per adapter object
 * @scmd: scsi_cmnd object, may be NULL for internal commands
 *
 * With msix load balancing, indices are handed out round-robin over
 * the reply queues; with multiple hw queues the block layer's hw queue
 * number (offset past the high-iops queues) is used; otherwise the
 * submitting CPU's entry in cpu_msix_table decides.
 */
static inline u8
_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	/* round-robin over all reply queues */
	if (ioc->msix_load_balance)
		return ioc->reply_queue_count ?
		    base_mod64(atomic64_add_return(1,
		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;

	if (scmd && ioc->shost->nr_hw_queues > 1) {
		u32 tag = blk_mq_unique_tag(scmd->request);

		return blk_mq_unique_tag_to_hwq(tag) +
		    ioc->high_iops_queues;
	}

	return ioc->cpu_msix_table[raw_smp_processor_id()];
}
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
/**
 * _base_get_high_iops_msix_index - choose the msix index when high
 * iops queues are enabled
 * @ioc: per adapter object
 * @scmd: scsi_cmnd object
 *
 * When the target device has more outstanding IOs than
 * MPT3SAS_DEVICE_HIGH_IOPS_DEPTH, the request is spread over the
 * high-iops reply queues in batches of MPT3SAS_HIGH_IOPS_BATCH_COUNT;
 * otherwise the regular msix index selection applies.
 */
static inline u8
_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	/* batched round-robin across the high-iops queues for busy devices */
	if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
		return base_mod64((
		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
		    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
		    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);

	return _base_get_msix_index(ioc, scmd);
}
3691
3692
3693
3694
3695
3696
3697
3698
3699u16
3700mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3701{
3702 unsigned long flags;
3703 struct request_tracker *request;
3704 u16 smid;
3705
3706 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3707 if (list_empty(&ioc->internal_free_list)) {
3708 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3709 ioc_err(ioc, "%s: smid not available\n", __func__);
3710 return 0;
3711 }
3712
3713 request = list_entry(ioc->internal_free_list.next,
3714 struct request_tracker, tracker_list);
3715 request->cb_idx = cb_idx;
3716 smid = request->smid;
3717 list_del(&request->tracker_list);
3718 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3719 return smid;
3720}
3721
3722
3723
3724
3725
3726
3727
3728
3729
/**
 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
 * @ioc: per adapter object
 * @cb_idx: callback index stored in the tracker for the reply path
 * @scmd: scsi_cmnd object
 *
 * SCSI IO smids map 1:1 onto block layer tags (smid = tag + 1), so no
 * free list or locking is needed; the per-command tracker lives in the
 * command's private data.
 *
 * Return: smid (>0 is valid, 0 is failure)
 */
u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
	struct scsi_cmnd *scmd)
{
	struct scsiio_tracker *request = scsi_cmd_priv(scmd);
	u16 smid;
	u32 tag, unique_tag;

	unique_tag = blk_mq_unique_tag(scmd->request);
	tag = blk_mq_unique_tag_to_tag(unique_tag);

	/* remember which hw queue this tag was issued on so the
	 * completion/free path can reference it
	 */
	ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);

	/* smid 0 is reserved, hence the +1 offset from the tag */
	smid = tag + 1;
	request->cb_idx = cb_idx;
	request->smid = smid;
	request->scmd = scmd;
	INIT_LIST_HEAD(&request->chain_list);
	return smid;
}
3760
3761
3762
3763
3764
3765
3766
3767
3768u16
3769mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3770{
3771 unsigned long flags;
3772 struct request_tracker *request;
3773 u16 smid;
3774
3775 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3776 if (list_empty(&ioc->hpr_free_list)) {
3777 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3778 return 0;
3779 }
3780
3781 request = list_entry(ioc->hpr_free_list.next,
3782 struct request_tracker, tracker_list);
3783 request->cb_idx = cb_idx;
3784 smid = request->smid;
3785 list_del(&request->tracker_list);
3786 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3787 return smid;
3788}
3789
3790static void
3791_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3792{
3793
3794
3795
3796 if (ioc->shost_recovery && ioc->pending_io_count) {
3797 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3798 if (ioc->pending_io_count == 0)
3799 wake_up(&ioc->reset_wq);
3800 }
3801}
3802
/**
 * mpt3sas_base_clear_st - reset a scsiio tracker to its free state
 * @ioc: per adapter object
 * @st: scsiio tracker to clear
 *
 * A tracker with smid == 0 is already free; clearing it twice would
 * index chain_lookup[-1], hence the WARN_ON guard.
 */
void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
			   struct scsiio_tracker *st)
{
	if (WARN_ON(st->smid == 0))
		return;
	st->cb_idx = 0xFF;
	st->direct_io = 0;
	st->scmd = NULL;
	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
	/* clear smid last; it indexes chain_lookup on the line above */
	st->smid = 0;
}
3814
3815
3816
3817
3818
3819
/**
 * mpt3sas_base_free_smid - put smid back on free list
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * SCSI IO smids (below hi_priority_smid) clear their tracker and the
 * cached hw queue number; hi-priority and internal smids are returned
 * to their respective free lists under scsi_lookup_lock.
 */
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;
		void *request;

		st = _get_st_from_smid(ioc, smid);
		if (!st) {
			_base_recovery_check(ioc);
			return;
		}

		/* scrub the frame so stale data never reaches firmware */
		request = mpt3sas_base_get_msg_frame(ioc, smid);
		memset(request, 0, ioc->request_sz);

		mpt3sas_base_clear_st(ioc, st);
		_base_recovery_check(ioc);
		ioc->io_queue_num[smid - 1] = 0;
		return;
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->internal_smid) {
		/* hi-priority */
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		/* internal commands */
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->internal_lookup[i].tracker_list,
			 &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
/**
 * _base_mpi_ep_writeq - 32-bit write of a 64-bit request descriptor
 * @b: 64-bit descriptor value
 * @addr: mapped register address (RequestDescriptorPostLow)
 * @writeq_lock: spin lock serializing the two 32-bit writes
 *
 * Posts the low dword then the high dword under a spin lock so the
 * two halves of the descriptor are not interleaved with another CPU's
 * post.  Used where a native 64-bit MMIO write is unavailable.
 */
static inline void
_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
					spinlock_t *writeq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(writeq_lock, flags);
	__raw_writel((u32)(b), addr);
	__raw_writel((u32)(b >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
/**
 * _base_writeq - 64 bit write to MMIO
 * @b: 64-bit descriptor value
 * @addr: mapped register address
 * @writeq_lock: spin lock used only by the 32-bit fallback
 *
 * On 64-bit platforms with writeq the descriptor is posted with a
 * single raw 64-bit store (wmb() before it orders prior frame writes);
 * otherwise the locked two-writel fallback is used.
 */
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	wmb();
	__raw_writeq(b, addr);
	barrier();
}
#else
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	/* no native 64-bit MMIO write: serialize two 32-bit writes */
	_base_mpi_ep_writeq(b, addr, writeq_lock);
}
#endif
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918static u8
3919_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3920{
3921 struct scsiio_tracker *st = NULL;
3922
3923 if (smid < ioc->hi_priority_smid)
3924 st = _get_st_from_smid(ioc, smid);
3925
3926 if (st == NULL)
3927 return _base_get_msix_index(ioc, NULL);
3928
3929 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3930 return st->msix_io;
3931}
3932
3933
3934
3935
3936
3937
3938
/**
 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
 * (mCPU endpoint variant)
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Copies the host-side message frame (with cloned SG entries) into the
 * controller's memory-mapped frame area before posting the descriptor.
 */
static void
_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;
	void *mpi_req_iomem;
	__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

	_clone_sg_entries(ioc, (void *) mfp, smid);
	mpi_req_iomem = (void __force *)ioc->chip +
			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
					ioc->request_sz);
	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	/* mCPU endpoints need the locked 32-bit pair write */
	_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
3961
3962
3963
3964
3965
3966
3967
/**
 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
static void
_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;


	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	/* post the 64-bit descriptor in one MMIO write */
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
3983
3984
3985
3986
3987
3988
3989
/**
 * _base_put_smid_fast_path - send fast path request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
static void
_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.SCSIIO.RequestFlags =
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
4006
4007
4008
4009
4010
4011
4012
/**
 * _base_put_smid_hi_priority - send Task Management request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort
 */
static void
_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 msix_task)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	void *mpi_req_iomem;
	u64 *request;

	if (ioc->is_mcpu_endpoint) {
		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

		/* mCPU endpoints fetch the frame from controller memory,
		 * so copy it there before posting the descriptor
		 */
		mpi_req_iomem = (void __force *)ioc->chip
					+ MPI_FRAME_START_OFFSET
					+ (smid * ioc->request_sz);
		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
							ioc->request_sz);
	}

	request = (u64 *)&descriptor;

	descriptor.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.HighPriority.MSIxIndex =  msix_task;
	descriptor.HighPriority.SMID = cpu_to_le16(smid);
	descriptor.HighPriority.LMID = 0;
	descriptor.HighPriority.Reserved1 = 0;
	if (ioc->is_mcpu_endpoint)
		_base_mpi_ep_writeq(*request,
				&ioc->chip->RequestDescriptorPostLow,
				&ioc->scsi_lookup_lock);
	else
		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
}
4048
4049
4050
4051
4052
4053
4054
/**
 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
 *  firmware
 * @ioc: per adapter object
 * @smid: system request message index
 */
void
mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.Default.RequestFlags =
		MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
	descriptor.Default.MSIxIndex =  _base_set_and_get_msix_index(ioc, smid);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
4070
4071
4072
4073
4074
4075
/**
 * _base_put_smid_default - Default, primarily used for config pages
 * @ioc: per adapter object
 * @smid: system request message index
 */
static void
_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	void *mpi_req_iomem;
	u64 *request;

	if (ioc->is_mcpu_endpoint) {
		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

		_clone_sg_entries(ioc, (void *) mfp, smid);
		/* mCPU endpoints fetch the frame from controller memory */
		mpi_req_iomem = (void __force *)ioc->chip +
			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
							ioc->request_sz);
	}
	request = (u64 *)&descriptor;
	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	if (ioc->is_mcpu_endpoint)
		_base_mpi_ep_writeq(*request,
				&ioc->chip->RequestDescriptorPostLow,
				&ioc->scsi_lookup_lock);
	else
		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
				&ioc->scsi_lookup_lock);
}
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
/**
 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
 *   Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle, unused in this function, for function type match
 *
 * Atomic descriptors are 32 bits, so a single writel posts them.
 */
static void
_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
4130
4131
4132
4133
4134
4135
4136
4137
4138
/**
 * _base_put_smid_fast_path_atomic - send fast path request to firmware
 * using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle, unused in this function, for function type match
 */
static void
_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
/**
 * _base_put_smid_hi_priority_atomic - send Task Management request to
 * firmware using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort
 */
static void
_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 msix_task)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.MSIxIndex = msix_task;
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184static void
4185_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4186{
4187 Mpi26AtomicRequestDescriptor_t descriptor;
4188 u32 *request = (u32 *)&descriptor;
4189
4190 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4191 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4192 descriptor.SMID = cpu_to_le16(smid);
4193
4194 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4195}
4196
4197
4198
4199
4200
4201static void
4202_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4203{
4204 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
4205 return;
4206
4207 switch (ioc->pdev->subsystem_vendor) {
4208 case PCI_VENDOR_ID_INTEL:
4209 switch (ioc->pdev->device) {
4210 case MPI2_MFGPAGE_DEVID_SAS2008:
4211 switch (ioc->pdev->subsystem_device) {
4212 case MPT2SAS_INTEL_RMS2LL080_SSDID:
4213 ioc_info(ioc, "%s\n",
4214 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4215 break;
4216 case MPT2SAS_INTEL_RMS2LL040_SSDID:
4217 ioc_info(ioc, "%s\n",
4218 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4219 break;
4220 case MPT2SAS_INTEL_SSD910_SSDID:
4221 ioc_info(ioc, "%s\n",
4222 MPT2SAS_INTEL_SSD910_BRANDING);
4223 break;
4224 default:
4225 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4226 ioc->pdev->subsystem_device);
4227 break;
4228 }
4229 break;
4230 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4231 switch (ioc->pdev->subsystem_device) {
4232 case MPT2SAS_INTEL_RS25GB008_SSDID:
4233 ioc_info(ioc, "%s\n",
4234 MPT2SAS_INTEL_RS25GB008_BRANDING);
4235 break;
4236 case MPT2SAS_INTEL_RMS25JB080_SSDID:
4237 ioc_info(ioc, "%s\n",
4238 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4239 break;
4240 case MPT2SAS_INTEL_RMS25JB040_SSDID:
4241 ioc_info(ioc, "%s\n",
4242 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4243 break;
4244 case MPT2SAS_INTEL_RMS25KB080_SSDID:
4245 ioc_info(ioc, "%s\n",
4246 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4247 break;
4248 case MPT2SAS_INTEL_RMS25KB040_SSDID:
4249 ioc_info(ioc, "%s\n",
4250 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4251 break;
4252 case MPT2SAS_INTEL_RMS25LB040_SSDID:
4253 ioc_info(ioc, "%s\n",
4254 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4255 break;
4256 case MPT2SAS_INTEL_RMS25LB080_SSDID:
4257 ioc_info(ioc, "%s\n",
4258 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4259 break;
4260 default:
4261 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4262 ioc->pdev->subsystem_device);
4263 break;
4264 }
4265 break;
4266 case MPI25_MFGPAGE_DEVID_SAS3008:
4267 switch (ioc->pdev->subsystem_device) {
4268 case MPT3SAS_INTEL_RMS3JC080_SSDID:
4269 ioc_info(ioc, "%s\n",
4270 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4271 break;
4272
4273 case MPT3SAS_INTEL_RS3GC008_SSDID:
4274 ioc_info(ioc, "%s\n",
4275 MPT3SAS_INTEL_RS3GC008_BRANDING);
4276 break;
4277 case MPT3SAS_INTEL_RS3FC044_SSDID:
4278 ioc_info(ioc, "%s\n",
4279 MPT3SAS_INTEL_RS3FC044_BRANDING);
4280 break;
4281 case MPT3SAS_INTEL_RS3UC080_SSDID:
4282 ioc_info(ioc, "%s\n",
4283 MPT3SAS_INTEL_RS3UC080_BRANDING);
4284 break;
4285 default:
4286 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4287 ioc->pdev->subsystem_device);
4288 break;
4289 }
4290 break;
4291 default:
4292 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4293 ioc->pdev->subsystem_device);
4294 break;
4295 }
4296 break;
4297 case PCI_VENDOR_ID_DELL:
4298 switch (ioc->pdev->device) {
4299 case MPI2_MFGPAGE_DEVID_SAS2008:
4300 switch (ioc->pdev->subsystem_device) {
4301 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4302 ioc_info(ioc, "%s\n",
4303 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4304 break;
4305 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4306 ioc_info(ioc, "%s\n",
4307 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4308 break;
4309 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4310 ioc_info(ioc, "%s\n",
4311 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4312 break;
4313 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4314 ioc_info(ioc, "%s\n",
4315 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4316 break;
4317 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4318 ioc_info(ioc, "%s\n",
4319 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4320 break;
4321 case MPT2SAS_DELL_PERC_H200_SSDID:
4322 ioc_info(ioc, "%s\n",
4323 MPT2SAS_DELL_PERC_H200_BRANDING);
4324 break;
4325 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4326 ioc_info(ioc, "%s\n",
4327 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4328 break;
4329 default:
4330 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4331 ioc->pdev->subsystem_device);
4332 break;
4333 }
4334 break;
4335 case MPI25_MFGPAGE_DEVID_SAS3008:
4336 switch (ioc->pdev->subsystem_device) {
4337 case MPT3SAS_DELL_12G_HBA_SSDID:
4338 ioc_info(ioc, "%s\n",
4339 MPT3SAS_DELL_12G_HBA_BRANDING);
4340 break;
4341 default:
4342 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4343 ioc->pdev->subsystem_device);
4344 break;
4345 }
4346 break;
4347 default:
4348 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4349 ioc->pdev->subsystem_device);
4350 break;
4351 }
4352 break;
4353 case PCI_VENDOR_ID_CISCO:
4354 switch (ioc->pdev->device) {
4355 case MPI25_MFGPAGE_DEVID_SAS3008:
4356 switch (ioc->pdev->subsystem_device) {
4357 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4358 ioc_info(ioc, "%s\n",
4359 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4360 break;
4361 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4362 ioc_info(ioc, "%s\n",
4363 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4364 break;
4365 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4366 ioc_info(ioc, "%s\n",
4367 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4368 break;
4369 default:
4370 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4371 ioc->pdev->subsystem_device);
4372 break;
4373 }
4374 break;
4375 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4376 switch (ioc->pdev->subsystem_device) {
4377 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4378 ioc_info(ioc, "%s\n",
4379 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4380 break;
4381 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4382 ioc_info(ioc, "%s\n",
4383 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4384 break;
4385 default:
4386 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4387 ioc->pdev->subsystem_device);
4388 break;
4389 }
4390 break;
4391 default:
4392 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4393 ioc->pdev->subsystem_device);
4394 break;
4395 }
4396 break;
4397 case MPT2SAS_HP_3PAR_SSVID:
4398 switch (ioc->pdev->device) {
4399 case MPI2_MFGPAGE_DEVID_SAS2004:
4400 switch (ioc->pdev->subsystem_device) {
4401 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4402 ioc_info(ioc, "%s\n",
4403 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4404 break;
4405 default:
4406 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4407 ioc->pdev->subsystem_device);
4408 break;
4409 }
4410 break;
4411 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4412 switch (ioc->pdev->subsystem_device) {
4413 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4414 ioc_info(ioc, "%s\n",
4415 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4416 break;
4417 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4418 ioc_info(ioc, "%s\n",
4419 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4420 break;
4421 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4422 ioc_info(ioc, "%s\n",
4423 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4424 break;
4425 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4426 ioc_info(ioc, "%s\n",
4427 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4428 break;
4429 default:
4430 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4431 ioc->pdev->subsystem_device);
4432 break;
4433 }
4434 break;
4435 default:
4436 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4437 ioc->pdev->subsystem_device);
4438 break;
4439 }
4440 break;
4441 default:
4442 break;
4443 }
4444}
4445
4446
4447
4448
4449
4450
4451
4452
/**
 * _base_display_fwpkg_version - read and log the firmware package version
 * @ioc: per adapter object
 *
 * Issues an FW_UPLOAD request to pull just the firmware image header into
 * a DMA buffer, then extracts and logs the package version encoded in it.
 *
 * Return: 0 on success; -EAGAIN if the internal command slot is busy or a
 * smid could not be obtained (or after a successful recovery reset);
 * -ENOMEM on allocation failure; -EFAULT if recovery from a timeout fails.
 */
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2FWImageHeader_t *fw_img_hdr;
	Mpi26ComponentImageHeader_t *cmp_img_hdr;
	Mpi25FWUploadRequest_t *mpi_request;
	Mpi2FWUploadReply_t mpi_reply;
	int r = 0, issue_diag_reset = 0;
	u32 package_version = 0;
	void *fwpkg_data = NULL;
	dma_addr_t fwpkg_data_dma;
	u16 smid, ioc_status;
	size_t data_length;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* only one internal base command may be outstanding at a time */
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	/* DMA buffer sized for the FW image header only */
	data_length = sizeof(Mpi2FWImageHeader_t);
	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
	    &fwpkg_data_dma, GFP_KERNEL);
	if (!fwpkg_data) {
		ioc_err(ioc,
		    "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		r = -EAGAIN;
		goto out;
	}

	/* build the FW_UPLOAD request frame and post it to the firmware */
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
	mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
	mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
	mpi_request->ImageSize = cpu_to_le32(data_length);
	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
	    data_length);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);

	wait_for_completion_timeout(&ioc->base_cmds.done,
	    FW_IMG_HDR_READ_TIMEOUT*HZ);
	/* NOTE(review): this "complete" line is logged unconditionally,
	 * even when the wait above timed out -- confirm intended. */
	ioc_info(ioc, "%s: complete\n", __func__);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		/* firmware never answered; schedule diag-reset recovery */
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi25FWUploadRequest_t)/4);
		issue_diag_reset = 1;
	} else {
		memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
		if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
			memcpy(&mpi_reply, ioc->base_cmds.reply,
			    sizeof(Mpi2FWUploadReply_t));
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
				fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
				/*
				 * MPI 2.6 component image headers carry the
				 * package version in ApplicationSpecific;
				 * legacy headers carry it in PackageVersion.
				 */
				if (le32_to_cpu(fw_img_hdr->Signature) ==
				    MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
					cmp_img_hdr =
					    (Mpi26ComponentImageHeader_t *)
					    (fwpkg_data);
					package_version =
					    le32_to_cpu(
					    cmp_img_hdr->ApplicationSpecific);
				} else
					package_version =
					    le32_to_cpu(
					    fw_img_hdr->PackageVersion.Word);
				if (package_version)
					ioc_info(ioc,
					    "FW Package Ver(%02d.%02d.%02d.%02d)\n",
					    ((package_version) & 0xFF000000) >> 24,
					    ((package_version) & 0x00FF0000) >> 16,
					    ((package_version) & 0x0000FF00) >> 8,
					    (package_version) & 0x000000FF);
			} else {
				_debug_dump_mf(&mpi_reply,
				    sizeof(Mpi2FWUploadReply_t)/4);
			}
		}
	}
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
out:
	if (fwpkg_data)
		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
		    fwpkg_data_dma);
	if (issue_diag_reset) {
		/* presumably: first port-enable already issued, so a reset
		 * is not safe here -- TODO confirm flag semantics */
		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
			return -EFAULT;
		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
			return -EFAULT;
		r = -EAGAIN;
	}
	return r;
}
4559
4560
4561
4562
4563
4564static void
4565_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4566{
4567 int i = 0;
4568 char desc[16];
4569 u32 iounit_pg1_flags;
4570 u32 bios_version;
4571
4572 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4573 strncpy(desc, ioc->manu_pg0.ChipName, 16);
4574 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4575 desc,
4576 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4577 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4578 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4579 ioc->facts.FWVersion.Word & 0x000000FF,
4580 ioc->pdev->revision,
4581 (bios_version & 0xFF000000) >> 24,
4582 (bios_version & 0x00FF0000) >> 16,
4583 (bios_version & 0x0000FF00) >> 8,
4584 bios_version & 0x000000FF);
4585
4586 _base_display_OEMs_branding(ioc);
4587
4588 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4589 pr_info("%sNVMe", i ? "," : "");
4590 i++;
4591 }
4592
4593 ioc_info(ioc, "Protocol=(");
4594
4595 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4596 pr_cont("Initiator");
4597 i++;
4598 }
4599
4600 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4601 pr_cont("%sTarget", i ? "," : "");
4602 i++;
4603 }
4604
4605 i = 0;
4606 pr_cont("), Capabilities=(");
4607
4608 if (!ioc->hide_ir_msg) {
4609 if (ioc->facts.IOCCapabilities &
4610 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4611 pr_cont("Raid");
4612 i++;
4613 }
4614 }
4615
4616 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4617 pr_cont("%sTLR", i ? "," : "");
4618 i++;
4619 }
4620
4621 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4622 pr_cont("%sMulticast", i ? "," : "");
4623 i++;
4624 }
4625
4626 if (ioc->facts.IOCCapabilities &
4627 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4628 pr_cont("%sBIDI Target", i ? "," : "");
4629 i++;
4630 }
4631
4632 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4633 pr_cont("%sEEDP", i ? "," : "");
4634 i++;
4635 }
4636
4637 if (ioc->facts.IOCCapabilities &
4638 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4639 pr_cont("%sSnapshot Buffer", i ? "," : "");
4640 i++;
4641 }
4642
4643 if (ioc->facts.IOCCapabilities &
4644 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4645 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4646 i++;
4647 }
4648
4649 if (ioc->facts.IOCCapabilities &
4650 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4651 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4652 i++;
4653 }
4654
4655 if (ioc->facts.IOCCapabilities &
4656 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4657 pr_cont("%sTask Set Full", i ? "," : "");
4658 i++;
4659 }
4660
4661 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4662 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4663 pr_cont("%sNCQ", i ? "," : "");
4664 i++;
4665 }
4666
4667 pr_cont(")\n");
4668}
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
/**
 * mpt3sas_base_update_missing_delay - change the missing delay timers
 * @ioc: per adapter object
 * @device_missing_delay: new ReportDeviceMissingDelay value
 * @io_missing_delay: new IODeviceMissingDelay value
 *
 * Rewrites SAS IO Unit Page 1 with the requested device/IO missing delays
 * and, on a successful write, caches the decoded values on @ioc
 * (ioc->device_missing_delay / ioc->io_missing_delay).
 */
void
mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
	u16 device_missing_delay, u8 io_missing_delay)
{
	u16 dmd, dmd_new, dmd_orignal;
	u8 io_missing_delay_original;
	u16 sz;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2ConfigReply_t mpi_reply;
	u8 num_phys = 0;
	u16 ioc_status;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys)
		return;

	/* page size scales with the per-PHY data array */
	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* device missing delay: decode the current value; with the UNIT_16
	 * flag set, the field is stored in units of 16 seconds */
	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
	dmd_orignal = dmd;
	/* values above 0x7F must be encoded in 16s units (capped at 0x7F0) */
	if (device_missing_delay > 0x7F) {
		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
		    device_missing_delay;
		dmd = dmd / 16;
		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
	} else
		dmd = device_missing_delay;
	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;

	/* io missing delay */
	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;

	/* on a successful page write, log old/new and cache decoded values */
	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
	    sz)) {
		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
			dmd_new = (dmd &
			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
		else
			dmd_new =
			    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
			 dmd_orignal, dmd_new);
		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
			 io_missing_delay_original,
			 io_missing_delay);
		ioc->device_missing_delay = dmd_new;
		ioc->io_missing_delay = io_missing_delay;
	}

out:
	kfree(sas_iounit_pg1);
}
4758
4759
4760
4761
4762
4763
4764
4765
4766
/**
 * _base_update_ioc_page1_inlinewith_perf_mode - program interrupt
 *	coalescing in IOC Page 1 according to the perf_mode module param
 * @ioc: per adapter object
 *
 * Return: 0 on success, non-zero on config-page read/write failure.
 */
static int
_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCPage1_t ioc_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int rc;

	/* keep an unmodified copy on ioc; work on a local scratch copy */
	rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
	if (rc)
		return rc;
	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));

	switch (perf_mode) {
	case MPT_PERF_MODE_DEFAULT:
	case MPT_PERF_MODE_BALANCED:
		if (ioc->high_iops_queues) {
			ioc_info(ioc,
			    "Enable interrupt coalescing only for first\t"
			    "%d reply queues\n",
			    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
			/*
			 * Bit 31 set appears to select per-queue-group
			 * (groups of 8, hence the /8 below) control of
			 * interrupt coalescing; the low bits pick the
			 * group(s), here only the first
			 * MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8 group(s).
			 * TODO confirm against the MPI IOC Page 1
			 * ProductSpecific definition.
			 */
			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
			rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
			if (rc)
				return rc;
			ioc_info(ioc, "performance mode: balanced\n");
			return 0;
		}
		fallthrough;
	case MPT_PERF_MODE_LATENCY:
		/*
		 * Enable interrupt coalescing on all reply queues with a
		 * timeout of 0xa (units per the MPI spec -- confirm).
		 */
		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
		ioc_pg1.ProductSpecific = 0;
		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		if (rc)
			return rc;
		ioc_info(ioc, "performance mode: latency\n");
		break;
	case MPT_PERF_MODE_IOPS:
		/*
		 * Enable interrupt coalescing on all reply queues, keeping
		 * the firmware's current coalescing timeout.
		 */
		ioc_info(ioc,
		    "performance mode: iops with coalescing timeout: 0x%x\n",
		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
		ioc_pg1.ProductSpecific = 0;
		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		if (rc)
			return rc;
		break;
	}
	return 0;
}
4834
4835
4836
4837
4838
4839
4840
4841
4842static int
4843_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4844{
4845 Mpi26DriverTriggerPage2_t trigger_pg2;
4846 struct SL_WH_EVENT_TRIGGER_T *event_tg;
4847 MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
4848 Mpi2ConfigReply_t mpi_reply;
4849 int r = 0, i = 0;
4850 u16 count = 0;
4851 u16 ioc_status;
4852
4853 r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
4854 &trigger_pg2);
4855 if (r)
4856 return r;
4857
4858 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4859 MPI2_IOCSTATUS_MASK;
4860 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4861 dinitprintk(ioc,
4862 ioc_err(ioc,
4863 "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
4864 __func__, ioc_status));
4865 return 0;
4866 }
4867
4868 if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
4869 count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
4870 count = min_t(u16, NUM_VALID_ENTRIES, count);
4871 ioc->diag_trigger_event.ValidEntries = count;
4872
4873 event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
4874 mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
4875 for (i = 0; i < count; i++) {
4876 event_tg->EventValue = le16_to_cpu(
4877 mpi_event_tg->MPIEventCode);
4878 event_tg->LogEntryQualifier = le16_to_cpu(
4879 mpi_event_tg->MPIEventCodeSpecific);
4880 event_tg++;
4881 mpi_event_tg++;
4882 }
4883 }
4884 return 0;
4885}
4886
4887
4888
4889
4890
4891
4892
4893
4894static int
4895_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4896{
4897 Mpi26DriverTriggerPage3_t trigger_pg3;
4898 struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
4899 MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
4900 Mpi2ConfigReply_t mpi_reply;
4901 int r = 0, i = 0;
4902 u16 count = 0;
4903 u16 ioc_status;
4904
4905 r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
4906 &trigger_pg3);
4907 if (r)
4908 return r;
4909
4910 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4911 MPI2_IOCSTATUS_MASK;
4912 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4913 dinitprintk(ioc,
4914 ioc_err(ioc,
4915 "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
4916 __func__, ioc_status));
4917 return 0;
4918 }
4919
4920 if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
4921 count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
4922 count = min_t(u16, NUM_VALID_ENTRIES, count);
4923 ioc->diag_trigger_scsi.ValidEntries = count;
4924
4925 scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
4926 mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
4927 for (i = 0; i < count; i++) {
4928 scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
4929 scsi_tg->ASC = mpi_scsi_tg->ASC;
4930 scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
4931
4932 scsi_tg++;
4933 mpi_scsi_tg++;
4934 }
4935 }
4936 return 0;
4937}
4938
4939
4940
4941
4942
4943
4944
4945
4946static int
4947_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4948{
4949 Mpi26DriverTriggerPage4_t trigger_pg4;
4950 struct SL_WH_MPI_TRIGGER_T *status_tg;
4951 MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
4952 Mpi2ConfigReply_t mpi_reply;
4953 int r = 0, i = 0;
4954 u16 count = 0;
4955 u16 ioc_status;
4956
4957 r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
4958 &trigger_pg4);
4959 if (r)
4960 return r;
4961
4962 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4963 MPI2_IOCSTATUS_MASK;
4964 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4965 dinitprintk(ioc,
4966 ioc_err(ioc,
4967 "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
4968 __func__, ioc_status));
4969 return 0;
4970 }
4971
4972 if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
4973 count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
4974 count = min_t(u16, NUM_VALID_ENTRIES, count);
4975 ioc->diag_trigger_mpi.ValidEntries = count;
4976
4977 status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
4978 mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
4979
4980 for (i = 0; i < count; i++) {
4981 status_tg->IOCStatus = le16_to_cpu(
4982 mpi_status_tg->IOCStatus);
4983 status_tg->IocLogInfo = le32_to_cpu(
4984 mpi_status_tg->LogInfo);
4985
4986 status_tg++;
4987 mpi_status_tg++;
4988 }
4989 }
4990 return 0;
4991}
4992
4993
4994
4995
4996
4997
4998
4999
5000static int
5001_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5002{
5003 Mpi26DriverTriggerPage1_t trigger_pg1;
5004 Mpi2ConfigReply_t mpi_reply;
5005 int r;
5006 u16 ioc_status;
5007
5008 r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
5009 &trigger_pg1);
5010 if (r)
5011 return r;
5012
5013 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5014 MPI2_IOCSTATUS_MASK;
5015 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5016 dinitprintk(ioc,
5017 ioc_err(ioc,
5018 "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
5019 __func__, ioc_status));
5020 return 0;
5021 }
5022
5023 if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
5024 ioc->diag_trigger_master.MasterData |=
5025 le32_to_cpu(
5026 trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
5027 return 0;
5028}
5029
5030
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042static int
5043_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
5044{
5045 Mpi26DriverTriggerPage0_t trigger_pg0;
5046 int r = 0;
5047 Mpi2ConfigReply_t mpi_reply;
5048 u16 ioc_status;
5049
5050 r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
5051 &trigger_pg0);
5052 if (r)
5053 return r;
5054
5055 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5056 MPI2_IOCSTATUS_MASK;
5057 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5058 return -EFAULT;
5059
5060 *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
5061 return 0;
5062}
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072static int
5073_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5074{
5075 int trigger_flags;
5076 int r;
5077
5078
5079
5080
5081 ioc->diag_trigger_master.MasterData =
5082 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
5083
5084 r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
5085 if (r) {
5086 if (r == -EAGAIN)
5087 return r;
5088
5089
5090
5091
5092 return 0;
5093 }
5094
5095 ioc->supports_trigger_pages = 1;
5096
5097
5098
5099
5100
5101 if ((u16)trigger_flags &
5102 MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
5103 r = _base_get_master_diag_triggers(ioc);
5104 if (r)
5105 return r;
5106 }
5107
5108
5109
5110
5111
5112 if ((u16)trigger_flags &
5113 MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
5114 r = _base_get_event_diag_triggers(ioc);
5115 if (r)
5116 return r;
5117 }
5118
5119
5120
5121
5122
5123 if ((u16)trigger_flags &
5124 MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
5125 r = _base_get_scsi_diag_triggers(ioc);
5126 if (r)
5127 return r;
5128 }
5129
5130
5131
5132
5133 if ((u16)trigger_flags &
5134 MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
5135 r = _base_get_mpi_diag_triggers(ioc);
5136 if (r)
5137 return r;
5138 }
5139 return 0;
5140}
5141
5142
5143
5144
5145
5146
5147
5148
5149
/**
 * _base_update_diag_trigger_pages - push the driver's current diag
 *	trigger values out to the persistent driver trigger pages
 * @ioc: per adapter object
 *
 * Only non-empty trigger tables are written; each page is updated with
 * the "set" flag (trailing 1) of its config helper.
 */
static void
_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
{
	/* master trigger flags -> trigger page 1 */
	if (ioc->diag_trigger_master.MasterData)
		mpt3sas_config_update_driver_trigger_pg1(ioc,
		    &ioc->diag_trigger_master, 1);

	/* MPI event triggers -> trigger page 2 */
	if (ioc->diag_trigger_event.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg2(ioc,
		    &ioc->diag_trigger_event, 1);

	/* SCSI sense triggers -> trigger page 3 */
	if (ioc->diag_trigger_scsi.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg3(ioc,
		    &ioc->diag_trigger_scsi, 1);

	/* IOCStatus/LogInfo triggers -> trigger page 4 */
	if (ioc->diag_trigger_mpi.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg4(ioc,
		    &ioc->diag_trigger_mpi, 1);
}
5170
5171
5172
5173
5174
/**
 * _base_static_config_pages - read/apply the static start-of-day config pages
 * @ioc: per adapter object
 *
 * Reads and caches the manufacturing, BIOS, IOC and IO Unit config pages
 * the driver relies on, applies NVDATA overrides (EEDP tag mode, NVMe
 * abort timeout, time-sync interval), programs task-set-full handling in
 * IO Unit Page 1, and sets up performance-mode and diag-trigger pages for
 * controller generations that support them.
 *
 * Return: 0 on success, non-zero on a config-page read/write failure.
 */
static int
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;
	int tg_flags = 0;
	int rc;
	/* default NVMe abort timeout; may be overridden by manu pg11 below */
	ioc->nvme_abort_timeout = 30;

	rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
	    &ioc->manu_pg0);
	if (rc)
		return rc;
	/* manufacturing page 10 is only read on IR (RAID) firmware */
	if (ioc->ir_firmware) {
		rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);
		if (rc)
			return rc;
	}

	/*
	 * Manufacturing page 11 carries vendor NVDATA tunables:
	 * EEDP tag mode, NVMe task-management behavior and abort timeout,
	 * and the driver-FW time-sync interval.
	 */
	rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
	    &ioc->manu_pg11);
	if (rc)
		return rc;
	/* pre-gen3.5 parts with EEDPTagMode 0: force mode 1 in NVDATA */
	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}
	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
		ioc->tm_custom_handling = 1;
	else {
		ioc->tm_custom_handling = 0;
		/* clamp the NVDATA NVMe abort timeout into [MIN, MAX] */
		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
		else if (ioc->manu_pg11.NVMeAbortTO >
		    NVME_TASK_ABORT_MAX_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
		else
			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
	}
	/* time-sync interval is given in hours or minutes per the unit bit */
	ioc->time_sync_interval =
	    ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
	if (ioc->time_sync_interval) {
		if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
			ioc->time_sync_interval =
			    ioc->time_sync_interval * SECONDS_PER_HOUR;
		else
			ioc->time_sync_interval =
			    ioc->time_sync_interval * SECONDS_PER_MIN;
		dinitprintk(ioc, ioc_info(ioc,
		    "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
		    ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
		    MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
	} else {
		if (ioc->is_gen35_ioc)
			ioc_warn(ioc,
			    "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
	}
	rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	if (rc)
		return rc;
	rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	if (rc)
		return rc;
	rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	if (rc)
		return rc;
	rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	if (rc)
		return rc;
	rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	if (rc)
		return rc;
	rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
	if (rc)
		return rc;
	_base_display_ioc_capabilities(ioc);

	/*
	 * Mirror the IOC's task-set-full capability into IO Unit Page 1:
	 * enable handling when the IOC supports it, disable otherwise.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	if (rc)
		return rc;

	if (ioc->iounit_pg8.NumSensors)
		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
	if (ioc->is_aero_ioc) {
		rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
		if (rc)
			return rc;
	}
	if (ioc->is_gen35_ioc) {
		if (ioc->is_driver_loading) {
			rc = _base_get_diag_triggers(ioc);
			if (rc)
				return rc;
		} else {
			/*
			 * Online HBA firmware update path: re-check whether
			 * the (possibly new) firmware supports driver
			 * trigger pages and reconcile driver state:
			 * - support gained: push the current driver trigger
			 *   values out to the pages;
			 * - support lost: clear supports_trigger_pages.
			 * NOTE(review): tg_flags receives trigger FLAGS from
			 * the helper, yet it is compared against -EFAULT
			 * (the helper's discarded return code) -- looks
			 * suspicious; confirm intended behavior.
			 */
			_base_check_for_trigger_pages_support(ioc, &tg_flags);
			if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
				_base_update_diag_trigger_pages(ioc);
			else if (ioc->supports_trigger_pages &&
			    tg_flags == -EFAULT)
				ioc->supports_trigger_pages = 0;
		}
	}
	return 0;
}
5311
5312
5313
5314
5315
5316
5317
5318void
5319mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
5320{
5321 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
5322
5323
5324 list_for_each_entry_safe(enclosure_dev,
5325 enclosure_dev_next, &ioc->enclosure_list, list) {
5326 list_del(&enclosure_dev->list);
5327 kfree(enclosure_dev);
5328 }
5329}
5330
5331
5332
5333
5334
5335
5336
/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Frees every pool and lookup table allocated by
 * _base_allocate_memory_pools(), in an order that mirrors the
 * allocation order. Safe to call on a partially-allocated adapter:
 * each resource is checked before it is freed.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	int j = 0;
	int dma_alloc_count = 0;
	struct chain_tracker *ct;
	/* One descriptor queue per reply queue in RDPQ mode, else one. */
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->request) {
		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
		    ioc->request, ioc->request_dma);
		dexitprintk(ioc,
			    ioc_info(ioc, "request_pool(0x%p): free\n",
				     ioc->request));
		ioc->request = NULL;
	}

	if (ioc->sense) {
		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		dma_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "sense_pool(0x%p): free\n",
				     ioc->sense));
		ioc->sense = NULL;
	}

	if (ioc->reply) {
		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		dma_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "reply_pool(0x%p): free\n",
				     ioc->reply));
		ioc->reply = NULL;
	}

	if (ioc->reply_free) {
		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		dma_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
				     ioc->reply_free));
		ioc->reply_free = NULL;
	}

	if (ioc->reply_post) {
		/* Reply-post queues are allocated in chunks of
		 * RDPQ_MAX_INDEX_IN_ONE_CHUNK; only the first entry of
		 * each chunk owns a dma_pool allocation, so only those
		 * entries are passed to dma_pool_free().
		 */
		dma_alloc_count = DIV_ROUND_UP(count,
				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
		for (i = 0; i < count; i++) {
			if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
			    && dma_alloc_count) {
				if (ioc->reply_post[i].reply_post_free) {
					dma_pool_free(
					    ioc->reply_post_free_dma_pool,
					    ioc->reply_post[i].reply_post_free,
					    ioc->reply_post[i].reply_post_free_dma);
					dexitprintk(ioc, ioc_info(ioc,
					    "reply_post_free_pool(0x%p): free\n",
					    ioc->reply_post[i].reply_post_free));
					ioc->reply_post[i].reply_post_free =
								NULL;
				}
				--dma_alloc_count;
			}
		}
		dma_pool_destroy(ioc->reply_post_free_dma_pool);
		/* reply_post_free_array exists only in RDPQ mode. */
		if (ioc->reply_post_free_array &&
			ioc->rdpq_array_enable) {
			dma_pool_free(ioc->reply_post_free_array_dma_pool,
			    ioc->reply_post_free_array,
			    ioc->reply_post_free_array_dma);
			ioc->reply_post_free_array = NULL;
		}
		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
		kfree(ioc->reply_post);
	}

	if (ioc->pcie_sgl_dma_pool) {
		/* Free the per-command NVMe PRP/SGL buffers first, then
		 * the pool itself.
		 */
		for (i = 0; i < ioc->scsiio_depth; i++) {
			dma_pool_free(ioc->pcie_sgl_dma_pool,
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
		}
		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
	}
	if (ioc->config_page) {
		dexitprintk(ioc,
			    ioc_info(ioc, "config_page(0x%p): free\n",
				     ioc->config_page));
		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	kfree(ioc->hpr_lookup);
	ioc->hpr_lookup = NULL;
	kfree(ioc->internal_lookup);
	ioc->internal_lookup = NULL;
	if (ioc->chain_lookup) {
		/* Chains below chains_per_prp_buffer live inside the PCIe
		 * SGL buffers (freed above); only the rest came from the
		 * chain dma pool.
		 */
		for (i = 0; i < ioc->scsiio_depth; i++) {
			for (j = ioc->chains_per_prp_buffer;
			    j < ioc->chains_needed_per_io; j++) {
				ct = &ioc->chain_lookup[i].chains_per_smid[j];
				if (ct && ct->chain_buffer)
					dma_pool_free(ioc->chain_dma_pool,
						ct->chain_buffer,
						ct->chain_buffer_dma);
			}
			kfree(ioc->chain_lookup[i].chains_per_smid);
		}
		dma_pool_destroy(ioc->chain_dma_pool);
		kfree(ioc->chain_lookup);
		ioc->chain_lookup = NULL;
	}

	kfree(ioc->io_queue_num);
	ioc->io_queue_num = NULL;
}
5458
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469static int
5470mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
5471{
5472 long reply_pool_end_address;
5473
5474 reply_pool_end_address = reply_pool_start_address + pool_sz;
5475
5476 if (upper_32_bits(reply_pool_start_address) ==
5477 upper_32_bits(reply_pool_end_address))
5478 return 1;
5479 else
5480 return 0;
5481}
5482
5483
5484
5485
5486
5487
5488
5489static inline int
5490_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
5491{
5492 int reduce_sz = 64;
5493
5494 if ((ioc->hba_queue_depth - reduce_sz) >
5495 (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
5496 ioc->hba_queue_depth -= reduce_sz;
5497 return 0;
5498 } else
5499 return -ENOMEM;
5500}
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
/**
 * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory for
 *	PCIe SGL (NVMe PRP) buffers, one per SCSI IO slot.
 * @ioc: Adapter object
 * @sz: DMA Pool element size
 *
 * Return: 0 for success, -ENOMEM when the pool cannot be created, and
 * -EAGAIN when an element allocation fails or crosses a 4GB boundary
 * (the caller then retries, possibly with a 32-bit DMA mask).
 */
static int
_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ct;

	ioc->pcie_sgl_dma_pool =
	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
	    ioc->page_size, 0);
	if (!ioc->pcie_sgl_dma_pool) {
		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
		return -ENOMEM;
	}

	/* Chain frames are carved out of the PRP page itself, capped at
	 * the number of chains an IO can actually use.
	 */
	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
	ioc->chains_per_prp_buffer =
	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->pcie_sg_lookup[i].pcie_sgl =
		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
			return -EAGAIN;
		}

		/* HW requires the whole buffer inside one 4GB region;
		 * fall back to a 32-bit DMA mask otherwise.
		 */
		if (!mpt3sas_check_same_4gb_region(
		    (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    (unsigned long long)
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->use_32bit_dma = true;
			return -EAGAIN;
		}

		/* Point the first chains_per_prp_buffer chain trackers at
		 * sub-regions of this PRP page (both CPU and DMA views).
		 */
		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
			ct = &ioc->chain_lookup[i].chains_per_smid[j];
			ct->chain_buffer =
			    ioc->pcie_sg_lookup[i].pcie_sgl +
			    (j * ioc->chain_segment_sz);
			ct->chain_buffer_dma =
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
			    (j * ioc->chain_segment_sz);
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "Number of chains can fit in a PRP page(%d)\n",
	    ioc->chains_per_prp_buffer));
	return 0;
}
5565
5566
5567
5568
5569
5570
5571
5572
5573
/**
 * _base_allocate_chain_dma_pool - Allocating DMA'able memory
 *	for chain pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, -ENOMEM when the pool cannot be created, and
 * -EAGAIN on a failed or 4GB-crossing element allocation (caller
 * retries, possibly with a 32-bit DMA mask).
 *
 * NOTE(review): @sz is not referenced in the body; the pool element
 * size comes from ioc->chain_segment_sz (callers pass the same value).
 */
static int
_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ctr;

	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
	    ioc->chain_segment_sz, 16, 0);
	if (!ioc->chain_dma_pool)
		return -ENOMEM;

	/* Chains with index below chains_per_prp_buffer were already
	 * carved out of the PCIe SGL pages; allocate only the rest.
	 */
	for (i = 0; i < ioc->scsiio_depth; i++) {
		for (j = ioc->chains_per_prp_buffer;
		    j < ioc->chains_needed_per_io; j++) {
			ctr = &ioc->chain_lookup[i].chains_per_smid[j];
			ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
			    GFP_KERNEL, &ctr->chain_buffer_dma);
			if (!ctr->chain_buffer)
				return -EAGAIN;
			/* Each chain frame must sit within one 4GB region. */
			if (!mpt3sas_check_same_4gb_region((long)
			    ctr->chain_buffer, ioc->chain_segment_sz)) {
				ioc_err(ioc,
				    "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
				    ctr->chain_buffer,
				    (unsigned long long)ctr->chain_buffer_dma);
				ioc->use_32bit_dma = true;
				return -EAGAIN;
			}
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
	    (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
	    ioc->chain_segment_sz))/1024));
	return 0;
}
5611
5612
5613
5614
5615
5616
5617
5618
/**
 * _base_allocate_sense_dma_pool - Allocating DMA'able memory
 *	for sense pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, -ENOMEM when the pool cannot be created, and
 * -EAGAIN when allocation fails or the buffer crosses a 4GB boundary
 * (caller retries, possibly with a 32-bit DMA mask).
 */
static int
_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	/* 4-byte alignment is sufficient for sense buffers. */
	ioc->sense_dma_pool =
	    dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
	if (!ioc->sense_dma_pool)
		return -ENOMEM;
	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
	    GFP_KERNEL, &ioc->sense_dma);
	if (!ioc->sense)
		return -EAGAIN;
	/* The whole sense region must live within one 4GB region. */
	if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
		dinitprintk(ioc, pr_err(
		    "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
		    ioc->sense, (unsigned long long) ioc->sense_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	ioc_info(ioc,
	    "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
	    ioc->sense, (unsigned long long)ioc->sense_dma,
	    ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
	return 0;
}
5643
5644
5645
5646
5647
5648
5649
5650
5651static int
5652_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5653{
5654
5655 ioc->reply_dma_pool = dma_pool_create("reply pool",
5656 &ioc->pdev->dev, sz, 4, 0);
5657 if (!ioc->reply_dma_pool)
5658 return -ENOMEM;
5659 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5660 &ioc->reply_dma);
5661 if (!ioc->reply)
5662 return -EAGAIN;
5663 if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
5664 dinitprintk(ioc, pr_err(
5665 "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
5666 ioc->reply, (unsigned long long) ioc->reply_dma));
5667 ioc->use_32bit_dma = true;
5668 return -EAGAIN;
5669 }
5670 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5671 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5672 ioc_info(ioc,
5673 "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5674 ioc->reply, (unsigned long long)ioc->reply_dma,
5675 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
5676 return 0;
5677}
5678
5679
5680
5681
5682
5683
5684
5685
/**
 * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
 *	for reply free queue.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, -ENOMEM when the pool cannot be created, and
 * -EAGAIN when allocation fails or the buffer crosses a 4GB boundary
 * (caller retries, possibly with a 32-bit DMA mask).
 */
static int
_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	/* reply free queue, 16 byte align */
	ioc->reply_free_dma_pool = dma_pool_create(
	    "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool)
		return -ENOMEM;
	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
	    GFP_KERNEL, &ioc->reply_free_dma);
	if (!ioc->reply_free)
		return -EAGAIN;
	/* The queue must not straddle a 4GB boundary. */
	if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
		dinitprintk(ioc,
		    pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	/* Firmware expects the reply-free queue zero-initialized. */
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, ioc_info(ioc,
	    "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "reply_free_dma (0x%llx)\n",
	    (unsigned long long)ioc->reply_free_dma));
	return 0;
}
5714
5715
5716
5717
5718
5719
5720
5721
5722
5723static int
5724_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
5725 u32 reply_post_free_array_sz)
5726{
5727 ioc->reply_post_free_array_dma_pool =
5728 dma_pool_create("reply_post_free_array pool",
5729 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5730 if (!ioc->reply_post_free_array_dma_pool)
5731 return -ENOMEM;
5732 ioc->reply_post_free_array =
5733 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5734 GFP_KERNEL, &ioc->reply_post_free_array_dma);
5735 if (!ioc->reply_post_free_array)
5736 return -EAGAIN;
5737 if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
5738 reply_post_free_array_sz)) {
5739 dinitprintk(ioc, pr_err(
5740 "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
5741 ioc->reply_free,
5742 (unsigned long long) ioc->reply_free_dma));
5743 ioc->use_32bit_dma = true;
5744 return -EAGAIN;
5745 }
5746 return 0;
5747}
5748
5749
5750
5751
5752
5753
5754
/**
 * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
 *	for reply queues.
 * @ioc: per adapter object
 * @sz: DMA Pool size
 *
 * Reply Descriptor Post Queues are allocated in chunks: one dma_pool
 * element holds up to RDPQ_MAX_INDEX_IN_ONE_CHUNK queues, and the
 * queues inside a chunk are carved out at reply_post_free_sz offsets.
 *
 * Return: 0 for success, -ENOMEM for failure.
 */
static int
base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
{
	int i = 0;
	u32 dma_alloc_count = 0;
	int reply_post_free_sz = ioc->reply_post_queue_depth *
		sizeof(Mpi2DefaultReplyDescriptor_t);
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
			GFP_KERNEL);
	if (!ioc->reply_post)
		return -ENOMEM;

	/*
	 * Number of dma_pool allocations needed: one chunk covers up to
	 * RDPQ_MAX_INDEX_IN_ONE_CHUNK reply queues.
	 */
	dma_alloc_count = DIV_ROUND_UP(count,
				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
	ioc->reply_post_free_dma_pool =
		dma_pool_create("reply_post_free pool",
		    &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool)
		return -ENOMEM;
	for (i = 0; i < count; i++) {
		/* Allocate a fresh chunk at the start of each group. */
		if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
			ioc->reply_post[i].reply_post_free =
			    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
				GFP_KERNEL,
				&ioc->reply_post[i].reply_post_free_dma);
			if (!ioc->reply_post[i].reply_post_free)
				return -ENOMEM;

			/*
			 * Each chunk must stay within one 4GB region,
			 * otherwise bail out so the caller can retry
			 * with a 32-bit DMA mask.
			 */
			if (!mpt3sas_check_same_4gb_region(
				(long)ioc->reply_post[i].reply_post_free, sz)) {
				dinitprintk(ioc,
				    ioc_err(ioc, "bad Replypost free pool(0x%p)"
				    "reply_post_free_dma = (0x%llx)\n",
				    ioc->reply_post[i].reply_post_free,
				    (unsigned long long)
				    ioc->reply_post[i].reply_post_free_dma));
				return -EAGAIN;
			}
			dma_alloc_count--;

		} else {
			/* Subsequent queues within a chunk: offset from
			 * the previous queue, CPU and DMA views alike.
			 */
			ioc->reply_post[i].reply_post_free =
			    (Mpi2ReplyDescriptorsUnion_t *)
			    ((long)ioc->reply_post[i-1].reply_post_free
			    + reply_post_free_sz);
			ioc->reply_post[i].reply_post_free_dma =
			    (dma_addr_t)
			    (ioc->reply_post[i-1].reply_post_free_dma +
			    reply_post_free_sz);
		}
	}
	return 0;
}
5826
5827
5828
5829
5830
5831
5832
/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 *
 * Sizes the queues from IOC Facts and the module parameters, then
 * allocates every DMA pool and lookup table the adapter needs:
 * request frames, chains, sense buffers, reply frames, the reply-free
 * and reply-post queues, NVMe PRP pages and the config page. On a
 * 4GB-boundary failure the whole sequence is retried with a 32-bit
 * DMA mask; on plain allocation failure the queue depth is reduced
 * and the sequence retried.
 *
 * Return: 0 success, anything else error.
 */
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	struct mpt3sas_facts *facts;
	u16 max_sge_elements;
	u16 chains_needed_per_io;
	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
	u32 retry_sz;
	u32 rdpq_sz = 0, sense_sz = 0;
	u16 max_request_credit, nvme_blocks_needed;
	unsigned short sg_tablesize;
	u16 sge_size;
	int i;
	int ret = 0, rc = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));


	retry_sz = 0;
	facts = &ioc->facts;

	/* command line tunables for max sgl entries */
	if (max_sgl_entries != -1)
		sg_tablesize = max_sgl_entries;
	else {
		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
			sg_tablesize = MPT2SAS_SG_DEPTH;
		else
			sg_tablesize = MPT3SAS_SG_DEPTH;
	}

	/* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
	if (reset_devices)
		sg_tablesize = min_t(unsigned short, sg_tablesize,
		    MPT_KDUMP_MIN_PHYS_SEGMENTS);

	if (ioc->is_mcpu_endpoint)
		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
	else {
		/* Clamp the user-requested size into the supported range. */
		if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
			sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
			sg_tablesize = min_t(unsigned short, sg_tablesize,
			    SG_MAX_SEGMENTS);
			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
				 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
		}
		ioc->shost->sg_tablesize = sg_tablesize;
	}

	/* Reserve credits for internal commands (hi-priority + 5). */
	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
		(facts->RequestCredit / 4));
	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
				INTERNAL_SCSIIO_CMDS_COUNT)) {
			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
				facts->RequestCredit);
			return -ENOMEM;
		}
		ioc->internal_depth = 10;
	}

	ioc->hi_priority_depth = ioc->internal_depth - (5);
	/* command line tunables for max controller queue depth */
	if (max_queue_depth != -1 && max_queue_depth != 0) {
		max_request_credit = min_t(u16, max_queue_depth +
			ioc->internal_depth, facts->RequestCredit);
		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
			max_request_credit = MAX_HBA_QUEUE_DEPTH;
	} else if (reset_devices)
		max_request_credit = min_t(u16, facts->RequestCredit,
		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
	else
		max_request_credit = min_t(u16, facts->RequestCredit,
		    MAX_HBA_QUEUE_DEPTH);

	/* Firmware maintains additional facts->HighPriorityCredit number of
	 * credits for HiPriprity Request messages, so hba queue depth will be
	 * sum of max_request_credit and high priority queue depth.
	 */
	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;

	/* request frame size */
	ioc->request_sz = facts->IOCRequestFrameSize * 4;

	/* reply frame size */
	ioc->reply_sz = facts->ReplyFrameSize * 4;

	/* chain segment size */
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		if (facts->IOCMaxChainSegmentSize)
			ioc->chain_segment_sz =
					facts->IOCMaxChainSegmentSize *
					MAX_CHAIN_ELEMT_SZ;
		else
		/* set to 128 bytes size (32 SGEs of 4 bytes each) */
			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
						MAX_CHAIN_ELEMT_SZ;
	} else
		ioc->chain_segment_sz = ioc->request_sz;

	/* calculate the max scatter element size */
	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);

 retry_allocation:
	total_sz = 0;
	/* calculate number of sg elements left over in the 1st frame */
	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
	ioc->max_sges_in_main_message = max_sge_elements/sge_size;

	/* now do the same for a chain buffer */
	max_sge_elements = ioc->chain_segment_sz - sge_size;
	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;

	/*
	 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
	 */
	chains_needed_per_io = ((ioc->shost->sg_tablesize -
	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
	    + 1;
	if (chains_needed_per_io > facts->MaxChainDepth) {
		chains_needed_per_io = facts->MaxChainDepth;
		ioc->shost->sg_tablesize = min_t(u16,
		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
		* chains_needed_per_io), ioc->shost->sg_tablesize);
	}
	ioc->chains_needed_per_io = chains_needed_per_io;

	/* reply free queue sizing - taking into account for 64 FW events */
	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;

	/* mCPU manage single counters for simplicity */
	if (ioc->is_mcpu_endpoint)
		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
	else {
		/* calculate reply descriptor post queue depth */
		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
			ioc->reply_free_queue_depth +  1;
		/* Round up to 16 byte boundary */
		if (ioc->reply_post_queue_depth % 16)
			ioc->reply_post_queue_depth += 16 -
				(ioc->reply_post_queue_depth % 16);
	}

	if (ioc->reply_post_queue_depth >
	    facts->MaxReplyDescriptorPostQueueDepth) {
		/* Scale the whole set of depths back under the FW limit. */
		ioc->reply_post_queue_depth =
				facts->MaxReplyDescriptorPostQueueDepth -
		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
		ioc->hba_queue_depth =
				((ioc->reply_post_queue_depth - 64) / 2) - 1;
		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
	}

	ioc_info(ioc,
	    "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
	    "sge_per_io(%d), chains_per_io(%d)\n",
	    ioc->max_sges_in_main_message,
	    ioc->max_sges_in_chain_message,
	    ioc->shost->sg_tablesize,
	    ioc->chains_needed_per_io);

	/* reply post queue, 16 byte align */
	reply_post_free_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);
	rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
	if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
	    || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
		rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
	ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
	if (ret == -EAGAIN) {
		/*
		 * Free allocated bad RDPQ memory pools.
		 * Change dma coherent mask to 32 bit and reallocate RDPQ
		 */
		_base_release_memory_pools(ioc);
		ioc->use_32bit_dma = true;
		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
			ioc_err(ioc,
			    "32 DMA mask failed %s\n", pci_name(ioc->pdev));
			return -ENODEV;
		}
		if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
			return -ENOMEM;
	} else if (ret == -ENOMEM)
		return -ENOMEM;
	total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
	    DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
	ioc->scsiio_depth = ioc->hba_queue_depth -
	    ioc->hi_priority_depth - ioc->internal_depth;

	/* set the scsi host can_queue depth
	 * with some internal commands that could be outstanding
	 */
	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
	dinitprintk(ioc,
		    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
			     ioc->shost->can_queue));

	/* contiguous pool for request and chains, 16 byte align, one extra "
	 * "frame for smid=0
	 */
	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);

	/* hi-priority queue */
	sz += (ioc->hi_priority_depth * ioc->request_sz);

	/* internal queue */
	sz += (ioc->internal_depth * ioc->request_sz);

	ioc->request_dma_sz = sz;
	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
			&ioc->request_dma, GFP_KERNEL);
	if (!ioc->request) {
		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
			ioc->hba_queue_depth, ioc->chains_needed_per_io,
			ioc->request_sz, sz / 1024);
		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
			goto out;
		/* Shrink the queue depth and retry the whole allocation. */
		retry_sz = 64;
		ioc->hba_queue_depth -= retry_sz;
		_base_release_memory_pools(ioc);
		goto retry_allocation;
	}

	if (retry_sz)
		ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
			ioc->hba_queue_depth, ioc->chains_needed_per_io,
			ioc->request_sz, sz / 1024);

	/* hi-priority queue */
	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);
	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);

	/* internal queue */
	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
	    ioc->request_sz);
	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
	    ioc->request_sz);

	ioc_info(ioc,
	    "request pool(0x%p) - dma(0x%llx): "
	    "depth(%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->request, (unsigned long long) ioc->request_dma,
	    ioc->hba_queue_depth, ioc->request_sz,
	    (ioc->hba_queue_depth * ioc->request_sz) / 1024);

	total_sz += sz;

	dinitprintk(ioc,
		    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
			     ioc->request, ioc->scsiio_depth));

	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
	if (!ioc->chain_lookup) {
		ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
		goto out;
	}

	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
		if (!ioc->chain_lookup[i].chains_per_smid) {
			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
			goto out;
		}
	}

	/* initialize hi-priority queue smid's */
	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->hpr_lookup) {
		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
		goto out;
	}
	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
	dinitprintk(ioc,
		    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
			     ioc->hi_priority,
			     ioc->hi_priority_depth, ioc->hi_priority_smid));

	/* initialize internal queue smid's */
	ioc->internal_lookup = kcalloc(ioc->internal_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->internal_lookup) {
		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
		goto out;
	}
	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
	dinitprintk(ioc,
		    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
			     ioc->internal,
			     ioc->internal_depth, ioc->internal_smid));

	ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
	    sizeof(u16), GFP_KERNEL);
	if (!ioc->io_queue_num)
		goto out;
	/*
	 * The number of NVMe page sized blocks needed is:
	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
	 * that is placed in the main message frame.  8 is the size of each PRP
	 * entry or PRP list pointer entry.  8 is subtracted from page_size
	 * because of the PRP list pointer entry at the end of a page, so this
	 * is not counted as a PRP entry.  The 1 added page is a round up.
	 *
	 * To avoid allocation failures due to the amount of memory that could
	 * be required for NVMe PRP's, only each set of NVMe blocks will be
	 * contiguous, so a new set is allocated for each possible I/O.
	 */

	ioc->chains_per_prp_buffer = 0;
	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
		nvme_blocks_needed =
			(ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
		nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
		nvme_blocks_needed++;

		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
		if (!ioc->pcie_sg_lookup) {
			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
			goto out;
		}
		sz = nvme_blocks_needed * ioc->page_size;
		rc = _base_allocate_pcie_sgl_pool(ioc, sz);
		if (rc == -ENOMEM)
			return -ENOMEM;
		else if (rc == -EAGAIN)
			goto try_32bit_dma;
		total_sz += sz * ioc->scsiio_depth;
	}

	rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
		ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
	dinitprintk(ioc,
	    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->chain_depth, ioc->chain_segment_sz,
	    (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
	/* sense buffers, 4 byte align */
	sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
	rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += sense_sz;
	/* NOTE(review): the pool_size below prints "sz / 1024" but sz still
	 * holds an earlier value here; sense_sz was likely intended — confirm.
	 */
	ioc_info(ioc,
	    "sense pool(0x%p)- dma(0x%llx): depth(%d),"
	    "element_size(%d), pool_size(%d kB)\n",
	    ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
	    SCSI_SENSE_BUFFERSIZE, sz / 1024);
	/* reply pool, 4 byte align */
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	rc = _base_allocate_reply_pool(ioc, sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += sz;

	/* reply free queue, 16 byte align */
	sz = ioc->reply_free_queue_depth * 4;
	rc = _base_allocate_reply_free_dma_pool(ioc, sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	dinitprintk(ioc,
		    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
			     (unsigned long long)ioc->reply_free_dma));
	total_sz += sz;
	if (ioc->rdpq_array_enable) {
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		rc = _base_allocate_reply_post_free_array(ioc,
		    reply_post_free_array_sz);
		if (rc == -ENOMEM)
			return -ENOMEM;
		else if (rc == -EAGAIN)
			goto try_32bit_dma;
	}
	ioc->config_page_sz = 512;
	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
			ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
	if (!ioc->config_page) {
		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
		goto out;
	}

	ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
	    ioc->config_page, (unsigned long long)ioc->config_page_dma,
	    ioc->config_page_sz);
	total_sz += ioc->config_page_sz;

	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
		 total_sz / 1024);
	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
		 ioc->shost->can_queue, facts->RequestCredit);
	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
		 ioc->shost->sg_tablesize);
	return 0;

try_32bit_dma:
	_base_release_memory_pools(ioc);
	if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
		/* Change dma coherent mask to 32 bit and reallocate */
		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
			pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
			       pci_name(ioc->pdev));
			return -ENODEV;
		}
	} else if (_base_reduce_hba_queue_depth(ioc) != 0)
		return -ENOMEM;
	goto retry_allocation;

 out:
	return -ENOMEM;
}
6264
6265
6266
6267
6268
6269
6270
6271
6272
6273u32
6274mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
6275{
6276 u32 s, sc;
6277
6278 s = ioc->base_readl(&ioc->chip->Doorbell);
6279 sc = s & MPI2_IOC_STATE_MASK;
6280 return cooked ? sc : s;
6281}
6282
6283
6284
6285
6286
6287
6288
6289
6290
6291static int
6292_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
6293{
6294 u32 count, cntdn;
6295 u32 current_state;
6296
6297 count = 0;
6298 cntdn = 1000 * timeout;
6299 do {
6300 current_state = mpt3sas_base_get_iocstate(ioc, 1);
6301 if (current_state == ioc_state)
6302 return 0;
6303 if (count && current_state == MPI2_IOC_STATE_FAULT)
6304 break;
6305 if (count && current_state == MPI2_IOC_STATE_COREDUMP)
6306 break;
6307
6308 usleep_range(1000, 1500);
6309 count++;
6310 } while (--cntdn);
6311
6312 return current_state;
6313}
6314
6315
6316
6317
6318
6319
6320
6321static inline void
6322_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
6323{
6324 unsigned int i, sz = 256;
6325 u32 __iomem *reg = (u32 __iomem *)ioc->chip;
6326
6327 ioc_info(ioc, "System Register set:\n");
6328 for (i = 0; i < (sz / sizeof(u32)); i++)
6329 pr_info("%08x: %08x\n", (i * 4), readl(®[i]));
6330}
6331
6332
6333
6334
6335
6336
6337
6338
6339
6340
6341
6342
6343static int
6344_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6345{
6346 u32 cntdn, count;
6347 u32 int_status;
6348
6349 count = 0;
6350 cntdn = 1000 * timeout;
6351 do {
6352 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6353 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6354 dhsprintk(ioc,
6355 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6356 __func__, count, timeout));
6357 return 0;
6358 }
6359
6360 usleep_range(1000, 1500);
6361 count++;
6362 } while (--cntdn);
6363
6364 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6365 __func__, count, int_status);
6366 return -EFAULT;
6367}
6368
6369static int
6370_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6371{
6372 u32 cntdn, count;
6373 u32 int_status;
6374
6375 count = 0;
6376 cntdn = 2000 * timeout;
6377 do {
6378 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6379 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6380 dhsprintk(ioc,
6381 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6382 __func__, count, timeout));
6383 return 0;
6384 }
6385
6386 udelay(500);
6387 count++;
6388 } while (--cntdn);
6389
6390 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6391 __func__, count, int_status);
6392 return -EFAULT;
6393
6394}
6395
6396
6397
6398
6399
6400
6401
6402
6403
6404
6405
/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in second
 *
 * Polls until the system-to-IOC doorbell status bit clears (the IOC
 * has consumed the host's write). Bails out early if the IOC reports
 * FAULT/COREDUMP or if the register reads back as all-ones (typically
 * a surprise-removed/dead device).
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			/* IOC has acknowledged the host's doorbell write. */
			dhsprintk(ioc,
				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
					   __func__, count, timeout));
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			/* IOC raised its own doorbell — check whether it
			 * went to FAULT or COREDUMP while we were waiting.
			 */
			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc, doorbell);
				return -EFAULT;
			}
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc, doorbell);
				return -EFAULT;
			}
		} else if (int_status == 0xFFFFFFFF)
			/* All-ones read: device is gone/unresponsive. */
			goto out;

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

 out:
	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
		__func__, count, int_status);
	return -EFAULT;
}
6446
6447
6448
6449
6450
6451
6452
6453
6454static int
6455_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
6456{
6457 u32 cntdn, count;
6458 u32 doorbell_reg;
6459
6460 count = 0;
6461 cntdn = 1000 * timeout;
6462 do {
6463 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
6464 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
6465 dhsprintk(ioc,
6466 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6467 __func__, count, timeout));
6468 return 0;
6469 }
6470
6471 usleep_range(1000, 1500);
6472 count++;
6473 } while (--cntdn);
6474
6475 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
6476 __func__, count, doorbell_reg);
6477 return -EFAULT;
6478}
6479
6480
6481
6482
6483
6484
6485
6486
6487
/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in second
 *
 * Writes the reset function to the doorbell, waits for the IOC to
 * acknowledge, then waits for the READY state. On failure, if the IOC
 * has entered COREDUMP (and the fault watchdog is not running), waits
 * for the coredump to complete before returning.
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
	u32 ioc_state;
	int r = 0;
	unsigned long flags;

	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
		return -EFAULT;
	}

	if (!(ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		return -EFAULT;

	ioc_info(ioc, "sending message unit reset !!\n");

	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
	    &ioc->chip->Doorbell);
	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
		r = -EFAULT;
		goto out;
	}

	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
	if (ioc_state) {
		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
			__func__, ioc_state);
		r = -EFAULT;
		goto out;
	}
 out:
	if (r != 0) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
		/*
		 * Wait for IOC state CoreDump to clear only during driver
		 * load time (or when the fault watchdog work queue is not
		 * yet running); otherwise the watchdog handles it.
		 */
		if ((ioc_state & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
		    ioc->fault_reset_work_q == NULL)) {
			/* Drop the lock across the potentially long wait. */
			spin_unlock_irqrestore(
			    &ioc->ioc_reset_in_progress_lock, flags);
			mpt3sas_print_coredump_info(ioc, ioc_state);
			mpt3sas_base_wait_for_coredump_completion(ioc,
			    __func__);
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
		}
		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	}
	ioc_info(ioc, "message unit reset: %s\n",
		 r == 0 ? "SUCCESS" : "FAILED");
	return r;
}
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556int
6557mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
6558{
6559 int wait_state_count = 0;
6560 u32 ioc_state;
6561
6562 do {
6563 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
6564 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
6565 break;
6566
6567
6568
6569
6570
6571
6572
6573
6574 if (ioc->is_driver_loading)
6575 return -ETIME;
6576
6577 ssleep(1);
6578 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
6579 __func__, ++wait_state_count);
6580 } while (--timeout);
6581 if (!timeout) {
6582 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
6583 return -EFAULT;
6584 }
6585 if (wait_state_count)
6586 ioc_info(ioc, "ioc is operational\n");
6587 return 0;
6588}
6589
6590
6591
6592
6593
6594
6595
6596
6597
6598
6599
6600
/**
 * _base_handshake_req_reply_wait - send a request and collect the reply
 *	using the doorbell handshake protocol
 * @ioc: per adapter object
 * @request_bytes: size of the request frame in bytes (multiple of 4)
 * @request: request frame, written to the doorbell 32 bits at a time
 * @reply_bytes: size of the caller's reply buffer in bytes
 * @reply: buffer receiving the reply, read 16 bits at a time
 * @timeout: seconds to wait for the first reply interrupt
 *
 * Used before the message queues are operational (IOC facts, port facts,
 * IOC init).  Reply words beyond @reply_bytes are read and discarded so
 * the handshake stays in sync with the firmware.
 *
 * Return: 0 on success, -EFAULT on any handshake failure.
 */
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
	u32 *request, int reply_bytes, u16 *reply, int timeout)
{
	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
	int i;
	u8 failed;
	__le32 *mfp;

	/* make sure the doorbell is not already in use */
	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
		return -EFAULT;
	}

	/* clear any stale IOC-to-host doorbell interrupt */
	if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
	    MPI2_HIS_IOC2SYS_DB_STATUS)
		writel(0, &ioc->chip->HostInterruptStatus);

	/* announce the handshake: function code plus dword count */
	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
	    &ioc->chip->Doorbell);

	if ((_base_spin_on_doorbell_int(ioc, 5))) {
		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
			__LINE__);
		return -EFAULT;
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
		ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
			__LINE__);
		return -EFAULT;
	}

	/* send the request payload 32 bits at a time, ack'd per word */
	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
		if ((_base_wait_for_doorbell_ack(ioc, 5)))
			failed = 1;
	}

	if (failed) {
		ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
			__LINE__);
		return -EFAULT;
	}

	/* now wait for the reply */
	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
			__LINE__);
		return -EFAULT;
	}

	/* first two 16-bit words carry the reply header (incl. MsgLength) */
	reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((_base_wait_for_doorbell_int(ioc, 5))) {
		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
			__LINE__);
		return -EFAULT;
	}
	reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);

	/* MsgLength is in dwords; read the remaining 16-bit words */
	for (i = 2; i < default_reply->MsgLength * 2; i++) {
		if ((_base_wait_for_doorbell_int(ioc, 5))) {
			ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
				__LINE__);
			return -EFAULT;
		}
		/* discard words that do not fit the caller's buffer */
		if (i >= reply_bytes/2)
			ioc->base_readl(&ioc->chip->Doorbell);
		else
			reply[i] = le16_to_cpu(
			    ioc->base_readl(&ioc->chip->Doorbell)
			    & MPI2_DOORBELL_DATA_MASK);
		writel(0, &ioc->chip->HostInterruptStatus);
	}

	/* best-effort: drain the final int and wait for doorbell release */
	_base_wait_for_doorbell_int(ioc, 5);
	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
		dhsprintk(ioc,
			  ioc_info(ioc, "doorbell is in use (line=%d)\n",
				   __LINE__));
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		mfp = (__le32 *)reply;
		pr_info("\toffset:data\n");
		for (i = 0; i < reply_bytes/4; i++)
			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
				 le32_to_cpu(mfp[i]));
	}
	return 0;
}
6704
6705
6706
6707
6708
6709
6710
6711
6712
6713
6714
6715
6716
6717
6718
/**
 * mpt3sas_base_sas_iounit_control - send a SAS IO Unit Control request
 * @ioc: per adapter object
 * @mpi_reply: reply frame filled in on completion (zeroed when no reply)
 * @mpi_request: caller-built Mpi2SasIoUnitControlRequest_t
 *
 * Issues phy resets and other IO unit control operations through the
 * single-slot base internal command context, waiting up to 10 seconds
 * for completion.
 *
 * Return: 0 on success, -EAGAIN when the command slot or a smid is
 * unavailable, -EFAULT on timeout.
 */
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasIoUnitControlReply_t *mpi_reply,
	Mpi2SasIoUnitControlRequest_t *mpi_request)
{
	u16 smid;
	u8 issue_reset = 0;
	int rc;
	void *request;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* base_cmds is a single-slot context; serialize callers */
	mutex_lock(&ioc->base_cmds.mutex);

	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
	if (rc)
		goto out;

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
	/* flag in-flight phy resets so other paths can tell */
	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
		ioc->ioc_link_reset_in_progress = 1;
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done,
	    msecs_to_jiffies(10000));
	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
	    ioc->ioc_link_reset_in_progress)
		ioc->ioc_link_reset_in_progress = 0;
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * NOTE(review): mpt3sas_check_cmd_timeout presumably writes
		 * issue_reset through macro expansion - confirm against its
		 * definition elsewhere in the driver.
		 */
		mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
		    mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
		    issue_reset);
		goto issue_host_reset;
	}
	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
		memcpy(mpi_reply, ioc->base_cmds.reply,
		    sizeof(Mpi2SasIoUnitControlReply_t));
	else
		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	goto out;

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	rc = -EFAULT;
 out:
	mutex_unlock(&ioc->base_cmds.mutex);
	return rc;
}
6789
6790
6791
6792
6793
6794
6795
6796
6797
6798
6799
6800
6801int
6802mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
6803 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
6804{
6805 u16 smid;
6806 u8 issue_reset = 0;
6807 int rc;
6808 void *request;
6809
6810 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6811
6812 mutex_lock(&ioc->base_cmds.mutex);
6813
6814 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6815 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6816 rc = -EAGAIN;
6817 goto out;
6818 }
6819
6820 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6821 if (rc)
6822 goto out;
6823
6824 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6825 if (!smid) {
6826 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6827 rc = -EAGAIN;
6828 goto out;
6829 }
6830
6831 rc = 0;
6832 ioc->base_cmds.status = MPT3_CMD_PENDING;
6833 request = mpt3sas_base_get_msg_frame(ioc, smid);
6834 ioc->base_cmds.smid = smid;
6835 memset(request, 0, ioc->request_sz);
6836 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
6837 init_completion(&ioc->base_cmds.done);
6838 ioc->put_smid_default(ioc, smid);
6839 wait_for_completion_timeout(&ioc->base_cmds.done,
6840 msecs_to_jiffies(10000));
6841 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6842 mpt3sas_check_cmd_timeout(ioc,
6843 ioc->base_cmds.status, mpi_request,
6844 sizeof(Mpi2SepRequest_t)/4, issue_reset);
6845 goto issue_host_reset;
6846 }
6847 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6848 memcpy(mpi_reply, ioc->base_cmds.reply,
6849 sizeof(Mpi2SepReply_t));
6850 else
6851 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
6852 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6853 goto out;
6854
6855 issue_host_reset:
6856 if (issue_reset)
6857 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6858 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6859 rc = -EFAULT;
6860 out:
6861 mutex_unlock(&ioc->base_cmds.mutex);
6862 return rc;
6863}
6864
6865
6866
6867
6868
6869
6870
6871
6872static int
6873_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
6874{
6875 Mpi2PortFactsRequest_t mpi_request;
6876 Mpi2PortFactsReply_t mpi_reply;
6877 struct mpt3sas_port_facts *pfacts;
6878 int mpi_reply_sz, mpi_request_sz, r;
6879
6880 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6881
6882 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
6883 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
6884 memset(&mpi_request, 0, mpi_request_sz);
6885 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
6886 mpi_request.PortNumber = port;
6887 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6888 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6889
6890 if (r != 0) {
6891 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6892 return r;
6893 }
6894
6895 pfacts = &ioc->pfacts[port];
6896 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
6897 pfacts->PortNumber = mpi_reply.PortNumber;
6898 pfacts->VP_ID = mpi_reply.VP_ID;
6899 pfacts->VF_ID = mpi_reply.VF_ID;
6900 pfacts->MaxPostedCmdBuffers =
6901 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
6902
6903 return 0;
6904}
6905
6906
6907
6908
6909
6910
6911
6912
/**
 * _base_wait_for_iocstate - try to get the IOC into READY/OPERATIONAL
 * @ioc: per adapter object
 * @timeout: seconds to wait for the READY state
 *
 * Return: 0 when the IOC is READY or OPERATIONAL (or a diag reset
 * succeeded), negative errno otherwise.
 */
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 ioc_state;
	int rc;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->pci_error_recovery) {
		dfailprintk(ioc,
			    ioc_info(ioc, "%s: host in pci error recovery\n",
				     __func__));
		return -EFAULT;
	}

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc,
		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
			   __func__, ioc_state));

	/* already usable: nothing to do */
	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		/* coredump in progress: do not reset from here */
		ioc_info(ioc,
		    "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
		    __func__, ioc_state);
		return -EFAULT;
	}

	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
	if (ioc_state) {
		dfailprintk(ioc,
			    ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
				     __func__, ioc_state));
		return -EFAULT;
	}

	/*
	 * NOTE(review): when the wait above succeeds, control falls through
	 * into the diag reset below instead of returning 0 - confirm this
	 * is intended (e.g. to recover from the RESET state) rather than a
	 * missing "return 0".
	 */
 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
6966
6967
6968
6969
6970
6971
6972
/**
 * _base_get_ioc_facts - obtain IOC facts via the doorbell handshake
 * @ioc: per adapter object
 *
 * Brings the IOC to a usable state, sends IOC_FACTS, and caches the
 * reply into ioc->facts; also derives several capability flags
 * (ir_firmware, rdpq_array_capable, atomic_desc_capable) and the host
 * page size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCFactsRequest_t mpi_request;
	Mpi2IOCFactsReply_t mpi_reply;
	struct mpt3sas_facts *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	r = _base_wait_for_iocstate(ioc, 10);
	if (r) {
		dfailprintk(ioc,
			    ioc_info(ioc, "%s: failed getting to correct state\n",
				     __func__));
		return r;
	}
	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

	if (r != 0) {
		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
		return r;
	}

	/* copy reply fields (little-endian on the wire) into ioc->facts */
	facts = &ioc->facts;
	memset(facts, 0, sizeof(struct mpt3sas_facts));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
	/* too few MSI-X vectors to be worth combined reply queues */
	if (ioc->msix_enable && (facts->MaxMSIxVectors <=
	    MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
		ioc->combined_reply_queue = 0;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	/* RDPQ arrays are skipped during kdump (reset_devices) boots */
	if ((facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
		ioc->rdpq_array_capable = 1;
	if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
	    && ioc->is_aero_ioc)
		ioc->atomic_desc_capable = 1;
	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize =
	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		facts->IOCMaxChainSegmentSize =
			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
	}
	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	/* NOTE(review): -1 presumably means "no target id limit" - confirm */
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit =
	    le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;

	/*
	 * CurrentHostPageSize is a power-of-two exponent; 0 means the IOC
	 * did not report one, so fall back to 4K.
	 */
	ioc->page_size = 1 << facts->CurrentHostPageSize;
	if (ioc->page_size == 1) {
		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
	}
	dinitprintk(ioc,
		    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
			     facts->CurrentHostPageSize));

	dinitprintk(ioc,
		    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
			     facts->RequestCredit, facts->MaxChainDepth));
	dinitprintk(ioc,
		    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
			     facts->IOCRequestFrameSize * 4,
			     facts->ReplyFrameSize * 4));
	return 0;
}
7069
7070
7071
7072
7073
7074
7075
/**
 * _base_send_ioc_init - send IOC_INIT via the doorbell handshake
 * @ioc: per adapter object
 *
 * Programs the firmware with the host's queue depths, frame sizes and
 * DMA base addresses (sense, reply, request, reply-free and reply-post
 * queues), optionally in RDPQ-array mode, then checks the IOC status in
 * the reply.
 *
 * Return: 0 on success, negative errno on handshake or status failure.
 */
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCInitRequest_t mpi_request;
	Mpi2IOCInitReply_t mpi_reply;
	int i, r = 0;
	ktime_t current_time;
	u16 ioc_status;
	u32 reply_post_free_array_sz = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	mpi_request.VF_ID = 0;
	mpi_request.VP_ID = 0;
	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;

	if (_base_is_controller_msix_enabled(ioc))
		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
	/* frame size is expressed in dwords */
	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
	mpi_request.ReplyDescriptorPostQueueDepth =
	    cpu_to_le16(ioc->reply_post_queue_depth);
	mpi_request.ReplyFreeQueueDepth =
	    cpu_to_le16(ioc->reply_free_queue_depth);

	/* hand the firmware the DMA bases of the shared memory regions */
	mpi_request.SenseBufferAddressHigh =
	    cpu_to_le32((u64)ioc->sense_dma >> 32);
	mpi_request.SystemReplyAddressHigh =
	    cpu_to_le32((u64)ioc->reply_dma >> 32);
	mpi_request.SystemRequestFrameBaseAddress =
	    cpu_to_le64((u64)ioc->request_dma);
	mpi_request.ReplyFreeQueueAddress =
	    cpu_to_le64((u64)ioc->reply_free_dma);

	if (ioc->rdpq_array_enable) {
		/* one RDPQ array entry per reply queue */
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
		for (i = 0; i < ioc->reply_queue_count; i++)
			ioc->reply_post_free_array[i].RDPQBaseAddress =
			    cpu_to_le64(
				(u64)ioc->reply_post[i].reply_post_free_dma);
		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
	} else {
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
	}

	/*
	 * Set the flag to enable the CoreDump state feature in the IOC
	 * firmware.
	 */
	mpi_request.ConfigurationFlags |=
	    cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);

	/*
	 * The time stamp is milliseconds since the Unix epoch.
	 */
	current_time = ktime_get_real();
	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		__le32 *mfp;
		int i;

		mfp = (__le32 *)&mpi_request;
		ioc_info(ioc, "\toffset:data\n");
		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
				 le32_to_cpu(mfp[i]));
	}

	r = _base_handshake_req_reply_wait(ioc,
	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);

	if (r != 0) {
		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
		return r;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
	    mpi_reply.IOCLogInfo) {
		ioc_err(ioc, "%s: failed\n", __func__);
		r = -EIO;
	}

	/* fresh init: restart the periodic timestamp-sync counter */
	ioc->timestamp_update_count = 0;
	return r;
}
7173
7174
7175
7176
7177
7178
7179
7180
7181
7182
7183
7184u8
7185mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
7186 u32 reply)
7187{
7188 MPI2DefaultReply_t *mpi_reply;
7189 u16 ioc_status;
7190
7191 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
7192 return 1;
7193
7194 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7195 if (!mpi_reply)
7196 return 1;
7197
7198 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
7199 return 1;
7200
7201 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
7202 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
7203 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
7204 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
7205 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7206 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7207 ioc->port_enable_failed = 1;
7208
7209 if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
7210 ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
7211 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
7212 mpt3sas_port_enable_complete(ioc);
7213 return 1;
7214 } else {
7215 ioc->start_scan_failed = ioc_status;
7216 ioc->start_scan = 0;
7217 return 1;
7218 }
7219 }
7220 complete(&ioc->port_enable_cmds.done);
7221 return 1;
7222}
7223
7224
7225
7226
7227
7228
7229
/**
 * _base_send_port_enable - issue a blocking PORT_ENABLE request
 * @ioc: per adapter object
 *
 * Sends PORT_ENABLE through the port_enable internal command context and
 * waits up to 300 seconds for the firmware to complete discovery.
 *
 * Return: 0 on success, -EAGAIN when the slot or a smid is busy, -EFAULT
 * when a reset interrupted the command or the IOC status is bad, -ETIME
 * on timeout.
 */
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	Mpi2PortEnableReply_t *mpi_reply;
	int r = 0;
	u16 smid;
	u16 ioc_status;

	ioc_info(ioc, "sending port enable !!\n");

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	init_completion(&ioc->port_enable_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* discovery can take a long time; allow up to 300 seconds */
	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2PortEnableRequest_t)/4);
		/* MPT3_CMD_RESET means a host reset aborted the command */
		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
		goto out;
	}

	mpi_reply = ioc->port_enable_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
			__func__, ioc_status);
		r = -EFAULT;
		goto out;
	}

 out:
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
	return r;
}
7286
7287
7288
7289
7290
7291
7292
7293int
7294mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
7295{
7296 Mpi2PortEnableRequest_t *mpi_request;
7297 u16 smid;
7298
7299 ioc_info(ioc, "sending port enable !!\n");
7300
7301 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7302 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7303 return -EAGAIN;
7304 }
7305
7306 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7307 if (!smid) {
7308 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7309 return -EAGAIN;
7310 }
7311 ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
7312 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7313 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
7314 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7315 ioc->port_enable_cmds.smid = smid;
7316 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7317 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7318
7319 ioc->put_smid_default(ioc, smid);
7320 return 0;
7321}
7322
7323
7324
7325
7326
7327
7328
7329
7330
7331
7332static int
7333_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
7334{
7335
7336
7337
7338
7339
7340
7341 if (ioc->ir_firmware)
7342 return 1;
7343
7344
7345 if (!ioc->bios_pg3.BiosVersion)
7346 return 0;
7347
7348
7349
7350
7351
7352
7353
7354
7355 if ((ioc->bios_pg2.CurrentBootDeviceForm &
7356 MPI2_BIOSPAGE2_FORM_MASK) ==
7357 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7358
7359 (ioc->bios_pg2.ReqBootDeviceForm &
7360 MPI2_BIOSPAGE2_FORM_MASK) ==
7361 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7362
7363 (ioc->bios_pg2.ReqAltBootDeviceForm &
7364 MPI2_BIOSPAGE2_FORM_MASK) ==
7365 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
7366 return 0;
7367
7368 return 1;
7369}
7370
7371
7372
7373
7374
7375
7376
7377
7378static void
7379_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
7380{
7381 u32 desired_event;
7382
7383 if (event >= 128)
7384 return;
7385
7386 desired_event = (1 << (event % 32));
7387
7388 if (event < 32)
7389 ioc->event_masks[0] &= ~desired_event;
7390 else if (event < 64)
7391 ioc->event_masks[1] &= ~desired_event;
7392 else if (event < 96)
7393 ioc->event_masks[2] &= ~desired_event;
7394 else if (event < 128)
7395 ioc->event_masks[3] &= ~desired_event;
7396}
7397
7398
7399
7400
7401
7402
7403
/**
 * _base_event_notification - push the current event masks to the firmware
 * @ioc: per adapter object
 *
 * Sends EVENT_NOTIFICATION carrying ioc->event_masks through the base
 * internal command context and waits up to 30 seconds.
 *
 * Return: 0 on success, -EAGAIN when busy or retry advised, -EFAULT on
 * unrecoverable failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i, issue_diag_reset = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0;
	mpi_request->VP_ID = 0;
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		/* a host reset already cleaned up; no diag reset needed */
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			issue_diag_reset = 1;

	} else
		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	if (issue_diag_reset) {
		/* after the first port enable, a diag reset here is unsafe */
		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
			return -EFAULT;
		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
			return -EFAULT;
		r = -EAGAIN;
	}
	return r;
}
7459
7460
7461
7462
7463
7464
7465
7466
7467
7468void
7469mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
7470{
7471 int i, j;
7472 u32 event_mask, desired_event;
7473 u8 send_update_to_fw;
7474
7475 for (i = 0, send_update_to_fw = 0; i <
7476 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
7477 event_mask = ~event_type[i];
7478 desired_event = 1;
7479 for (j = 0; j < 32; j++) {
7480 if (!(event_mask & desired_event) &&
7481 (ioc->event_masks[i] & desired_event)) {
7482 ioc->event_masks[i] &= ~desired_event;
7483 send_update_to_fw = 1;
7484 }
7485 desired_event = (desired_event << 1);
7486 }
7487 }
7488
7489 if (!send_update_to_fw)
7490 return;
7491
7492 mutex_lock(&ioc->base_cmds.mutex);
7493 _base_event_notification(ioc);
7494 mutex_unlock(&ioc->base_cmds.mutex);
7495}
7496
7497
7498
7499
7500
7501
7502
/**
 * _base_diag_reset - hard reset the IOC via the diagnostic register
 * @ioc: per adapter object
 *
 * Unlocks the diagnostic register with the magic write sequence, asserts
 * the reset, waits for the chip to come back, and finally waits for the
 * firmware to reach the READY state.  PCI config access is locked out
 * for the duration.
 *
 * Return: 0 on success, -EFAULT on failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	ioc_info(ioc, "sending diag reset !!\n");

	pci_cfg_access_lock(ioc->pdev);

	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));

	count = 0;
	do {
		/*
		 * Write the magic key sequence to the WriteSequence
		 * register to unlock writes to the HostDiagnostic register.
		 */
		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* give the chip time to latch the sequence */
		msleep(100);

		if (count++ > 20) {
			ioc_info(ioc,
			    "Stop writing magic sequence after 20 retries\n");
			_base_dump_reg_set(ioc);
			goto out;
		}

		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc,
			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
				   count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);

	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	     &ioc->chip->HostDiagnostic);

	/* give the chip time before the first read after reset assertion */
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* poll for reset deassertion, bounded to roughly 300 seconds */
	for (count = 0; count < (300000000 /
		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);

		/* all-ones read means the device fell off the bus */
		if (host_diagnostic == 0xFFFFFFFF) {
			ioc_info(ioc,
			    "Invalid host diagnostic register value\n");
			_base_dump_reg_set(ioc);
			goto out;
		}
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc,
			  ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	/* re-lock the diagnostic register */
	drsprintk(ioc,
		  ioc_info(ioc, "disable writes to the diagnostic register\n"));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
			__func__, ioc_state);
		_base_dump_reg_set(ioc);
		goto out;
	}

	pci_cfg_access_unlock(ioc->pdev);
	ioc_info(ioc, "diag reset: SUCCESS\n");
	return 0;

 out:
	pci_cfg_access_unlock(ioc->pdev);
	ioc_err(ioc, "diag reset: FAILED\n");
	return -EFAULT;
}
7614
7615
7616
7617
7618
7619
7620
7621
/**
 * mpt3sas_base_make_ioc_ready - transition the IOC to the READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER (always diag reset) or SOFT_RESET (try a
 *	message unit reset first)
 *
 * Return: 0 on success, negative errno on failure.
 */
int
mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc,
		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
			   __func__, ioc_state));

	/* a transient RESET state should settle to READY within ~10s */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			if (count++ == 10) {
				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
					__func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		ioc_info(ioc, "unexpected doorbell active!\n");
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
		/*
		 * Unless the coredump loop has already run to completion,
		 * let the firmware finish writing its coredump before the
		 * diag reset wipes it out.
		 */
		if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
			mpt3sas_print_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc,
			    __func__);
		}
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	/* soft path: try the cheaper message unit reset first */
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
			return 0;
	}

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
7698
7699
7700
7701
7702
7703
7704
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Transitions the IOC from READY to OPERATIONAL: flushes the driver's
 * delayed-work lists, rebuilds the hi-priority and internal SMID free
 * pools, primes the reply free and reply post queues, sends IOCInit
 * (with one fault-recovery retry while the driver is loading), unmasks
 * interrupts, then pushes static config pages and event notification.
 * Port enable is issued here only on the host-recovery path.
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index, rc;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	/* clean the delayed volume target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	/* clean the delayed sas-io-unit-control list */
	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	/* clean the delayed event-ack list */
	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* rebuild the hi-priority SMID free pool */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF; /* 0xFF marks "unused" */
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* rebuild the internal-command SMID free pool */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* populate the reply free queue with reply-frame DMA addresses */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
					reply_address, i);
	}

	/* initialize reply queue to CPU/MSI-X mapping on first load */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize the Reply Post Free queues */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * RDPQ mode: each queue has its own allocation.
		 * Otherwise all queues share one contiguous region and we
		 * advance through it one queue-depth at a time.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		/* all-ones descriptor == "unused" to the reply-path code */
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		/* only a single queue exists without MSI-X */
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r) {
		/*
		 * During driver load, attempt a fault-check/reset cycle
		 * followed by one more IOCInit before giving up; outside
		 * of load time the failure is returned immediately.
		 */
		if (!ioc->is_driver_loading)
			return r;

		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_send_ioc_init(ioc)))
			return r;
	}

	/* hand the reply free queue back to firmware */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index registers */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7)<<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
				MPI2_RPHI_MSIX_INDEX_SHIFT,
				&ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	mpt3sas_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	r = _base_static_config_pages(ioc);
	if (r)
		return r;

	r = _base_event_notification(ioc);
	if (r)
		return r;

	if (!ioc->shost_recovery) {
		/* WarpDrive: honor the manufacturing page 10 hide flag */
		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r;
	}

	/* host recovery path: re-issue port enable ourselves */
	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
7885
7886
7887
7888
7889
/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 *
 * Quiesces the controller (mask interrupts, soft reset to READY) and
 * unmaps its PCI resources, all under pci_access_mutex so it cannot
 * race with other paths touching the PCI device.
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* synchronize freeing resources with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		mpt3sas_base_mask_interrupts(ioc);
		/* flag recovery so the reset path skips normal I/O work */
		ioc->shost_recovery = 1;
		mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
	return;
}
7908
7909
7910
7911
7912
7913
7914
7915int
7916mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
7917{
7918 int r, i, rc;
7919 int cpu_id, last_cpu_id = 0;
7920
7921 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7922
7923
7924 ioc->cpu_count = num_online_cpus();
7925 for_each_online_cpu(cpu_id)
7926 last_cpu_id = cpu_id;
7927 ioc->cpu_msix_table_sz = last_cpu_id + 1;
7928 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
7929 ioc->reply_queue_count = 1;
7930 if (!ioc->cpu_msix_table) {
7931 ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
7932 r = -ENOMEM;
7933 goto out_free_resources;
7934 }
7935
7936 if (ioc->is_warpdrive) {
7937 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
7938 sizeof(resource_size_t *), GFP_KERNEL);
7939 if (!ioc->reply_post_host_index) {
7940 ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
7941 r = -ENOMEM;
7942 goto out_free_resources;
7943 }
7944 }
7945
7946 ioc->smp_affinity_enable = smp_affinity_enable;
7947
7948 ioc->rdpq_array_enable_assigned = 0;
7949 ioc->use_32bit_dma = false;
7950 ioc->dma_mask = 64;
7951 if (ioc->is_aero_ioc)
7952 ioc->base_readl = &_base_readl_aero;
7953 else
7954 ioc->base_readl = &_base_readl;
7955 r = mpt3sas_base_map_resources(ioc);
7956 if (r)
7957 goto out_free_resources;
7958
7959 pci_set_drvdata(ioc->pdev, ioc->shost);
7960 r = _base_get_ioc_facts(ioc);
7961 if (r) {
7962 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
7963 if (rc || (_base_get_ioc_facts(ioc)))
7964 goto out_free_resources;
7965 }
7966
7967 switch (ioc->hba_mpi_version_belonged) {
7968 case MPI2_VERSION:
7969 ioc->build_sg_scmd = &_base_build_sg_scmd;
7970 ioc->build_sg = &_base_build_sg;
7971 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
7972 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7973 break;
7974 case MPI25_VERSION:
7975 case MPI26_VERSION:
7976
7977
7978
7979
7980
7981
7982 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
7983 ioc->build_sg = &_base_build_sg_ieee;
7984 ioc->build_nvme_prp = &_base_build_nvme_prp;
7985 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
7986 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
7987 if (ioc->high_iops_queues)
7988 ioc->get_msix_index_for_smlio =
7989 &_base_get_high_iops_msix_index;
7990 else
7991 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
7992 break;
7993 }
7994 if (ioc->atomic_desc_capable) {
7995 ioc->put_smid_default = &_base_put_smid_default_atomic;
7996 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
7997 ioc->put_smid_fast_path =
7998 &_base_put_smid_fast_path_atomic;
7999 ioc->put_smid_hi_priority =
8000 &_base_put_smid_hi_priority_atomic;
8001 } else {
8002 ioc->put_smid_default = &_base_put_smid_default;
8003 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
8004 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
8005 if (ioc->is_mcpu_endpoint)
8006 ioc->put_smid_scsi_io =
8007 &_base_put_smid_mpi_ep_scsi_io;
8008 else
8009 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
8010 }
8011
8012
8013
8014
8015
8016
8017 ioc->build_sg_mpi = &_base_build_sg;
8018 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
8019
8020 r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8021 if (r)
8022 goto out_free_resources;
8023
8024 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
8025 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
8026 if (!ioc->pfacts) {
8027 r = -ENOMEM;
8028 goto out_free_resources;
8029 }
8030
8031 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
8032 r = _base_get_port_facts(ioc, i);
8033 if (r) {
8034 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8035 if (rc || (_base_get_port_facts(ioc, i)))
8036 goto out_free_resources;
8037 }
8038 }
8039
8040 r = _base_allocate_memory_pools(ioc);
8041 if (r)
8042 goto out_free_resources;
8043
8044 if (irqpoll_weight > 0)
8045 ioc->thresh_hold = irqpoll_weight;
8046 else
8047 ioc->thresh_hold = ioc->hba_queue_depth/4;
8048
8049 _base_init_irqpolls(ioc);
8050 init_waitqueue_head(&ioc->reset_wq);
8051
8052
8053 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8054 if (ioc->facts.MaxDevHandle % 8)
8055 ioc->pd_handles_sz++;
8056 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
8057 GFP_KERNEL);
8058 if (!ioc->pd_handles) {
8059 r = -ENOMEM;
8060 goto out_free_resources;
8061 }
8062 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
8063 GFP_KERNEL);
8064 if (!ioc->blocking_handles) {
8065 r = -ENOMEM;
8066 goto out_free_resources;
8067 }
8068
8069
8070 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
8071 if (ioc->facts.MaxDevHandle % 8)
8072 ioc->pend_os_device_add_sz++;
8073 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
8074 GFP_KERNEL);
8075 if (!ioc->pend_os_device_add) {
8076 r = -ENOMEM;
8077 goto out_free_resources;
8078 }
8079
8080 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
8081 ioc->device_remove_in_progress =
8082 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
8083 if (!ioc->device_remove_in_progress) {
8084 r = -ENOMEM;
8085 goto out_free_resources;
8086 }
8087
8088 ioc->fwfault_debug = mpt3sas_fwfault_debug;
8089
8090
8091 mutex_init(&ioc->base_cmds.mutex);
8092 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8093 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
8094
8095
8096 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8097 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
8098
8099
8100 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8101 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
8102 mutex_init(&ioc->transport_cmds.mutex);
8103
8104
8105 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8106 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8107 mutex_init(&ioc->scsih_cmds.mutex);
8108
8109
8110 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8111 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
8112 mutex_init(&ioc->tm_cmds.mutex);
8113
8114
8115 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8116 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
8117 mutex_init(&ioc->config_cmds.mutex);
8118
8119
8120 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8121 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
8122 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
8123 mutex_init(&ioc->ctl_cmds.mutex);
8124
8125 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
8126 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
8127 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
8128 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
8129 r = -ENOMEM;
8130 goto out_free_resources;
8131 }
8132
8133 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
8134 ioc->event_masks[i] = -1;
8135
8136
8137 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
8138 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
8139 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
8140 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
8141 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
8142 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
8143 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
8144 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
8145 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
8146 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
8147 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
8148 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
8149 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
8150 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
8151 if (ioc->is_gen35_ioc) {
8152 _base_unmask_events(ioc,
8153 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
8154 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
8155 _base_unmask_events(ioc,
8156 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
8157 }
8158 }
8159 r = _base_make_ioc_operational(ioc);
8160 if (r == -EAGAIN) {
8161 r = _base_make_ioc_operational(ioc);
8162 if (r)
8163 goto out_free_resources;
8164 }
8165
8166
8167
8168
8169
8170 memcpy(&ioc->prev_fw_facts, &ioc->facts,
8171 sizeof(struct mpt3sas_facts));
8172
8173 ioc->non_operational_loop = 0;
8174 ioc->ioc_coredump_loop = 0;
8175 ioc->got_task_abort_from_ioctl = 0;
8176 return 0;
8177
8178 out_free_resources:
8179
8180 ioc->remove_host = 1;
8181
8182 mpt3sas_base_free_resources(ioc);
8183 _base_release_memory_pools(ioc);
8184 pci_set_drvdata(ioc->pdev, NULL);
8185 kfree(ioc->cpu_msix_table);
8186 if (ioc->is_warpdrive)
8187 kfree(ioc->reply_post_host_index);
8188 kfree(ioc->pd_handles);
8189 kfree(ioc->blocking_handles);
8190 kfree(ioc->device_remove_in_progress);
8191 kfree(ioc->pend_os_device_add);
8192 kfree(ioc->tm_cmds.reply);
8193 kfree(ioc->transport_cmds.reply);
8194 kfree(ioc->scsih_cmds.reply);
8195 kfree(ioc->config_cmds.reply);
8196 kfree(ioc->base_cmds.reply);
8197 kfree(ioc->port_enable_cmds.reply);
8198 kfree(ioc->ctl_cmds.reply);
8199 kfree(ioc->ctl_cmds.sense);
8200 kfree(ioc->pfacts);
8201 ioc->ctl_cmds.reply = NULL;
8202 ioc->base_cmds.reply = NULL;
8203 ioc->tm_cmds.reply = NULL;
8204 ioc->scsih_cmds.reply = NULL;
8205 ioc->transport_cmds.reply = NULL;
8206 ioc->config_cmds.reply = NULL;
8207 ioc->pfacts = NULL;
8208 return r;
8209}
8210
8211
8212
8213
8214
8215
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 *
 * Reverses mpt3sas_base_attach: stops the watchdog, quiesces and unmaps
 * the controller, releases the memory pools, then frees every
 * per-adapter allocation made during attach.
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
8243
8244
8245
8246
8247
/**
 * _base_pre_reset_handler - pre-reset handler
 * @ioc: per adapter object
 *
 * Notifies the scsih and ctl modules that a host reset is about to
 * begin so they can quiesce their activity.
 */
static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_pre_reset_handler(ioc);
	mpt3sas_ctl_pre_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
8254
8255
8256
8257
8258
/**
 * _base_clear_outstanding_mpt_commands - clear outstanding mpt commands
 * @ioc: per adapter object
 *
 * For each internal command type still pending across a reset, mark it
 * MPT3_CMD_RESET, release its SMID, and complete the waiter so blocked
 * callers see the reset rather than hanging forever.
 */
static void
_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			/*
			 * During load there is no waiter; flag the async
			 * scan as failed instead of completing.
			 */
			ioc->start_scan_failed =
				MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
		} else {
			complete(&ioc->port_enable_cmds.done);
		}
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		/* USHRT_MAX marks the config smid as invalid */
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}
8293
8294
8295
8296
8297
/**
 * _base_clear_outstanding_commands - clear all outstanding commands
 * @ioc: per adapter object
 *
 * Flushes outstanding SCSI/TM commands, outstanding ioctls, and then
 * the driver's internal MPT commands, in that order.
 */
static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
	_base_clear_outstanding_mpt_commands(ioc);
}
8304
8305
8306
8307
8308
/**
 * _base_reset_done_handler - reset done handler
 * @ioc: per adapter object
 *
 * Notifies the scsih and ctl modules that the host reset completed so
 * they can resume normal operation.
 */
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
8315
8316
8317
8318
8319
8320
8321
8322
/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending I/O to drain
 * @ioc: per adapter object
 *
 * Waits (up to 10 seconds) for outstanding host commands to complete.
 * Only meaningful while the IOC is OPERATIONAL; in any other state the
 * firmware will not complete I/O, so the wait is skipped.  The wake-up
 * side decrements ioc->pending_io_count and signals ioc->reset_wq.
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
8343
8344
8345
8346
8347
8348
8349
8350
/**
 * _base_check_ioc_facts_changes - grow per-handle bitmaps after a reset
 * @ioc: per adapter object
 *
 * Compares the freshly-read IOC facts against the snapshot taken before
 * the reset.  If new firmware advertises a larger MaxDevHandle, every
 * per-handle bitmap (pd_handles, blocking_handles, pend_os_device_add,
 * device_remove_in_progress) is grown with krealloc and the newly-added
 * tail is zeroed.  On krealloc failure the original buffer is left
 * untouched (krealloc semantics), so the ioc pointers stay valid.
 *
 * NOTE: the old *_sz fields are read for the memset offsets before
 * being overwritten with the new size — the update order is deliberate.
 *
 * Return: 0 for success, -ENOMEM on allocation failure.
 */
static int
_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
{
	u16 pd_handles_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;

	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		/* one bit per device handle, rounded up to whole bytes */
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;

		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
		    GFP_KERNEL);
		if (!pd_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pd_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		/* zero only the freshly grown tail */
		memset(pd_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;

		blocking_handles = krealloc(ioc->blocking_handles,
		    pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for "
			    "blocking_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		/* both bitmaps grown; now safe to publish the new size */
		ioc->pd_handles_sz = pd_handles_sz;

		pend_os_device_add = krealloc(ioc->pend_os_device_add,
		    pd_handles_sz, GFP_KERNEL);
		if (!pend_os_device_add) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		    (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;

		device_remove_in_progress = krealloc(
		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
		if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for "
			    "device_remove_in_progress of sz: %d\n "
			    , pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		    ioc->device_remove_in_progress_sz, 0,
		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
	}

	/* refresh the snapshot for the next reset cycle */
	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
	return 0;
}
8422
8423
8424
8425
8426
8427
8428
8429
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Serialized (reset_in_progress_mutex) full host reset: quiesce,
 * bring the IOC to READY, flush every outstanding command, re-read the
 * facts, grow per-handle bitmaps if the firmware changed, then make the
 * IOC operational again.  Diag-buffer master triggers are fired after
 * the mutex is dropped.
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

	if (ioc->pci_error_recovery) {
		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* serialize resets; only one in flight per adapter */
	mutex_lock(&ioc->reset_in_progress_mutex);

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	/*
	 * A registered-but-not-released trace diag buffer means a master
	 * trigger must fire after the reset; record whether the reset
	 * was provoked by a firmware FAULT/COREDUMP.
	 */
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
		    (ioc_state & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP) {
			is_fault = 1;
			ioc->htb_rel.trigger_info_dwords[1] =
			    (ioc_state & MPI2_DOORBELL_DATA_MASK);
		}
	}
	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	r = mpt3sas_base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_clear_outstanding_commands(ioc);

	/*
	 * If port enable failed while the driver was still loading, the
	 * host cannot be recovered; schedule its removal instead.
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	r = _base_check_ioc_facts_changes(ioc);
	if (r) {
		ioc_info(ioc,
		    "Some of the parameters got changed in this new firmware"
		    " image and it requires system reboot\n");
		goto out;
	}
	/* new firmware must keep supporting RDPQ if it was in use */
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware."
		      "Please reboot the system and ensure that the correct"
		      " firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	/* fire diag-buffer master triggers outside the reset mutex */
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
	return r;
}
8527