// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/sched.h>
18#include <linux/rtc.h>
19#include <linux/bcd.h>
20#include <linux/reboot.h>
21#include <linux/cciss_ioctl.h>
22#include <linux/blk-mq-pci.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_eh.h>
27#include <scsi/scsi_transport_sas.h>
28#include <asm/unaligned.h>
29#include "smartpqi.h"
30#include "smartpqi_sis.h"
31
32#if !defined(BUILD_TIMESTAMP)
33#define BUILD_TIMESTAMP
34#endif
35
36#define DRIVER_VERSION "2.1.8-045"
37#define DRIVER_MAJOR 2
38#define DRIVER_MINOR 1
39#define DRIVER_RELEASE 8
40#define DRIVER_REVISION 45
41
42#define DRIVER_NAME "Microsemi PQI Driver (v" \
43 DRIVER_VERSION BUILD_TIMESTAMP ")"
44#define DRIVER_NAME_SHORT "smartpqi"
45
46#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
47
48#define PQI_POST_RESET_DELAY_SECS 5
49#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
50
51MODULE_AUTHOR("Microsemi");
52MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
53 DRIVER_VERSION);
54MODULE_VERSION(DRIVER_VERSION);
55MODULE_LICENSE("GPL");
56
57static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
58static void pqi_ctrl_offline_worker(struct work_struct *work);
59static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
60static void pqi_scan_start(struct Scsi_Host *shost);
61static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
62 struct pqi_queue_group *queue_group, enum pqi_io_path path,
63 struct pqi_io_request *io_request);
64static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
65 struct pqi_iu_header *request, unsigned int flags,
66 struct pqi_raid_error_info *error_info);
67static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
68 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
69 unsigned int cdb_length, struct pqi_queue_group *queue_group,
70 struct pqi_encryption_info *encryption_info, bool raid_bypass);
71static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
72 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
73 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
74 struct pqi_scsi_dev_raid_map_data *rmd);
75static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
76 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
77 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
78 struct pqi_scsi_dev_raid_map_data *rmd);
79static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
80static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
81static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
82static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
83static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
84static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
85static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
86 struct pqi_scsi_dev *device, unsigned long timeout_msecs);
87
88
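/* value for the flags argument to pqi_submit_raid_request_synchronous() */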
89#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
90
91static struct scsi_transport_template *pqi_sas_transport_template;
92
93static atomic_t pqi_controller_count = ATOMIC_INIT(0);
94
95enum pqi_lockup_action {
96 NONE,
97 REBOOT,
98 PANIC
99};
100
101static enum pqi_lockup_action pqi_lockup_action = NONE;
102
103static struct {
104 enum pqi_lockup_action action;
105 char *name;
106} pqi_lockup_actions[] = {
107 {
108 .action = NONE,
109 .name = "none",
110 },
111 {
112 .action = REBOOT,
113 .name = "reboot",
114 },
115 {
116 .action = PANIC,
117 .name = "panic",
118 },
119};
120
121static unsigned int pqi_supported_event_types[] = {
122 PQI_EVENT_TYPE_HOTPLUG,
123 PQI_EVENT_TYPE_HARDWARE,
124 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
125 PQI_EVENT_TYPE_LOGICAL_DEVICE,
126 PQI_EVENT_TYPE_OFA,
127 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
128 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
129};
130
131static int pqi_disable_device_id_wildcards;
132module_param_named(disable_device_id_wildcards,
133 pqi_disable_device_id_wildcards, int, 0644);
134MODULE_PARM_DESC(disable_device_id_wildcards,
135 "Disable device ID wildcards.");
136
137static int pqi_disable_heartbeat;
138module_param_named(disable_heartbeat,
139 pqi_disable_heartbeat, int, 0644);
140MODULE_PARM_DESC(disable_heartbeat,
141 "Disable heartbeat.");
142
143static int pqi_disable_ctrl_shutdown;
144module_param_named(disable_ctrl_shutdown,
145 pqi_disable_ctrl_shutdown, int, 0644);
146MODULE_PARM_DESC(disable_ctrl_shutdown,
147 "Disable controller shutdown when controller locked up.");
148
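/*
 * Typical usage of the lockup_action parameter (hypothetical invocation):
 *   modprobe smartpqi lockup_action=reboot
 */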
149static char *pqi_lockup_action_param;
150module_param_named(lockup_action,
151 pqi_lockup_action_param, charp, 0644);
152MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
153 "\t\tSupported: none, reboot, panic\n"
154 "\t\tDefault: none");
155
156static int pqi_expose_ld_first;
157module_param_named(expose_ld_first,
158 pqi_expose_ld_first, int, 0644);
159MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
160
161static int pqi_hide_vsep;
162module_param_named(hide_vsep,
163 pqi_hide_vsep, int, 0644);
164MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
165
166static char *raid_levels[] = {
167 "RAID-0",
168 "RAID-4",
169 "RAID-1(1+0)",
170 "RAID-5",
171 "RAID-5+1",
172 "RAID-6",
173 "RAID-1(Triple)",
174};
175
176static char *pqi_raid_level_to_string(u8 raid_level)
177{
178 if (raid_level < ARRAY_SIZE(raid_levels))
179 return raid_levels[raid_level];
180
181 return "RAID UNKNOWN";
182}
183
184#define SA_RAID_0 0
185#define SA_RAID_4 1
186#define SA_RAID_1 2
187#define SA_RAID_5 3
188#define SA_RAID_51 4
189#define SA_RAID_6 5
190#define SA_RAID_TRIPLE 6
191#define SA_RAID_MAX SA_RAID_TRIPLE
192#define SA_RAID_UNKNOWN 0xff
193
194static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
195{
196 pqi_prep_for_scsi_done(scmd);
197 scmd->scsi_done(scmd);
198}
199
200static inline void pqi_disable_write_same(struct scsi_device *sdev)
201{
202 sdev->no_write_same = 1;
203}
204
205static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
206{
207 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
208}
209
210static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
211{
212 return !device->is_physical_device;
213}
214
215static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
216{
217 return scsi3addr[2] != 0;
218}
219
220static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
221{
222 return !ctrl_info->controller_online;
223}
224
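/* take the controller offline if the SIS firmware is no longer running */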
225static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
226{
227 if (ctrl_info->controller_online)
228 if (!sis_is_firmware_running(ctrl_info))
229 pqi_take_ctrl_offline(ctrl_info);
230}
231
232static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
233{
234 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
235}
236
237static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
238{
239 return sis_read_driver_scratch(ctrl_info);
240}
241
242static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
243 enum pqi_ctrl_mode mode)
244{
245 sis_write_driver_scratch(ctrl_info, mode);
246}
247
248static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
249{
250 ctrl_info->scan_blocked = true;
251 mutex_lock(&ctrl_info->scan_mutex);
252}
253
254static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
255{
256 ctrl_info->scan_blocked = false;
257 mutex_unlock(&ctrl_info->scan_mutex);
258}
259
260static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
261{
262 return ctrl_info->scan_blocked;
263}
264
265static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
266{
267 mutex_lock(&ctrl_info->lun_reset_mutex);
268}
269
270static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
271{
272 mutex_unlock(&ctrl_info->lun_reset_mutex);
273}
274
275static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
276{
277 struct Scsi_Host *shost;
278 unsigned int num_loops;
279 int msecs_sleep;
280
281 shost = ctrl_info->scsi_host;
282
283 scsi_block_requests(shost);
284
285 num_loops = 0;
286 msecs_sleep = 20;
287 while (scsi_host_busy(shost)) {
288 num_loops++;
289 if (num_loops == 10)
290 msecs_sleep = 500;
291 msleep(msecs_sleep);
292 }
293}
294
295static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
296{
297 scsi_unblock_requests(ctrl_info->scsi_host);
298}
299
300static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
301{
302 atomic_inc(&ctrl_info->num_busy_threads);
303}
304
305static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
306{
307 atomic_dec(&ctrl_info->num_busy_threads);
308}
309
310static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
311{
312 return ctrl_info->block_requests;
313}
314
315static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
316{
317 ctrl_info->block_requests = true;
318}
319
320static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
321{
322 ctrl_info->block_requests = false;
323 wake_up_all(&ctrl_info->block_requests_wait);
324}
325
326static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
327{
328 if (!pqi_ctrl_blocked(ctrl_info))
329 return;
330
331 atomic_inc(&ctrl_info->num_blocked_threads);
332 wait_event(ctrl_info->block_requests_wait,
333 !pqi_ctrl_blocked(ctrl_info));
334 atomic_dec(&ctrl_info->num_blocked_threads);
335}
336
337#define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10
338
339static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
340{
341 unsigned long start_jiffies;
342 unsigned long warning_timeout;
343 bool displayed_warning;
344
345 displayed_warning = false;
346 start_jiffies = jiffies;
347 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
348
349 while (atomic_read(&ctrl_info->num_busy_threads) >
350 atomic_read(&ctrl_info->num_blocked_threads)) {
351 if (time_after(jiffies, warning_timeout)) {
352 dev_warn(&ctrl_info->pci_dev->dev,
353 "waiting %u seconds for driver activity to quiesce\n",
354 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
355 displayed_warning = true;
356 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
357 }
358 usleep_range(1000, 2000);
359 }
360
361 if (displayed_warning)
362 dev_warn(&ctrl_info->pci_dev->dev,
363 "driver activity quiesced after waiting for %u seconds\n",
364 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
365}
366
367static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
368{
369 return device->device_offline;
370}
371
372static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
373{
374 mutex_lock(&ctrl_info->ofa_mutex);
375}
376
377static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
378{
379 mutex_unlock(&ctrl_info->ofa_mutex);
380}
381
382static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
383{
384 mutex_lock(&ctrl_info->ofa_mutex);
385 mutex_unlock(&ctrl_info->ofa_mutex);
386}
387
388static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
389{
390 return mutex_is_locked(&ctrl_info->ofa_mutex);
391}
392
393static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
394{
395 device->in_remove = true;
396}
397
398static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
399{
400 return device->in_remove;
401}
402
403static inline int pqi_event_type_to_event_index(unsigned int event_type)
404{
405 int index;
406
407 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
408 if (event_type == pqi_supported_event_types[index])
409 return index;
410
411 return -1;
412}
413
414static inline bool pqi_is_supported_event(unsigned int event_type)
415{
416 return pqi_event_type_to_event_index(event_type) != -1;
417}
418
419static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
420 unsigned long delay)
421{
422 if (pqi_ctrl_offline(ctrl_info))
423 return;
424
425 schedule_delayed_work(&ctrl_info->rescan_work, delay);
426}
427
428static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
429{
430 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
431}
432
433#define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
434
435static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
436{
437 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
438}
439
440static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
441{
442 cancel_delayed_work_sync(&ctrl_info->rescan_work);
443}
444
445static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
446{
447 if (!ctrl_info->heartbeat_counter)
448 return 0;
449
450 return readl(ctrl_info->heartbeat_counter);
451}
452
453static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
454{
455 return readb(ctrl_info->soft_reset_status);
456}
457
458static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
459{
460 u8 status;
461
462 status = pqi_read_soft_reset_status(ctrl_info);
463 status &= ~PQI_SOFT_RESET_ABORT;
464 writeb(status, ctrl_info->soft_reset_status);
465}
466
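/*
 * DMA-map a single data buffer and describe it with one SG descriptor.
 * Returns 0 on success or -ENOMEM if the mapping fails.
 */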
467static int pqi_map_single(struct pci_dev *pci_dev,
468 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
469 size_t buffer_length, enum dma_data_direction data_direction)
470{
471 dma_addr_t bus_address;
472
473 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
474 return 0;
475
476 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
477 data_direction);
478 if (dma_mapping_error(&pci_dev->dev, bus_address))
479 return -ENOMEM;
480
481 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
482 put_unaligned_le32(buffer_length, &sg_descriptor->length);
483 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
484
485 return 0;
486}
487
488static void pqi_pci_unmap(struct pci_dev *pci_dev,
489 struct pqi_sg_descriptor *descriptors, int num_descriptors,
490 enum dma_data_direction data_direction)
491{
492 int i;
493
494 if (data_direction == DMA_NONE)
495 return;
496
497 for (i = 0; i < num_descriptors; i++)
498 dma_unmap_single(&pci_dev->dev,
499 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
500 get_unaligned_le32(&descriptors[i].length),
501 data_direction);
502}
503
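/*
 * Build a RAID-path (pass-through) request for the given CISS/BMIC command:
 * fill in the IU header, construct the CDB, and DMA-map the caller's buffer.
 */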
504static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
505 struct pqi_raid_path_request *request, u8 cmd,
506 u8 *scsi3addr, void *buffer, size_t buffer_length,
507 u16 vpd_page, enum dma_data_direction *dir)
508{
509 u8 *cdb;
510 size_t cdb_length = buffer_length;
511
512 memset(request, 0, sizeof(*request));
513
514 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
515 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
516 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
517 &request->header.iu_length);
518 put_unaligned_le32(buffer_length, &request->buffer_length);
519 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
520 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
521 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
522
523 cdb = request->cdb;
524
525 switch (cmd) {
526 case INQUIRY:
527 request->data_direction = SOP_READ_FLAG;
528 cdb[0] = INQUIRY;
529 if (vpd_page & VPD_PAGE) {
530 cdb[1] = 0x1;
531 cdb[2] = (u8)vpd_page;
532 }
533 cdb[4] = (u8)cdb_length;
534 break;
535 case CISS_REPORT_LOG:
536 case CISS_REPORT_PHYS:
537 request->data_direction = SOP_READ_FLAG;
538 cdb[0] = cmd;
539 if (cmd == CISS_REPORT_PHYS)
540 cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
541 else
542 cdb[1] = ctrl_info->ciss_report_log_flags;
543 put_unaligned_be32(cdb_length, &cdb[6]);
544 break;
545 case CISS_GET_RAID_MAP:
546 request->data_direction = SOP_READ_FLAG;
547 cdb[0] = CISS_READ;
548 cdb[1] = CISS_GET_RAID_MAP;
549 put_unaligned_be32(cdb_length, &cdb[6]);
550 break;
551 case SA_FLUSH_CACHE:
552 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
553 request->data_direction = SOP_WRITE_FLAG;
554 cdb[0] = BMIC_WRITE;
555 cdb[6] = BMIC_FLUSH_CACHE;
556 put_unaligned_be16(cdb_length, &cdb[7]);
557 break;
558 case BMIC_SENSE_DIAG_OPTIONS:
559 cdb_length = 0;
560 fallthrough;
561 case BMIC_IDENTIFY_CONTROLLER:
562 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
563 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
564 case BMIC_SENSE_FEATURE:
565 request->data_direction = SOP_READ_FLAG;
566 cdb[0] = BMIC_READ;
567 cdb[6] = cmd;
568 put_unaligned_be16(cdb_length, &cdb[7]);
569 break;
570 case BMIC_SET_DIAG_OPTIONS:
571 cdb_length = 0;
572 fallthrough;
573 case BMIC_WRITE_HOST_WELLNESS:
574 request->data_direction = SOP_WRITE_FLAG;
575 cdb[0] = BMIC_WRITE;
576 cdb[6] = cmd;
577 put_unaligned_be16(cdb_length, &cdb[7]);
578 break;
579 case BMIC_CSMI_PASSTHRU:
580 request->data_direction = SOP_BIDIRECTIONAL;
581 cdb[0] = BMIC_WRITE;
582 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
583 cdb[6] = cmd;
584 put_unaligned_be16(cdb_length, &cdb[7]);
585 break;
586 default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
588 break;
589 }
590
591 switch (request->data_direction) {
592 case SOP_READ_FLAG:
593 *dir = DMA_FROM_DEVICE;
594 break;
595 case SOP_WRITE_FLAG:
596 *dir = DMA_TO_DEVICE;
597 break;
598 case SOP_NO_DIRECTION_FLAG:
599 *dir = DMA_NONE;
600 break;
601 default:
602 *dir = DMA_BIDIRECTIONAL;
603 break;
604 }
605
606 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
607 buffer, buffer_length, *dir);
608}
609
610static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
611{
612 io_request->scmd = NULL;
613 io_request->status = 0;
614 io_request->error_info = NULL;
615 io_request->raid_bypass = false;
616}
617
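/*
 * Claim a free I/O request slot from the pool.  The refcount test makes the
 * allocation lock-free; on contention the search simply advances to the next
 * slot.
 */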
618static struct pqi_io_request *pqi_alloc_io_request(
619 struct pqi_ctrl_info *ctrl_info)
620{
621 struct pqi_io_request *io_request;
622 u16 i = ctrl_info->next_io_request_slot;
623
624 while (1) {
625 io_request = &ctrl_info->io_request_pool[i];
626 if (atomic_inc_return(&io_request->refcount) == 1)
627 break;
628 atomic_dec(&io_request->refcount);
629 i = (i + 1) % ctrl_info->max_io_slots;
630 }
631
632
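	/* benignly racy: a stale next_io_request_slot only costs extra loop iterations */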
633 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
634
635 pqi_reinit_io_request(io_request);
636
637 return io_request;
638}
639
640static void pqi_free_io_request(struct pqi_io_request *io_request)
641{
642 atomic_dec(&io_request->refcount);
643}
644
645static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
646 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
647 struct pqi_raid_error_info *error_info)
648{
649 int rc;
650 struct pqi_raid_path_request request;
651 enum dma_data_direction dir;
652
653 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
654 buffer, buffer_length, vpd_page, &dir);
655 if (rc)
656 return rc;
657
658 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
659
660 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
661
662 return rc;
663}
664
665
666
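/* wrappers around pqi_send_scsi_raid_request() for controller-addressed commands */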
667static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
668 u8 cmd, void *buffer, size_t buffer_length)
669{
670 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
671 buffer, buffer_length, 0, NULL);
672}
673
674static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
675 u8 cmd, void *buffer, size_t buffer_length,
676 struct pqi_raid_error_info *error_info)
677{
678 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
679 buffer, buffer_length, 0, error_info);
680}
681
682static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
683 struct bmic_identify_controller *buffer)
684{
685 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
686 buffer, sizeof(*buffer));
687}
688
689static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
690 struct bmic_sense_subsystem_info *sense_info)
691{
692 return pqi_send_ctrl_raid_request(ctrl_info,
693 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
694 sizeof(*sense_info));
695}
696
697static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
698 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
699{
700 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
701 buffer, buffer_length, vpd_page, NULL);
702}
703
704static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
705 struct pqi_scsi_dev *device,
706 struct bmic_identify_physical_device *buffer, size_t buffer_length)
707{
708 int rc;
709 enum dma_data_direction dir;
710 u16 bmic_device_index;
711 struct pqi_raid_path_request request;
712
713 rc = pqi_build_raid_path_request(ctrl_info, &request,
714 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
715 buffer_length, 0, &dir);
716 if (rc)
717 return rc;
718
719 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
720 request.cdb[2] = (u8)bmic_device_index;
721 request.cdb[9] = (u8)(bmic_device_index >> 8);
722
723 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
724
725 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
726
727 return rc;
728}
729
730static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
731{
732 u32 bytes;
733
734 bytes = get_unaligned_le16(limit);
735 if (bytes == 0)
736 bytes = ~0;
737 else
738 bytes *= 1024;
739
740 return bytes;
741}
742
743#pragma pack(1)
744
745struct bmic_sense_feature_buffer {
746 struct bmic_sense_feature_buffer_header header;
747 struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
748};
749
750#pragma pack()
751
752#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
753 offsetofend(struct bmic_sense_feature_buffer, \
754 aio_subpage.max_write_raid_1_10_3drive)
755
756#define MINIMUM_AIO_SUBPAGE_LENGTH \
757 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
758 max_write_raid_1_10_3drive) - \
759 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
760
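/*
 * Read the BMIC sense-feature AIO subpage to learn the controller's transfer
 * limits for encrypted I/O and RAID-bypass writes.
 */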
761static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
762{
763 int rc;
764 enum dma_data_direction dir;
765 struct pqi_raid_path_request request;
766 struct bmic_sense_feature_buffer *buffer;
767
768 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
769 if (!buffer)
770 return -ENOMEM;
771
772 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
773 buffer, sizeof(*buffer), 0, &dir);
774 if (rc)
775 goto error;
776
777 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
778 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
779
780 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
781
782 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
783
784 if (rc)
785 goto error;
786
787 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
788 buffer->header.subpage_code !=
789 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
790 get_unaligned_le16(&buffer->header.buffer_length) <
791 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
792 buffer->aio_subpage.header.page_code !=
793 BMIC_SENSE_FEATURE_IO_PAGE ||
794 buffer->aio_subpage.header.subpage_code !=
795 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
796 get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
797 MINIMUM_AIO_SUBPAGE_LENGTH) {
798 goto error;
799 }
800
801 ctrl_info->max_transfer_encrypted_sas_sata =
802 pqi_aio_limit_to_bytes(
803 &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
804
805 ctrl_info->max_transfer_encrypted_nvme =
806 pqi_aio_limit_to_bytes(
807 &buffer->aio_subpage.max_transfer_encrypted_nvme);
808
809 ctrl_info->max_write_raid_5_6 =
810 pqi_aio_limit_to_bytes(
811 &buffer->aio_subpage.max_write_raid_5_6);
812
813 ctrl_info->max_write_raid_1_10_2drive =
814 pqi_aio_limit_to_bytes(
815 &buffer->aio_subpage.max_write_raid_1_10_2drive);
816
817 ctrl_info->max_write_raid_1_10_3drive =
818 pqi_aio_limit_to_bytes(
819 &buffer->aio_subpage.max_write_raid_1_10_3drive);
820
821error:
822 kfree(buffer);
823
824 return rc;
825}
826
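/* ask the controller to flush its cache, tagging the shutdown event that triggered it */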
827static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
828 enum bmic_flush_cache_shutdown_event shutdown_event)
829{
830 int rc;
831 struct bmic_flush_cache *flush_cache;
832
833 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
834 if (!flush_cache)
835 return -ENOMEM;
836
837 flush_cache->shutdown_event = shutdown_event;
838
839 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
840 sizeof(*flush_cache));
841
842 kfree(flush_cache);
843
844 return rc;
845}
846
847int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
848 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
849 struct pqi_raid_error_info *error_info)
850{
851 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
852 buffer, buffer_length, error_info);
853}
854
855#define PQI_FETCH_PTRAID_DATA (1 << 31)
856
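/* read-modify-write the controller diag options to enable PQI_FETCH_PTRAID_DATA */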
857static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
858{
859 int rc;
860 struct bmic_diag_options *diag;
861
862 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
863 if (!diag)
864 return -ENOMEM;
865
866 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
867 diag, sizeof(*diag));
868 if (rc)
869 goto out;
870
871 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
872
873 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
874 sizeof(*diag));
875
876out:
877 kfree(diag);
878
879 return rc;
880}
881
882static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
883 void *buffer, size_t buffer_length)
884{
885 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
886 buffer, buffer_length);
887}
888
889#pragma pack(1)
890
891struct bmic_host_wellness_driver_version {
892 u8 start_tag[4];
893 u8 driver_version_tag[2];
894 __le16 driver_version_length;
895 char driver_version[32];
896 u8 dont_write_tag[2];
897 u8 end_tag[2];
898};
899
900#pragma pack()
901
902static int pqi_write_driver_version_to_host_wellness(
903 struct pqi_ctrl_info *ctrl_info)
904{
905 int rc;
906 struct bmic_host_wellness_driver_version *buffer;
907 size_t buffer_length;
908
909 buffer_length = sizeof(*buffer);
910
911 buffer = kmalloc(buffer_length, GFP_KERNEL);
912 if (!buffer)
913 return -ENOMEM;
914
915 buffer->start_tag[0] = '<';
916 buffer->start_tag[1] = 'H';
917 buffer->start_tag[2] = 'W';
918 buffer->start_tag[3] = '>';
919 buffer->driver_version_tag[0] = 'D';
920 buffer->driver_version_tag[1] = 'V';
921 put_unaligned_le16(sizeof(buffer->driver_version),
922 &buffer->driver_version_length);
923 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
924 sizeof(buffer->driver_version) - 1);
925 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
926 buffer->dont_write_tag[0] = 'D';
927 buffer->dont_write_tag[1] = 'W';
928 buffer->end_tag[0] = 'Z';
929 buffer->end_tag[1] = 'Z';
930
931 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
932
933 kfree(buffer);
934
935 return rc;
936}
937
938#pragma pack(1)
939
940struct bmic_host_wellness_time {
941 u8 start_tag[4];
942 u8 time_tag[2];
943 __le16 time_length;
944 u8 time[8];
945 u8 dont_write_tag[2];
946 u8 end_tag[2];
947};
948
949#pragma pack()
950
951static int pqi_write_current_time_to_host_wellness(
952 struct pqi_ctrl_info *ctrl_info)
953{
954 int rc;
955 struct bmic_host_wellness_time *buffer;
956 size_t buffer_length;
957 time64_t local_time;
958 unsigned int year;
959 struct tm tm;
960
961 buffer_length = sizeof(*buffer);
962
963 buffer = kmalloc(buffer_length, GFP_KERNEL);
964 if (!buffer)
965 return -ENOMEM;
966
967 buffer->start_tag[0] = '<';
968 buffer->start_tag[1] = 'H';
969 buffer->start_tag[2] = 'W';
970 buffer->start_tag[3] = '>';
971 buffer->time_tag[0] = 'T';
972 buffer->time_tag[1] = 'D';
973 put_unaligned_le16(sizeof(buffer->time),
974 &buffer->time_length);
975
976 local_time = ktime_get_real_seconds();
977 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
978 year = tm.tm_year + 1900;
979
980 buffer->time[0] = bin2bcd(tm.tm_hour);
981 buffer->time[1] = bin2bcd(tm.tm_min);
982 buffer->time[2] = bin2bcd(tm.tm_sec);
983 buffer->time[3] = 0;
984 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
985 buffer->time[5] = bin2bcd(tm.tm_mday);
986 buffer->time[6] = bin2bcd(year / 100);
987 buffer->time[7] = bin2bcd(year % 100);
988
989 buffer->dont_write_tag[0] = 'D';
990 buffer->dont_write_tag[1] = 'W';
991 buffer->end_tag[0] = 'Z';
992 buffer->end_tag[1] = 'Z';
993
994 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
995
996 kfree(buffer);
997
998 return rc;
999}
1000
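/* push the host's current time to the controller once every 24 hours */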
1001#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
1002
1003static void pqi_update_time_worker(struct work_struct *work)
1004{
1005 int rc;
1006 struct pqi_ctrl_info *ctrl_info;
1007
1008 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1009 update_time_work);
1010
1011 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
1012 if (rc)
1013 dev_warn(&ctrl_info->pci_dev->dev,
1014 "error updating time on controller\n");
1015
1016 schedule_delayed_work(&ctrl_info->update_time_work,
1017 PQI_UPDATE_TIME_WORK_INTERVAL);
1018}
1019
1020static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1021{
1022 schedule_delayed_work(&ctrl_info->update_time_work, 0);
1023}
1024
1025static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1026{
1027 cancel_delayed_work_sync(&ctrl_info->update_time_work);
1028}
1029
1030static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1031 size_t buffer_length)
1032{
1033 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
1034}
1035
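/*
 * Issue CISS_REPORT_PHYS/CISS_REPORT_LOG: first read just the header to learn
 * the list length, then re-read with a right-sized buffer, retrying if the
 * list grew in between.
 */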
1036static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
1037{
1038 int rc;
1039 size_t lun_list_length;
1040 size_t lun_data_length;
1041 size_t new_lun_list_length;
1042 void *lun_data = NULL;
1043 struct report_lun_header *report_lun_header;
1044
1045 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1046 if (!report_lun_header) {
1047 rc = -ENOMEM;
1048 goto out;
1049 }
1050
1051 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
1052 if (rc)
1053 goto out;
1054
1055 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1056
1057again:
1058 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1059
1060 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1061 if (!lun_data) {
1062 rc = -ENOMEM;
1063 goto out;
1064 }
1065
1066 if (lun_list_length == 0) {
1067 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1068 goto out;
1069 }
1070
1071 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1072 if (rc)
1073 goto out;
1074
1075 new_lun_list_length =
1076 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
1077
1078 if (new_lun_list_length > lun_list_length) {
1079 lun_list_length = new_lun_list_length;
1080 kfree(lun_data);
1081 goto again;
1082 }
1083
1084out:
1085 kfree(report_lun_header);
1086
1087 if (rc) {
1088 kfree(lun_data);
1089 lun_data = NULL;
1090 }
1091
1092 *buffer = lun_data;
1093
1094 return rc;
1095}
1096
1097static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1098{
1099 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer);
1100}
1101
1102static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1103{
1104 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1105}
1106
1107static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1108 struct report_phys_lun_extended **physdev_list,
1109 struct report_log_lun_extended **logdev_list)
1110{
1111 int rc;
1112 size_t logdev_list_length;
1113 size_t logdev_data_length;
1114 struct report_log_lun_extended *internal_logdev_list;
1115 struct report_log_lun_extended *logdev_data;
1116 struct report_lun_header report_lun_header;
1117
1118 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1119 if (rc)
1120 dev_err(&ctrl_info->pci_dev->dev,
1121 "report physical LUNs failed\n");
1122
1123 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1124 if (rc)
1125 dev_err(&ctrl_info->pci_dev->dev,
1126 "report logical LUNs failed\n");
1127
1128
1129
1130
1131
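	/*
	 * Append an all-zero entry to the end of the logical device list;
	 * it represents the controller itself.
	 */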
1132 logdev_data = *logdev_list;
1133
1134 if (logdev_data) {
1135 logdev_list_length =
1136 get_unaligned_be32(&logdev_data->header.list_length);
1137 } else {
1138 memset(&report_lun_header, 0, sizeof(report_lun_header));
1139 logdev_data =
1140 (struct report_log_lun_extended *)&report_lun_header;
1141 logdev_list_length = 0;
1142 }
1143
1144 logdev_data_length = sizeof(struct report_lun_header) +
1145 logdev_list_length;
1146
1147 internal_logdev_list = kmalloc(logdev_data_length +
1148 sizeof(struct report_log_lun_extended), GFP_KERNEL);
1149 if (!internal_logdev_list) {
1150 kfree(*logdev_list);
1151 *logdev_list = NULL;
1152 return -ENOMEM;
1153 }
1154
1155 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1156 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1157 sizeof(struct report_log_lun_extended_entry));
1158 put_unaligned_be32(logdev_list_length +
1159 sizeof(struct report_log_lun_extended_entry),
1160 &internal_logdev_list->header.list_length);
1161
1162 kfree(*logdev_list);
1163 *logdev_list = internal_logdev_list;
1164
1165 return 0;
1166}
1167
1168static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1169 int bus, int target, int lun)
1170{
1171 device->bus = bus;
1172 device->target = target;
1173 device->lun = lun;
1174}
1175
1176static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1177{
1178 u8 *scsi3addr;
1179 u32 lunid;
1180 int bus;
1181 int target;
1182 int lun;
1183
1184 scsi3addr = device->scsi3addr;
1185 lunid = get_unaligned_le32(scsi3addr);
1186
1187 if (pqi_is_hba_lunid(scsi3addr)) {
1188
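		/* the specified device is the controller itself */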
1189 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1190 device->target_lun_valid = true;
1191 return;
1192 }
1193
1194 if (pqi_is_logical_device(device)) {
1195 if (device->is_external_raid_device) {
1196 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1197 target = (lunid >> 16) & 0x3fff;
1198 lun = lunid & 0xff;
1199 } else {
1200 bus = PQI_RAID_VOLUME_BUS;
1201 target = 0;
1202 lun = lunid & 0x3fff;
1203 }
1204 pqi_set_bus_target_lun(device, bus, target, lun);
1205 device->target_lun_valid = true;
1206 return;
1207 }
1208
1209
1210
1211
1212
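	/*
	 * Defer target/LUN assignment for non-controller physical devices;
	 * the SAS transport layer assigns them when the device is exposed.
	 */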
1213 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1214}
1215
1216static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1217 struct pqi_scsi_dev *device)
1218{
1219 int rc;
1220 u8 raid_level;
1221 u8 *buffer;
1222
1223 raid_level = SA_RAID_UNKNOWN;
1224
1225 buffer = kmalloc(64, GFP_KERNEL);
1226 if (buffer) {
1227 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1228 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1229 if (rc == 0) {
1230 raid_level = buffer[8];
1231 if (raid_level > SA_RAID_MAX)
1232 raid_level = SA_RAID_UNKNOWN;
1233 }
1234 kfree(buffer);
1235 }
1236
1237 device->raid_level = raid_level;
1238}
1239
1240static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1241 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1242{
1243 char *err_msg;
1244 u32 raid_map_size;
1245 u32 r5or6_blocks_per_row;
1246
1247 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1248
1249 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1250 err_msg = "RAID map too small";
1251 goto bad_raid_map;
1252 }
1253
1254 if (device->raid_level == SA_RAID_1) {
1255 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1256 err_msg = "invalid RAID-1 map";
1257 goto bad_raid_map;
1258 }
1259 } else if (device->raid_level == SA_RAID_TRIPLE) {
1260 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1261 err_msg = "invalid RAID-1(Triple) map";
1262 goto bad_raid_map;
1263 }
1264 } else if ((device->raid_level == SA_RAID_5 ||
1265 device->raid_level == SA_RAID_6) &&
1266 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1267
1268 r5or6_blocks_per_row =
1269 get_unaligned_le16(&raid_map->strip_size) *
1270 get_unaligned_le16(&raid_map->data_disks_per_row);
1271 if (r5or6_blocks_per_row == 0) {
1272 err_msg = "invalid RAID-5 or RAID-6 map";
1273 goto bad_raid_map;
1274 }
1275 }
1276
1277 return 0;
1278
1279bad_raid_map:
1280 dev_warn(&ctrl_info->pci_dev->dev,
1281 "logical device %08x%08x %s\n",
1282 *((u32 *)&device->scsi3addr),
1283 *((u32 *)&device->scsi3addr[4]), err_msg);
1284
1285 return -EINVAL;
1286}
1287
1288static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1289 struct pqi_scsi_dev *device)
1290{
1291 int rc;
1292 u32 raid_map_size;
1293 struct raid_map *raid_map;
1294
1295 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1296 if (!raid_map)
1297 return -ENOMEM;
1298
1299 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1300 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
1301 if (rc)
1302 goto error;
1303
1304 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1305
1306 if (raid_map_size > sizeof(*raid_map)) {
1307
1308 kfree(raid_map);
1309
1310 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1311 if (!raid_map)
1312 return -ENOMEM;
1313
1314 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1315 device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1316 if (rc)
1317 goto error;
1318
1319 if (get_unaligned_le32(&raid_map->structure_size)
1320 != raid_map_size) {
1321 dev_warn(&ctrl_info->pci_dev->dev,
1322 "requested %u bytes, received %u bytes\n",
1323 raid_map_size,
1324 get_unaligned_le32(&raid_map->structure_size));
1325 goto error;
1326 }
1327 }
1328
1329 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1330 if (rc)
1331 goto error;
1332
1333 device->raid_map = raid_map;
1334
1335 return 0;
1336
1337error:
1338 kfree(raid_map);
1339
1340 return rc;
1341}
1342
1343static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1344 struct pqi_scsi_dev *device)
1345{
1346 if (!ctrl_info->lv_drive_type_mix_valid) {
1347 device->max_transfer_encrypted = ~0;
1348 return;
1349 }
1350
1351 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1352 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1353 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1354 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1355 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1356 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1357 case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1358 case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1359 device->max_transfer_encrypted =
1360 ctrl_info->max_transfer_encrypted_sas_sata;
1361 break;
1362 case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1363 device->max_transfer_encrypted =
1364 ctrl_info->max_transfer_encrypted_nvme;
1365 break;
1366 case LV_DRIVE_TYPE_MIX_UNKNOWN:
1367 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1368 default:
1369 device->max_transfer_encrypted =
1370 min(ctrl_info->max_transfer_encrypted_sas_sata,
1371 ctrl_info->max_transfer_encrypted_nvme);
1372 break;
1373 }
1374}
1375
1376static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1377 struct pqi_scsi_dev *device)
1378{
1379 int rc;
1380 u8 *buffer;
1381 u8 bypass_status;
1382
1383 buffer = kmalloc(64, GFP_KERNEL);
1384 if (!buffer)
1385 return;
1386
1387 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1388 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1389 if (rc)
1390 goto out;
1391
1392#define RAID_BYPASS_STATUS 4
1393#define RAID_BYPASS_CONFIGURED 0x1
1394#define RAID_BYPASS_ENABLED 0x2
1395
1396 bypass_status = buffer[RAID_BYPASS_STATUS];
1397 device->raid_bypass_configured =
1398 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1399 if (device->raid_bypass_configured &&
1400 (bypass_status & RAID_BYPASS_ENABLED) &&
1401 pqi_get_raid_map(ctrl_info, device) == 0) {
1402 device->raid_bypass_enabled = true;
1403 if (get_unaligned_le16(&device->raid_map->flags) &
1404 RAID_MAP_ENCRYPTION_ENABLED)
1405 pqi_set_max_transfer_encrypted(ctrl_info, device);
1406 }
1407
1408out:
1409 kfree(buffer);
1410}
1411
1412
1413
1414
1415
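/*
 * Use the logical-volume status VPD page to determine whether a volume is
 * offline and why.
 */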
1416static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1417 struct pqi_scsi_dev *device)
1418{
1419 int rc;
1420 size_t page_length;
1421 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1422 bool volume_offline = true;
1423 u32 volume_flags;
1424 struct ciss_vpd_logical_volume_status *vpd;
1425
1426 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1427 if (!vpd)
1428 goto no_buffer;
1429
1430 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1431 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1432 if (rc)
1433 goto out;
1434
1435 if (vpd->page_code != CISS_VPD_LV_STATUS)
1436 goto out;
1437
1438 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1439 volume_status) + vpd->page_length;
1440 if (page_length < sizeof(*vpd))
1441 goto out;
1442
1443 volume_status = vpd->volume_status;
1444 volume_flags = get_unaligned_be32(&vpd->flags);
1445 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1446
1447out:
1448 kfree(vpd);
1449no_buffer:
1450 device->volume_status = volume_status;
1451 device->volume_offline = volume_offline;
1452}
1453
1454#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
1455
1456static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1457 struct pqi_scsi_dev *device,
1458 struct bmic_identify_physical_device *id_phys)
1459{
1460 int rc;
1461
1462 memset(id_phys, 0, sizeof(*id_phys));
1463
1464 rc = pqi_identify_physical_device(ctrl_info, device,
1465 id_phys, sizeof(*id_phys));
1466 if (rc) {
1467 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1468 return rc;
1469 }
1470
1471 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1472 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1473
1474 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1475 memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1476
1477 device->box_index = id_phys->box_index;
1478 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1479 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1480 device->queue_depth =
1481 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1482 device->active_path_index = id_phys->active_path_number;
1483 device->path_map = id_phys->redundant_path_present_map;
1484 memcpy(&device->box,
1485 &id_phys->alternate_paths_phys_box_on_port,
1486 sizeof(device->box));
1487 memcpy(&device->phys_connector,
1488 &id_phys->alternate_paths_phys_connector,
1489 sizeof(device->phys_connector));
1490 device->bay = id_phys->phys_bay_in_box;
1491
1492 memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
1493 sizeof(device->page_83_identifier));
1494
1495 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1496 id_phys->phy_count)
1497 device->phy_id =
1498 id_phys->phy_to_phy_map[device->active_path_index];
1499 else
1500 device->phy_id = 0xFF;
1501
1502 return 0;
1503}
1504
1505static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1506 struct pqi_scsi_dev *device)
1507{
1508 int rc;
1509 u8 *buffer;
1510
1511 buffer = kmalloc(64, GFP_KERNEL);
1512 if (!buffer)
1513 return -ENOMEM;
1514
1515
1516 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1517 if (rc)
1518 goto out;
1519
1520 scsi_sanitize_inquiry_string(&buffer[8], 8);
1521 scsi_sanitize_inquiry_string(&buffer[16], 16);
1522
1523 device->devtype = buffer[0] & 0x1f;
1524 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1525 memcpy(device->model, &buffer[16], sizeof(device->model));
1526
1527 if (device->devtype == TYPE_DISK) {
1528 if (device->is_external_raid_device) {
1529 device->raid_level = SA_RAID_UNKNOWN;
1530 device->volume_status = CISS_LV_OK;
1531 device->volume_offline = false;
1532 } else {
1533 pqi_get_raid_level(ctrl_info, device);
1534 pqi_get_raid_bypass_status(ctrl_info, device);
1535 pqi_get_volume_status(ctrl_info, device);
1536 }
1537 }
1538
1539out:
1540 kfree(buffer);
1541
1542 return rc;
1543}
1544
1545static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1546 struct pqi_scsi_dev *device,
1547 struct bmic_identify_physical_device *id_phys)
1548{
1549 int rc;
1550
1551 if (device->is_expander_smp_device)
1552 return 0;
1553
1554 if (pqi_is_logical_device(device))
1555 rc = pqi_get_logical_device_info(ctrl_info, device);
1556 else
1557 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1558
1559 return rc;
1560}
1561
1562static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1563 struct pqi_scsi_dev *device)
1564{
1565 char *status;
1566 static const char unknown_state_str[] =
1567 "Volume is in an unknown state (%u)";
1568 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1569
1570 switch (device->volume_status) {
1571 case CISS_LV_OK:
1572 status = "Volume online";
1573 break;
1574 case CISS_LV_FAILED:
1575 status = "Volume failed";
1576 break;
1577 case CISS_LV_NOT_CONFIGURED:
1578 status = "Volume not configured";
1579 break;
1580 case CISS_LV_DEGRADED:
1581 status = "Volume degraded";
1582 break;
1583 case CISS_LV_READY_FOR_RECOVERY:
1584 status = "Volume ready for recovery operation";
1585 break;
1586 case CISS_LV_UNDERGOING_RECOVERY:
1587 status = "Volume undergoing recovery";
1588 break;
1589 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1590 status = "Wrong physical drive was replaced";
1591 break;
1592 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1593 status = "A physical drive not properly connected";
1594 break;
1595 case CISS_LV_HARDWARE_OVERHEATING:
1596 status = "Hardware is overheating";
1597 break;
1598 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1599 status = "Hardware has overheated";
1600 break;
1601 case CISS_LV_UNDERGOING_EXPANSION:
1602 status = "Volume undergoing expansion";
1603 break;
1604 case CISS_LV_NOT_AVAILABLE:
1605 status = "Volume waiting for transforming volume";
1606 break;
1607 case CISS_LV_QUEUED_FOR_EXPANSION:
1608 status = "Volume queued for expansion";
1609 break;
1610 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1611 status = "Volume disabled due to SCSI ID conflict";
1612 break;
1613 case CISS_LV_EJECTED:
1614 status = "Volume has been ejected";
1615 break;
1616 case CISS_LV_UNDERGOING_ERASE:
1617 status = "Volume undergoing background erase";
1618 break;
1619 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1620 status = "Volume ready for predictive spare rebuild";
1621 break;
1622 case CISS_LV_UNDERGOING_RPI:
1623 status = "Volume undergoing rapid parity initialization";
1624 break;
1625 case CISS_LV_PENDING_RPI:
1626 status = "Volume queued for rapid parity initialization";
1627 break;
1628 case CISS_LV_ENCRYPTED_NO_KEY:
1629 status = "Encrypted volume inaccessible - key not present";
1630 break;
1631 case CISS_LV_UNDERGOING_ENCRYPTION:
1632 status = "Volume undergoing encryption process";
1633 break;
1634 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1635 status = "Volume undergoing encryption re-keying process";
1636 break;
1637 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1638 status = "Volume encrypted but encryption is disabled";
1639 break;
1640 case CISS_LV_PENDING_ENCRYPTION:
1641 status = "Volume pending migration to encrypted state";
1642 break;
1643 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1644 status = "Volume pending encryption rekeying";
1645 break;
1646 case CISS_LV_NOT_SUPPORTED:
1647 status = "Volume not supported on this controller";
1648 break;
1649 case CISS_LV_STATUS_UNAVAILABLE:
1650 status = "Volume status not available";
1651 break;
1652 default:
1653 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1654 unknown_state_str, device->volume_status);
1655 status = unknown_state_buffer;
1656 break;
1657 }
1658
1659 dev_info(&ctrl_info->pci_dev->dev,
1660 "scsi %d:%d:%d:%d %s\n",
1661 ctrl_info->scsi_host->host_no,
1662 device->bus, device->target, device->lun, status);
1663}
1664
1665static void pqi_rescan_worker(struct work_struct *work)
1666{
1667 struct pqi_ctrl_info *ctrl_info;
1668
1669 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1670 rescan_work);
1671
1672 pqi_scan_scsi_devices(ctrl_info);
1673}
1674
1675static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1676 struct pqi_scsi_dev *device)
1677{
1678 int rc;
1679
1680 if (pqi_is_logical_device(device))
1681 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1682 device->target, device->lun);
1683 else
1684 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1685
1686 return rc;
1687}
1688
1689#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
1690
1691static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1692{
1693 int rc;
1694
1695 pqi_device_remove_start(device);
1696
1697 rc = pqi_device_wait_for_pending_io(ctrl_info, device,
1698 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1699 if (rc)
1700 dev_err(&ctrl_info->pci_dev->dev,
1701 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1702 ctrl_info->scsi_host->host_no, device->bus,
1703 device->target, device->lun,
1704 atomic_read(&device->scsi_cmds_outstanding));
1705
1706 if (pqi_is_logical_device(device))
1707 scsi_remove_device(device->sdev);
1708 else
1709 pqi_remove_sas_device(device);
1710}
1711
1712
1713
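/* assumes the caller holds ctrl_info->scsi_device_list_lock */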
1714static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1715 int bus, int target, int lun)
1716{
1717 struct pqi_scsi_dev *device;
1718
1719 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1720 if (device->bus == bus && device->target == target && device->lun == lun)
1721 return device;
1722
1723 return NULL;
1724}
1725
1726static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1727{
1728 if (dev1->is_physical_device != dev2->is_physical_device)
1729 return false;
1730
1731 if (dev1->is_physical_device)
1732 return dev1->wwid == dev2->wwid;
1733
1734 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1735}
1736
1737enum pqi_find_result {
1738 DEVICE_NOT_FOUND,
1739 DEVICE_CHANGED,
1740 DEVICE_SAME,
1741};
1742
1743static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1744 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1745{
1746 struct pqi_scsi_dev *device;
1747
1748 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1749 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1750 *matching_device = device;
1751 if (pqi_device_equal(device_to_find, device)) {
1752 if (device_to_find->volume_offline)
1753 return DEVICE_CHANGED;
1754 return DEVICE_SAME;
1755 }
1756 return DEVICE_CHANGED;
1757 }
1758 }
1759
1760 return DEVICE_NOT_FOUND;
1761}
1762
1763static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1764{
1765 if (device->is_expander_smp_device)
1766 return "Enclosure SMP ";
1767
1768 return scsi_device_type(device->devtype);
1769}
1770
1771#define PQI_DEV_INFO_BUFFER_LENGTH 128
1772
1773static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1774 char *action, struct pqi_scsi_dev *device)
1775{
1776 ssize_t count;
1777 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1778
1779 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1780 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1781
1782 if (device->target_lun_valid)
1783 count += scnprintf(buffer + count,
1784 PQI_DEV_INFO_BUFFER_LENGTH - count,
1785 "%d:%d",
1786 device->target,
1787 device->lun);
1788 else
1789 count += scnprintf(buffer + count,
1790 PQI_DEV_INFO_BUFFER_LENGTH - count,
1791 "-:-");
1792
1793 if (pqi_is_logical_device(device))
1794 count += scnprintf(buffer + count,
1795 PQI_DEV_INFO_BUFFER_LENGTH - count,
1796 " %08x%08x",
1797 *((u32 *)&device->scsi3addr),
1798 *((u32 *)&device->scsi3addr[4]));
1799 else
1800 count += scnprintf(buffer + count,
1801 PQI_DEV_INFO_BUFFER_LENGTH - count,
1802 " %016llx", device->sas_address);
1803
1804 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1805 " %s %.8s %.16s ",
1806 pqi_device_type(device),
1807 device->vendor,
1808 device->model);
1809
1810 if (pqi_is_logical_device(device)) {
1811 if (device->devtype == TYPE_DISK)
1812 count += scnprintf(buffer + count,
1813 PQI_DEV_INFO_BUFFER_LENGTH - count,
1814 "SSDSmartPathCap%c En%c %-12s",
1815 device->raid_bypass_configured ? '+' : '-',
1816 device->raid_bypass_enabled ? '+' : '-',
1817 pqi_raid_level_to_string(device->raid_level));
1818 } else {
1819 count += scnprintf(buffer + count,
1820 PQI_DEV_INFO_BUFFER_LENGTH - count,
1821 "AIO%c", device->aio_enabled ? '+' : '-');
1822 if (device->devtype == TYPE_DISK ||
1823 device->devtype == TYPE_ZBC)
1824 count += scnprintf(buffer + count,
1825 PQI_DEV_INFO_BUFFER_LENGTH - count,
1826 " qd=%-6d", device->queue_depth);
1827 }
1828
1829 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
1830}
1831
1832
1833
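/*
 * Copy the freshly discovered attributes into the existing device entry.
 * The raid_map pointer is handed over rather than duplicated.
 */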
1834static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1835 struct pqi_scsi_dev *new_device)
1836{
1837 existing_device->device_type = new_device->device_type;
1838 existing_device->bus = new_device->bus;
1839 if (new_device->target_lun_valid) {
1840 existing_device->target = new_device->target;
1841 existing_device->lun = new_device->lun;
1842 existing_device->target_lun_valid = true;
1843 }
1844
1845 if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
1846 existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
1847 new_device->volume_status == CISS_LV_OK)
1848 existing_device->rescan = true;
1849
1850
1851
1852 existing_device->is_physical_device = new_device->is_physical_device;
1853 existing_device->is_external_raid_device =
1854 new_device->is_external_raid_device;
1855 existing_device->is_expander_smp_device =
1856 new_device->is_expander_smp_device;
1857 existing_device->aio_enabled = new_device->aio_enabled;
1858 memcpy(existing_device->vendor, new_device->vendor,
1859 sizeof(existing_device->vendor));
1860 memcpy(existing_device->model, new_device->model,
1861 sizeof(existing_device->model));
1862 existing_device->sas_address = new_device->sas_address;
1863 existing_device->raid_level = new_device->raid_level;
1864 existing_device->queue_depth = new_device->queue_depth;
1865 existing_device->aio_handle = new_device->aio_handle;
1866 existing_device->volume_status = new_device->volume_status;
1867 existing_device->active_path_index = new_device->active_path_index;
1868 existing_device->phy_id = new_device->phy_id;
1869 existing_device->path_map = new_device->path_map;
1870 existing_device->bay = new_device->bay;
1871 existing_device->box_index = new_device->box_index;
1872 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
1873 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
1874 memcpy(existing_device->box, new_device->box,
1875 sizeof(existing_device->box));
1876 memcpy(existing_device->phys_connector, new_device->phys_connector,
1877 sizeof(existing_device->phys_connector));
1878 existing_device->next_bypass_group = 0;
1879 kfree(existing_device->raid_map);
1880 existing_device->raid_map = new_device->raid_map;
1881 existing_device->raid_bypass_configured =
1882 new_device->raid_bypass_configured;
1883 existing_device->raid_bypass_enabled =
1884 new_device->raid_bypass_enabled;
1885 existing_device->device_offline = false;
1886
1887
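	/* keep the raid_map from being freed along with new_device */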
1888 new_device->raid_map = NULL;
1889}
1890
1891static inline void pqi_free_device(struct pqi_scsi_dev *device)
1892{
1893 if (device) {
1894 kfree(device->raid_map);
1895 kfree(device);
1896 }
1897}
1898
1899
1900
1901
1902
1903
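/*
 * Called when exposing a new device to the SCSI midlayer fails, so that the
 * internal device list can be put back in sync with the midlayer's view.
 */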
1904static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1905 struct pqi_scsi_dev *device)
1906{
1907 unsigned long flags;
1908
1909 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1910 list_del(&device->scsi_device_list_entry);
1911 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1912
1913
1914 device->keep_device = false;
1915}
1916
1917static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1918{
1919 if (device->is_expander_smp_device)
1920 return device->sas_port != NULL;
1921
1922 return device->sdev != NULL;
1923}
1924
1925static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1926 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1927{
1928 int rc;
1929 unsigned int i;
1930 unsigned long flags;
1931 enum pqi_find_result find_result;
1932 struct pqi_scsi_dev *device;
1933 struct pqi_scsi_dev *next;
1934 struct pqi_scsi_dev *matching_device;
1935 LIST_HEAD(add_list);
1936 LIST_HEAD(delete_list);
1937
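 /*
 * Do as little work as possible while holding the spinlock below; anything
 * beyond updating the internal device list is deferred until after it is
 * released.
 */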
1945 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1946
1947
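 /* Assume that all devices in the existing list have gone away. */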
1948 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1949 device->device_gone = true;
1950
1951 for (i = 0; i < num_new_devices; i++) {
1952 device = new_device_list[i];
1953
1954 find_result = pqi_scsi_find_entry(ctrl_info, device,
1955 &matching_device);
1956
1957 switch (find_result) {
1958 case DEVICE_SAME:
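 /* The newly found device is already in the existing device list. */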
1963 device->new_device = false;
1964 matching_device->device_gone = false;
1965 pqi_scsi_update_device(matching_device, device);
1966 break;
1967 case DEVICE_NOT_FOUND:
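 /* The newly found device is NOT in the existing device list. */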
1972 device->new_device = true;
1973 break;
1974 case DEVICE_CHANGED:
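 /* The device at this address has changed; expose it as a new device. */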
1979 device->new_device = true;
1980 break;
1981 }
1982 }
1983
1984
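 /* Process all devices that have gone away. */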
1985 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1986 scsi_device_list_entry) {
1987 if (device->device_gone) {
1988 list_del_init(&device->scsi_device_list_entry);
1989 list_add_tail(&device->delete_list_entry, &delete_list);
1990 }
1991 }
1992
1993
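 /* Process all new devices. */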
1994 for (i = 0; i < num_new_devices; i++) {
1995 device = new_device_list[i];
1996 if (!device->new_device)
1997 continue;
1998 if (device->volume_offline)
1999 continue;
2000 list_add_tail(&device->scsi_device_list_entry,
2001 &ctrl_info->scsi_device_list);
2002 list_add_tail(&device->add_list_entry, &add_list);
2003
2004 device->keep_device = true;
2005 }
2006
2007 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2008
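 /*
 * If OFA is in progress, start removal of the departed devices now and
 * unblock device resets and SCSI requests so the removals can complete.
 */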
2014 if (pqi_ofa_in_progress(ctrl_info)) {
2015 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2016 if (pqi_is_device_added(device))
2017 pqi_device_remove_start(device);
2018 pqi_ctrl_unblock_device_reset(ctrl_info);
2019 pqi_scsi_unblock_requests(ctrl_info);
2020 }
2021
2022
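 /* Remove all devices that have gone away. */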
2023 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2024 if (device->volume_offline) {
2025 pqi_dev_info(ctrl_info, "offline", device);
2026 pqi_show_volume_status(ctrl_info, device);
2027 }
2028 list_del(&device->delete_list_entry);
2029 if (pqi_is_device_added(device)) {
2030 pqi_remove_device(ctrl_info, device);
2031 } else {
2032 if (!device->volume_offline)
2033 pqi_dev_info(ctrl_info, "removed", device);
2034 pqi_free_device(device);
2035 }
2036 }
2037
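 /*
 * Notify the SCSI midlayer of queue depth changes, and rescan any device
 * flagged for it (e.g. after a volume expansion completes).
 */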
 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
 if (device->sdev) {
 if (device->queue_depth != device->advertised_queue_depth) {
 device->advertised_queue_depth = device->queue_depth;
 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
 }
 if (device->rescan) {
 scsi_rescan_device(&device->sdev->sdev_gendev);
 device->rescan = false;
 }
 }
 }
2052
2053
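 /* Expose any new devices to the SCSI midlayer. */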
2054 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2055 if (!pqi_is_device_added(device)) {
2056 rc = pqi_add_device(ctrl_info, device);
2057 if (rc == 0) {
2058 pqi_dev_info(ctrl_info, "added", device);
2059 } else {
2060 dev_warn(&ctrl_info->pci_dev->dev,
2061 "scsi %d:%d:%d:%d addition failed, device not added\n",
2062 ctrl_info->scsi_host->host_no,
2063 device->bus, device->target,
2064 device->lun);
2065 pqi_fixup_botched_add(ctrl_info, device);
2066 }
2067 }
2068 }
2069}
2070
2071static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2072{
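 /* Hide controller-type LUNs other than the HBA itself. */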
2079 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2080 !pqi_is_hba_lunid(device->scsi3addr))
2081 return false;
2082
2083 return true;
2084}
2085
2086static inline bool pqi_skip_device(u8 *scsi3addr)
2087{
2088
2089 if (MASKED_DEVICE(scsi3addr))
2090 return true;
2091
2092 return false;
2093}
2094
2095static inline void pqi_mask_device(u8 *scsi3addr)
2096{
2097 scsi3addr[3] |= 0xc0;
2098}
2099
2100static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
2101{
2102 switch (device->device_type) {
2103 case SA_DEVICE_TYPE_SAS:
2104 case SA_DEVICE_TYPE_EXPANDER_SMP:
2105 case SA_DEVICE_TYPE_SES:
2106 return true;
2107 }
2108
2109 return false;
2110}
2111
2112static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2113{
2114 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2115}
2116
2117static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
2118 struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry)
2119{
2120 if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
2121 pqi_is_device_with_sas_address(device))
2122 device->wwid = phys_lun_ext_entry->wwid;
2123 else
2124 device->wwid = cpu_to_be64(get_unaligned_be64(&device->page_83_identifier));
2125}
2126
2127static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2128{
2129 int i;
2130 int rc;
2131 LIST_HEAD(new_device_list_head);
2132 struct report_phys_lun_extended *physdev_list = NULL;
2133 struct report_log_lun_extended *logdev_list = NULL;
2134 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
2135 struct report_log_lun_extended_entry *log_lun_ext_entry;
2136 struct bmic_identify_physical_device *id_phys = NULL;
2137 u32 num_physicals;
2138 u32 num_logicals;
2139 struct pqi_scsi_dev **new_device_list = NULL;
2140 struct pqi_scsi_dev *device;
2141 struct pqi_scsi_dev *next;
2142 unsigned int num_new_devices;
2143 unsigned int num_valid_devices;
2144 bool is_physical_device;
2145 u8 *scsi3addr;
2146 unsigned int physical_index;
2147 unsigned int logical_index;
 static const char *out_of_memory_msg =
 "failed to allocate memory, device discovery stopped";
2150
2151 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2152 if (rc)
2153 goto out;
2154
2155 if (physdev_list)
2156 num_physicals =
2157 get_unaligned_be32(&physdev_list->header.list_length)
2158 / sizeof(physdev_list->lun_entries[0]);
2159 else
2160 num_physicals = 0;
2161
2162 if (logdev_list)
2163 num_logicals =
2164 get_unaligned_be32(&logdev_list->header.list_length)
2165 / sizeof(logdev_list->lun_entries[0]);
2166 else
2167 num_logicals = 0;
2168
2169 if (num_physicals) {
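 /*
 * Allocate the BMIC identify-physical-device buffer once here; it is
 * reused for every physical device processed in the loop below.
 */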
2176 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2177 if (!id_phys) {
2178 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2179 out_of_memory_msg);
2180 rc = -ENOMEM;
2181 goto out;
2182 }
2183
2184 if (pqi_hide_vsep) {
2185 for (i = num_physicals - 1; i >= 0; i--) {
2186 phys_lun_ext_entry =
2187 &physdev_list->lun_entries[i];
2188 if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) {
2189 pqi_mask_device(phys_lun_ext_entry->lunid);
2190 break;
2191 }
2192 }
2193 }
2194 }
2195
2196 if (num_logicals &&
2197 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2198 ctrl_info->lv_drive_type_mix_valid = true;
2199
2200 num_new_devices = num_physicals + num_logicals;
2201
2202 new_device_list = kmalloc_array(num_new_devices,
2203 sizeof(*new_device_list),
2204 GFP_KERNEL);
2205 if (!new_device_list) {
2206 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2207 rc = -ENOMEM;
2208 goto out;
2209 }
2210
2211 for (i = 0; i < num_new_devices; i++) {
2212 device = kzalloc(sizeof(*device), GFP_KERNEL);
2213 if (!device) {
2214 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2215 out_of_memory_msg);
2216 rc = -ENOMEM;
2217 goto out;
2218 }
2219 list_add_tail(&device->new_device_list_entry,
2220 &new_device_list_head);
2221 }
2222
2223 device = NULL;
2224 num_valid_devices = 0;
2225 physical_index = 0;
2226 logical_index = 0;
2227
2228 for (i = 0; i < num_new_devices; i++) {
2229
2230 if ((!pqi_expose_ld_first && i < num_physicals) ||
2231 (pqi_expose_ld_first && i >= num_logicals)) {
2232 is_physical_device = true;
2233 phys_lun_ext_entry =
2234 &physdev_list->lun_entries[physical_index++];
2235 log_lun_ext_entry = NULL;
2236 scsi3addr = phys_lun_ext_entry->lunid;
2237 } else {
2238 is_physical_device = false;
2239 phys_lun_ext_entry = NULL;
2240 log_lun_ext_entry =
2241 &logdev_list->lun_entries[logical_index++];
2242 scsi3addr = log_lun_ext_entry->lunid;
2243 }
2244
2245 if (is_physical_device && pqi_skip_device(scsi3addr))
2246 continue;
2247
2248 if (device)
2249 device = list_next_entry(device, new_device_list_entry);
2250 else
2251 device = list_first_entry(&new_device_list_head,
2252 struct pqi_scsi_dev, new_device_list_entry);
2253
2254 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2255 device->is_physical_device = is_physical_device;
2256 if (is_physical_device) {
2257 device->device_type = phys_lun_ext_entry->device_type;
2258 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2259 device->is_expander_smp_device = true;
2260 } else {
2261 device->is_external_raid_device =
2262 pqi_is_external_raid_addr(scsi3addr);
2263 }
2264
2265 if (!pqi_is_supported_device(device))
2266 continue;
2267
2268
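 /* Gather information about the device. */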
2269 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2270 if (rc == -ENOMEM) {
2271 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2272 out_of_memory_msg);
2273 goto out;
2274 }
2275 if (rc) {
2276 if (device->is_physical_device)
2277 dev_warn(&ctrl_info->pci_dev->dev,
2278 "obtaining device info failed, skipping physical device %016llx\n",
2279 get_unaligned_be64(&phys_lun_ext_entry->wwid));
2280 else
2281 dev_warn(&ctrl_info->pci_dev->dev,
2282 "obtaining device info failed, skipping logical device %08x%08x\n",
2283 *((u32 *)&device->scsi3addr),
2284 *((u32 *)&device->scsi3addr[4]));
2285 rc = 0;
2286 continue;
2287 }
2288
2289 pqi_assign_bus_target_lun(device);
2290
2291 if (device->is_physical_device) {
2292 pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry);
2293 if ((phys_lun_ext_entry->device_flags &
2294 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2295 phys_lun_ext_entry->aio_handle) {
2296 device->aio_enabled = true;
2297 device->aio_handle =
2298 phys_lun_ext_entry->aio_handle;
2299 }
2300 } else {
2301 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2302 sizeof(device->volume_id));
2303 }
2304
2305 if (pqi_is_device_with_sas_address(device))
2306 device->sas_address = get_unaligned_be64(&device->wwid);
2307
2308 new_device_list[num_valid_devices++] = device;
2309 }
2310
2311 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2312
2313out:
2314 list_for_each_entry_safe(device, next, &new_device_list_head,
2315 new_device_list_entry) {
2316 if (device->keep_device)
2317 continue;
2318 list_del(&device->new_device_list_entry);
2319 pqi_free_device(device);
2320 }
2321
2322 kfree(new_device_list);
2323 kfree(physdev_list);
2324 kfree(logdev_list);
2325 kfree(id_phys);
2326
2327 return rc;
2328}
2329
2330static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2331{
2332 int rc;
 bool mutex_acquired;
2334
2335 if (pqi_ctrl_offline(ctrl_info))
2336 return -ENXIO;
2337
2338 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2339
2340 if (!mutex_acquired) {
2341 if (pqi_ctrl_scan_blocked(ctrl_info))
2342 return -EBUSY;
2343 pqi_schedule_rescan_worker_delayed(ctrl_info);
2344 return -EINPROGRESS;
2345 }
2346
2347 rc = pqi_update_scsi_devices(ctrl_info);
2348 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2349 pqi_schedule_rescan_worker_delayed(ctrl_info);
2350
2351 mutex_unlock(&ctrl_info->scan_mutex);
2352
2353 return rc;
2354}
2355
2356static void pqi_scan_start(struct Scsi_Host *shost)
2357{
2358 struct pqi_ctrl_info *ctrl_info;
2359
2360 ctrl_info = shost_to_hba(shost);
2361
2362 pqi_scan_scsi_devices(ctrl_info);
2363}
2364
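/* Return nonzero when the device scan kicked off by pqi_scan_start() has completed. */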
2367static int pqi_scan_finished(struct Scsi_Host *shost,
2368 unsigned long elapsed_time)
2369{
2370 struct pqi_ctrl_info *ctrl_info;
2371
2372 ctrl_info = shost_priv(shost);
2373
2374 return !mutex_is_locked(&ctrl_info->scan_mutex);
2375}
2376
2377static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2378 struct raid_map *raid_map, u64 first_block)
2379{
2380 u32 volume_blk_size;
2381
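 /*
 * The encryption tweak is based on the logical block address expressed in
 * 512-byte units, so scale first_block when the volume block size is not 512.
 */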
2387 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2388 if (volume_blk_size != 512)
2389 first_block = (first_block * volume_blk_size) / 512;
2390
2391 encryption_info->data_encryption_key_index =
2392 get_unaligned_le16(&raid_map->data_encryption_key_index);
2393 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2394 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2395}
2396
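/*
 * Determine whether an I/O request is eligible for RAID bypass (AIO) based on
 * the volume's RAID level, the transfer direction, and the controller's
 * per-RAID-level write size limits.
 */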
2401static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2402 struct pqi_scsi_dev_raid_map_data *rmd)
2403{
2404 bool is_supported = true;
2405
2406 switch (rmd->raid_level) {
2407 case SA_RAID_0:
2408 break;
2409 case SA_RAID_1:
2410 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2411 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2412 is_supported = false;
2413 break;
2414 case SA_RAID_TRIPLE:
2415 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2416 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2417 is_supported = false;
2418 break;
2419 case SA_RAID_5:
2420 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2421 rmd->data_length > ctrl_info->max_write_raid_5_6))
2422 is_supported = false;
2423 break;
2424 case SA_RAID_6:
2425 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2426 rmd->data_length > ctrl_info->max_write_raid_5_6))
2427 is_supported = false;
2428 break;
2429 default:
2430 is_supported = false;
2431 break;
2432 }
2433
2434 return is_supported;
2435}
2436
2437#define PQI_RAID_BYPASS_INELIGIBLE 1
2438
2439static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2440 struct pqi_scsi_dev_raid_map_data *rmd)
2441{
2442
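 /* Check for a valid opcode and get the LBA and block count. */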
2443 switch (scmd->cmnd[0]) {
2444 case WRITE_6:
2445 rmd->is_write = true;
2446 fallthrough;
2447 case READ_6:
2448 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2449 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2450 rmd->block_cnt = (u32)scmd->cmnd[4];
2451 if (rmd->block_cnt == 0)
2452 rmd->block_cnt = 256;
2453 break;
2454 case WRITE_10:
2455 rmd->is_write = true;
2456 fallthrough;
2457 case READ_10:
2458 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2459 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2460 break;
2461 case WRITE_12:
2462 rmd->is_write = true;
2463 fallthrough;
2464 case READ_12:
2465 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2466 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2467 break;
2468 case WRITE_16:
2469 rmd->is_write = true;
2470 fallthrough;
2471 case READ_16:
2472 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2473 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2474 break;
2475 default:
2476
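 /* Process via the normal (non-bypass) I/O path. */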
2477 return PQI_RAID_BYPASS_INELIGIBLE;
2478 }
2479
2480 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2481
2482 return 0;
2483}
2484
2485static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2486 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2487{
2488#if BITS_PER_LONG == 32
2489 u64 tmpdiv;
2490#endif
2491
2492 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2493
2494
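 /* Check for an invalid block or a wraparound. */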
2495 if (rmd->last_block >=
2496 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2497 rmd->last_block < rmd->first_block)
2498 return PQI_RAID_BYPASS_INELIGIBLE;
2499
2500 rmd->data_disks_per_row =
2501 get_unaligned_le16(&raid_map->data_disks_per_row);
2502 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2503 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2504
2505
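 /* Calculate stripe information for the request. */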
2506 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2507 if (rmd->blocks_per_row == 0)
2508 return PQI_RAID_BYPASS_INELIGIBLE;
2509#if BITS_PER_LONG == 32
2510 tmpdiv = rmd->first_block;
2511 do_div(tmpdiv, rmd->blocks_per_row);
2512 rmd->first_row = tmpdiv;
2513 tmpdiv = rmd->last_block;
2514 do_div(tmpdiv, rmd->blocks_per_row);
2515 rmd->last_row = tmpdiv;
2516 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2517 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2518 tmpdiv = rmd->first_row_offset;
2519 do_div(tmpdiv, rmd->strip_size);
2520 rmd->first_column = tmpdiv;
2521 tmpdiv = rmd->last_row_offset;
2522 do_div(tmpdiv, rmd->strip_size);
2523 rmd->last_column = tmpdiv;
2524#else
2525 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2526 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2527 rmd->first_row_offset = (u32)(rmd->first_block -
2528 (rmd->first_row * rmd->blocks_per_row));
2529 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2530 rmd->blocks_per_row));
2531 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2532 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2533#endif
2534
2535
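 /* If the request is not contained within a single row and column, let the controller handle it. */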
2536 if (rmd->first_row != rmd->last_row ||
2537 rmd->first_column != rmd->last_column)
2538 return PQI_RAID_BYPASS_INELIGIBLE;
2539
2540
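 /* Proceeding with the driver mapping: calculate the map index of the target data drive. */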
2541 rmd->total_disks_per_row = rmd->data_disks_per_row +
2542 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2543 rmd->map_row = ((u32)(rmd->first_row >>
2544 raid_map->parity_rotation_shift)) %
2545 get_unaligned_le16(&raid_map->row_cnt);
2546 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2547 rmd->first_column;
2548
2549 return 0;
2550}
2551
2552static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2553 struct raid_map *raid_map)
2554{
2555#if BITS_PER_LONG == 32
2556 u64 tmpdiv;
2557#endif
2558
2559 if (rmd->blocks_per_row == 0)
2560 return PQI_RAID_BYPASS_INELIGIBLE;
2561
2562
2563
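 /* RAID 50/60: verify that the first and last blocks are in the same RAID group. */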
2564 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2565#if BITS_PER_LONG == 32
2566 tmpdiv = rmd->first_block;
2567 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2568 tmpdiv = rmd->first_group;
2569 do_div(tmpdiv, rmd->blocks_per_row);
2570 rmd->first_group = tmpdiv;
2571 tmpdiv = rmd->last_block;
2572 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2573 tmpdiv = rmd->last_group;
2574 do_div(tmpdiv, rmd->blocks_per_row);
2575 rmd->last_group = tmpdiv;
2576#else
2577 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2578 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2579#endif
2580 if (rmd->first_group != rmd->last_group)
2581 return PQI_RAID_BYPASS_INELIGIBLE;
2582
2583
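 /* Verify that the request is confined to a single row of RAID 5/6. */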
2584#if BITS_PER_LONG == 32
2585 tmpdiv = rmd->first_block;
2586 do_div(tmpdiv, rmd->stripesize);
2587 rmd->first_row = tmpdiv;
2588 rmd->r5or6_first_row = tmpdiv;
2589 tmpdiv = rmd->last_block;
2590 do_div(tmpdiv, rmd->stripesize);
2591 rmd->r5or6_last_row = tmpdiv;
2592#else
2593 rmd->first_row = rmd->r5or6_first_row =
2594 rmd->first_block / rmd->stripesize;
2595 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2596#endif
2597 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2598 return PQI_RAID_BYPASS_INELIGIBLE;
2599
2600
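 /* Verify that the request is confined to a single column. */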
2601#if BITS_PER_LONG == 32
2602 tmpdiv = rmd->first_block;
2603 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2604 tmpdiv = rmd->first_row_offset;
2605 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2606 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2607 tmpdiv = rmd->last_block;
2608 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2609 tmpdiv = rmd->r5or6_last_row_offset;
2610 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2611 tmpdiv = rmd->r5or6_first_row_offset;
2612 do_div(tmpdiv, rmd->strip_size);
2613 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2614 tmpdiv = rmd->r5or6_last_row_offset;
2615 do_div(tmpdiv, rmd->strip_size);
2616 rmd->r5or6_last_column = tmpdiv;
2617#else
2618 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2619 (u32)((rmd->first_block % rmd->stripesize) %
2620 rmd->blocks_per_row);
2621
2622 rmd->r5or6_last_row_offset =
2623 (u32)((rmd->last_block % rmd->stripesize) %
2624 rmd->blocks_per_row);
2625
2626 rmd->first_column =
2627 rmd->r5or6_first_row_offset / rmd->strip_size;
2628 rmd->r5or6_first_column = rmd->first_column;
2629 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2630#endif
2631 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2632 return PQI_RAID_BYPASS_INELIGIBLE;
2633
2634
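 /* Calculate the map index. */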
2635 rmd->map_row =
2636 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2637 get_unaligned_le16(&raid_map->row_cnt);
2638
2639 rmd->map_index = (rmd->first_group *
2640 (get_unaligned_le16(&raid_map->row_cnt) *
2641 rmd->total_disks_per_row)) +
2642 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2643
2644 if (rmd->is_write) {
2645 u32 index;
2646
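 /*
 * Parity entries follow the data entries within each row of the RAID map:
 * round up to the end of the row that contains map_index, then step back
 * over the metadata drives to reach the P (and, for RAID 6, Q) parity
 * handles.
 */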
2656 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2657 index *= rmd->total_disks_per_row;
2658 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2659
2660 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2661 if (rmd->raid_level == SA_RAID_6) {
2662 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2663 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2664 }
2665#if BITS_PER_LONG == 32
2666 tmpdiv = rmd->first_block;
2667 do_div(tmpdiv, rmd->blocks_per_row);
2668 rmd->row = tmpdiv;
2669#else
2670 rmd->row = rmd->first_block / rmd->blocks_per_row;
2671#endif
2672 }
2673
2674 return 0;
2675}
2676
2677static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2678{
2679
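 /* Build the new CDB for the physical disk I/O. */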
2680 if (rmd->disk_block > 0xffffffff) {
2681 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2682 rmd->cdb[1] = 0;
2683 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2684 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2685 rmd->cdb[14] = 0;
2686 rmd->cdb[15] = 0;
2687 rmd->cdb_length = 16;
2688 } else {
2689 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2690 rmd->cdb[1] = 0;
2691 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2692 rmd->cdb[6] = 0;
2693 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2694 rmd->cdb[9] = 0;
2695 rmd->cdb_length = 10;
2696 }
2697}
2698
2699static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2700 struct pqi_scsi_dev_raid_map_data *rmd)
2701{
2702 u32 index;
2703 u32 group;
2704
2705 group = rmd->map_index / rmd->data_disks_per_row;
2706
2707 index = rmd->map_index - (group * rmd->data_disks_per_row);
2708 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2709 index += rmd->data_disks_per_row;
2710 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2711 if (rmd->layout_map_count > 2) {
2712 index += rmd->data_disks_per_row;
2713 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2714 }
2715
2716 rmd->num_it_nexus_entries = rmd->layout_map_count;
2717}
2718
2719static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2720 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2721 struct pqi_queue_group *queue_group)
2722{
2723 int rc;
2724 struct raid_map *raid_map;
2725 u32 group;
2726 u32 next_bypass_group;
2727 struct pqi_encryption_info *encryption_info_ptr;
2728 struct pqi_encryption_info encryption_info;
2729 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2730
2731 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2732 if (rc)
2733 return PQI_RAID_BYPASS_INELIGIBLE;
2734
2735 rmd.raid_level = device->raid_level;
2736
2737 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2738 return PQI_RAID_BYPASS_INELIGIBLE;
2739
2740 if (unlikely(rmd.block_cnt == 0))
2741 return PQI_RAID_BYPASS_INELIGIBLE;
2742
2743 raid_map = device->raid_map;
2744
2745 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2746 if (rc)
2747 return PQI_RAID_BYPASS_INELIGIBLE;
2748
2749 if (device->raid_level == SA_RAID_1 ||
2750 device->raid_level == SA_RAID_TRIPLE) {
2751 if (rmd.is_write) {
2752 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2753 } else {
2754 group = device->next_bypass_group;
2755 next_bypass_group = group + 1;
2756 if (next_bypass_group >= rmd.layout_map_count)
2757 next_bypass_group = 0;
2758 device->next_bypass_group = next_bypass_group;
2759 rmd.map_index += group * rmd.data_disks_per_row;
2760 }
2761 } else if ((device->raid_level == SA_RAID_5 ||
2762 device->raid_level == SA_RAID_6) &&
2763 (rmd.layout_map_count > 1 || rmd.is_write)) {
2764 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2765 if (rc)
2766 return PQI_RAID_BYPASS_INELIGIBLE;
2767 }
2768
2769 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2770 return PQI_RAID_BYPASS_INELIGIBLE;
2771
2772 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2773 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2774 rmd.first_row * rmd.strip_size +
2775 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2776 rmd.disk_block_cnt = rmd.block_cnt;
2777
2778
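 /* Handle differing logical/physical block sizes. */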
2779 if (raid_map->phys_blk_shift) {
2780 rmd.disk_block <<= raid_map->phys_blk_shift;
2781 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2782 }
2783
2784 if (unlikely(rmd.disk_block_cnt > 0xffff))
2785 return PQI_RAID_BYPASS_INELIGIBLE;
2786
2787 pqi_set_aio_cdb(&rmd);
2788
2789 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2790 if (rmd.data_length > device->max_transfer_encrypted)
2791 return PQI_RAID_BYPASS_INELIGIBLE;
2792 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2793 encryption_info_ptr = &encryption_info;
2794 } else {
2795 encryption_info_ptr = NULL;
2796 }
2797
2798 if (rmd.is_write) {
2799 switch (device->raid_level) {
2800 case SA_RAID_1:
2801 case SA_RAID_TRIPLE:
2802 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
2803 encryption_info_ptr, device, &rmd);
2804 case SA_RAID_5:
2805 case SA_RAID_6:
2806 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
2807 encryption_info_ptr, device, &rmd);
2808 }
2809 }
2810
2811 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
2812 rmd.cdb, rmd.cdb_length, queue_group,
2813 encryption_info_ptr, true);
2814}
2815
2816#define PQI_STATUS_IDLE 0x0
2817
2818#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2819#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2820
2821#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2822#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2823#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2824#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2825#define PQI_DEVICE_STATE_ERROR 0x4
2826
2827#define PQI_MODE_READY_TIMEOUT_SECS 30
2828#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2829
2830static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2831{
2832 struct pqi_device_registers __iomem *pqi_registers;
2833 unsigned long timeout;
2834 u64 signature;
2835 u8 status;
2836
2837 pqi_registers = ctrl_info->pqi_registers;
2838 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2839
2840 while (1) {
2841 signature = readq(&pqi_registers->signature);
2842 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2843 sizeof(signature)) == 0)
2844 break;
2845 if (time_after(jiffies, timeout)) {
2846 dev_err(&ctrl_info->pci_dev->dev,
2847 "timed out waiting for PQI signature\n");
2848 return -ETIMEDOUT;
2849 }
2850 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2851 }
2852
2853 while (1) {
2854 status = readb(&pqi_registers->function_and_status_code);
2855 if (status == PQI_STATUS_IDLE)
2856 break;
2857 if (time_after(jiffies, timeout)) {
2858 dev_err(&ctrl_info->pci_dev->dev,
2859 "timed out waiting for PQI IDLE\n");
2860 return -ETIMEDOUT;
2861 }
2862 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2863 }
2864
2865 while (1) {
2866 if (readl(&pqi_registers->device_status) ==
2867 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2868 break;
2869 if (time_after(jiffies, timeout)) {
2870 dev_err(&ctrl_info->pci_dev->dev,
2871 "timed out waiting for PQI all registers ready\n");
2872 return -ETIMEDOUT;
2873 }
2874 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2875 }
2876
2877 return 0;
2878}
2879
2880static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2881{
2882 struct pqi_scsi_dev *device;
2883
2884 device = io_request->scmd->device->hostdata;
2885 device->raid_bypass_enabled = false;
2886 device->aio_enabled = false;
2887}
2888
2889static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2890{
2891 struct pqi_ctrl_info *ctrl_info;
2892 struct pqi_scsi_dev *device;
2893
2894 device = sdev->hostdata;
2895 if (device->device_offline)
2896 return;
2897
2898 device->device_offline = true;
2899 ctrl_info = shost_to_hba(sdev->host);
2900 pqi_schedule_rescan_worker(ctrl_info);
2901 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2902 path, ctrl_info->scsi_host->host_no, device->bus,
2903 device->target, device->lun);
2904}
2905
2906static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2907{
2908 u8 scsi_status;
2909 u8 host_byte;
2910 struct scsi_cmnd *scmd;
2911 struct pqi_raid_error_info *error_info;
2912 size_t sense_data_length;
2913 int residual_count;
2914 int xfer_count;
2915 struct scsi_sense_hdr sshdr;
2916
2917 scmd = io_request->scmd;
2918 if (!scmd)
2919 return;
2920
2921 error_info = io_request->error_info;
2922 scsi_status = error_info->status;
2923 host_byte = DID_OK;
2924
2925 switch (error_info->data_out_result) {
2926 case PQI_DATA_IN_OUT_GOOD:
2927 break;
2928 case PQI_DATA_IN_OUT_UNDERFLOW:
2929 xfer_count =
2930 get_unaligned_le32(&error_info->data_out_transferred);
2931 residual_count = scsi_bufflen(scmd) - xfer_count;
2932 scsi_set_resid(scmd, residual_count);
2933 if (xfer_count < scmd->underflow)
2934 host_byte = DID_SOFT_ERROR;
2935 break;
2936 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2937 case PQI_DATA_IN_OUT_ABORTED:
2938 host_byte = DID_ABORT;
2939 break;
2940 case PQI_DATA_IN_OUT_TIMEOUT:
2941 host_byte = DID_TIME_OUT;
2942 break;
2943 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2944 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2945 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2946 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2947 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2948 case PQI_DATA_IN_OUT_ERROR:
2949 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2950 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2951 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2952 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2953 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2954 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2955 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2956 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2957 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2958 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2959 default:
2960 host_byte = DID_ERROR;
2961 break;
2962 }
2963
2964 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2965 if (sense_data_length == 0)
2966 sense_data_length =
2967 get_unaligned_le16(&error_info->response_data_length);
2968 if (sense_data_length) {
2969 if (sense_data_length > sizeof(error_info->data))
2970 sense_data_length = sizeof(error_info->data);
2971
2972 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2973 scsi_normalize_sense(error_info->data,
2974 sense_data_length, &sshdr) &&
2975 sshdr.sense_key == HARDWARE_ERROR &&
2976 sshdr.asc == 0x3e) {
2977 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2978 struct pqi_scsi_dev *device = scmd->device->hostdata;
2979
2980 switch (sshdr.ascq) {
2981 case 0x1:
2982 if (printk_ratelimit())
2983 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2984 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2985 pqi_take_device_offline(scmd->device, "RAID");
2986 host_byte = DID_NO_CONNECT;
2987 break;
2988
2989 default:
2990 if (printk_ratelimit())
2991 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2992 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2993 break;
2994 }
2995 }
2996
2997 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2998 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2999 memcpy(scmd->sense_buffer, error_info->data,
3000 sense_data_length);
3001 }
3002
3003 scmd->result = scsi_status;
3004 set_host_byte(scmd, host_byte);
3005}
3006
3007static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3008{
3009 u8 scsi_status;
3010 u8 host_byte;
3011 struct scsi_cmnd *scmd;
3012 struct pqi_aio_error_info *error_info;
3013 size_t sense_data_length;
3014 int residual_count;
3015 int xfer_count;
3016 bool device_offline;
3017
3018 scmd = io_request->scmd;
3019 error_info = io_request->error_info;
3020 host_byte = DID_OK;
3021 sense_data_length = 0;
3022 device_offline = false;
3023
3024 switch (error_info->service_response) {
3025 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3026 scsi_status = error_info->status;
3027 break;
3028 case PQI_AIO_SERV_RESPONSE_FAILURE:
3029 switch (error_info->status) {
3030 case PQI_AIO_STATUS_IO_ABORTED:
3031 scsi_status = SAM_STAT_TASK_ABORTED;
3032 break;
3033 case PQI_AIO_STATUS_UNDERRUN:
3034 scsi_status = SAM_STAT_GOOD;
3035 residual_count = get_unaligned_le32(
3036 &error_info->residual_count);
3037 scsi_set_resid(scmd, residual_count);
3038 xfer_count = scsi_bufflen(scmd) - residual_count;
3039 if (xfer_count < scmd->underflow)
3040 host_byte = DID_SOFT_ERROR;
3041 break;
3042 case PQI_AIO_STATUS_OVERRUN:
3043 scsi_status = SAM_STAT_GOOD;
3044 break;
3045 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3046 pqi_aio_path_disabled(io_request);
3047 scsi_status = SAM_STAT_GOOD;
3048 io_request->status = -EAGAIN;
3049 break;
3050 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3051 case PQI_AIO_STATUS_INVALID_DEVICE:
3052 if (!io_request->raid_bypass) {
3053 device_offline = true;
3054 pqi_take_device_offline(scmd->device, "AIO");
3055 host_byte = DID_NO_CONNECT;
3056 }
3057 scsi_status = SAM_STAT_CHECK_CONDITION;
3058 break;
3059 case PQI_AIO_STATUS_IO_ERROR:
3060 default:
3061 scsi_status = SAM_STAT_CHECK_CONDITION;
3062 break;
3063 }
3064 break;
3065 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3066 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3067 scsi_status = SAM_STAT_GOOD;
3068 break;
3069 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3070 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3071 default:
3072 scsi_status = SAM_STAT_CHECK_CONDITION;
3073 break;
3074 }
3075
3076 if (error_info->data_present) {
3077 sense_data_length =
3078 get_unaligned_le16(&error_info->data_length);
3079 if (sense_data_length) {
3080 if (sense_data_length > sizeof(error_info->data))
3081 sense_data_length = sizeof(error_info->data);
3082 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3083 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3084 memcpy(scmd->sense_buffer, error_info->data,
3085 sense_data_length);
3086 }
3087 }
3088
3089 if (device_offline && sense_data_length == 0)
3090 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3091
3092 scmd->result = scsi_status;
3093 set_host_byte(scmd, host_byte);
3094}
3095
3096static void pqi_process_io_error(unsigned int iu_type,
3097 struct pqi_io_request *io_request)
3098{
3099 switch (iu_type) {
3100 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3101 pqi_process_raid_io_error(io_request);
3102 break;
3103 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3104 pqi_process_aio_io_error(io_request);
3105 break;
3106 }
3107}
3108
3109static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3110 struct pqi_task_management_response *response)
3111{
3112 int rc;
3113
3114 switch (response->response_code) {
3115 case SOP_TMF_COMPLETE:
3116 case SOP_TMF_FUNCTION_SUCCEEDED:
3117 rc = 0;
3118 break;
3119 case SOP_TMF_REJECTED:
3120 rc = -EAGAIN;
3121 break;
3122 default:
3123 rc = -EIO;
3124 break;
3125 }
3126
3127 if (rc)
3128 dev_err(&ctrl_info->pci_dev->dev,
3129 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3130
3131 return rc;
3132}
3133
3134static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
3135{
3136 pqi_take_ctrl_offline(ctrl_info);
3137}
3138
3139static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3140{
3141 int num_responses;
3142 pqi_index_t oq_pi;
3143 pqi_index_t oq_ci;
3144 struct pqi_io_request *io_request;
3145 struct pqi_io_response *response;
3146 u16 request_id;
3147
3148 num_responses = 0;
3149 oq_ci = queue_group->oq_ci_copy;
3150
3151 while (1) {
3152 oq_pi = readl(queue_group->oq_pi);
3153 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3154 pqi_invalid_response(ctrl_info);
3155 dev_err(&ctrl_info->pci_dev->dev,
3156 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3157 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3158 return -1;
3159 }
3160 if (oq_pi == oq_ci)
3161 break;
3162
3163 num_responses++;
3164 response = queue_group->oq_element_array +
3165 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3166
3167 request_id = get_unaligned_le16(&response->request_id);
3168 if (request_id >= ctrl_info->max_io_slots) {
3169 pqi_invalid_response(ctrl_info);
3170 dev_err(&ctrl_info->pci_dev->dev,
3171 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3172 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3173 return -1;
3174 }
3175
3176 io_request = &ctrl_info->io_request_pool[request_id];
3177 if (atomic_read(&io_request->refcount) == 0) {
3178 pqi_invalid_response(ctrl_info);
3179 dev_err(&ctrl_info->pci_dev->dev,
3180 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3181 request_id, oq_pi, oq_ci);
3182 return -1;
3183 }
3184
3185 switch (response->header.iu_type) {
3186 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3187 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3188 if (io_request->scmd)
3189 io_request->scmd->result = 0;
3190 fallthrough;
3191 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3192 break;
3193 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3194 io_request->status =
3195 get_unaligned_le16(
3196 &((struct pqi_vendor_general_response *)response)->status);
3197 break;
3198 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3199 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3200 (void *)response);
3201 break;
3202 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3203 pqi_aio_path_disabled(io_request);
3204 io_request->status = -EAGAIN;
3205 break;
3206 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3207 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3208 io_request->error_info = ctrl_info->error_buffer +
3209 (get_unaligned_le16(&response->error_index) *
3210 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3211 pqi_process_io_error(response->header.iu_type, io_request);
3212 break;
3213 default:
3214 pqi_invalid_response(ctrl_info);
3215 dev_err(&ctrl_info->pci_dev->dev,
3216 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3217 response->header.iu_type, oq_pi, oq_ci);
3218 return -1;
3219 }
3220
3221 io_request->io_complete_callback(io_request, io_request->context);
3222
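 /*
 * The I/O request structure must not be touched after the completion
 * callback returns; it may already have been reused.
 */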
3227 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3228 }
3229
3230 if (num_responses) {
3231 queue_group->oq_ci_copy = oq_ci;
3232 writel(oq_ci, queue_group->oq_ci);
3233 }
3234
3235 return num_responses;
3236}
3237
3238static inline unsigned int pqi_num_elements_free(unsigned int pi,
3239 unsigned int ci, unsigned int elements_in_queue)
3240{
3241 unsigned int num_elements_used;
3242
3243 if (pi >= ci)
3244 num_elements_used = pi - ci;
3245 else
3246 num_elements_used = elements_in_queue - ci + pi;
3247
3248 return elements_in_queue - num_elements_used - 1;
3249}
3250
3251static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3252 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3253{
3254 pqi_index_t iq_pi;
3255 pqi_index_t iq_ci;
3256 unsigned long flags;
3257 void *next_element;
3258 struct pqi_queue_group *queue_group;
3259
3260 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3261 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3262
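 /*
 * Wait for a free element on the RAID-path inbound queue, giving up if the
 * controller goes offline.
 */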
3263 while (1) {
3264 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3265
3266 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3267 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3268
3269 if (pqi_num_elements_free(iq_pi, iq_ci,
3270 ctrl_info->num_elements_per_iq))
3271 break;
3272
3273 spin_unlock_irqrestore(
3274 &queue_group->submit_lock[RAID_PATH], flags);
3275
3276 if (pqi_ctrl_offline(ctrl_info))
3277 return;
3278 }
3279
3280 next_element = queue_group->iq_element_array[RAID_PATH] +
3281 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3282
3283 memcpy(next_element, iu, iu_length);
3284
3285 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3286 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3287
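 /*
 * Writing the producer index to the controller's inbound queue PI register
 * notifies the controller that a new element is available.
 */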
3292 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3293
3294 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3295}
3296
3297static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3298 struct pqi_event *event)
3299{
3300 struct pqi_event_acknowledge_request request;
3301
3302 memset(&request, 0, sizeof(request));
3303
3304 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3305 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3306 &request.header.iu_length);
3307 request.event_type = event->event_type;
3308 put_unaligned_le16(event->event_id, &request.event_id);
3309 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3310
3311 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3312}
3313
3314#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3315#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3316
3317static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3318 struct pqi_ctrl_info *ctrl_info)
3319{
3320 u8 status;
3321 unsigned long timeout;
3322
3323 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3324
3325 while (1) {
3326 status = pqi_read_soft_reset_status(ctrl_info);
3327 if (status & PQI_SOFT_RESET_INITIATE)
3328 return RESET_INITIATE_DRIVER;
3329
3330 if (status & PQI_SOFT_RESET_ABORT)
3331 return RESET_ABORT;
3332
3333 if (!sis_is_firmware_running(ctrl_info))
3334 return RESET_NORESPONSE;
3335
3336 if (time_after(jiffies, timeout)) {
3337 dev_warn(&ctrl_info->pci_dev->dev,
3338 "timed out waiting for soft reset status\n");
3339 return RESET_TIMEDOUT;
3340 }
3341
3342 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3343 }
3344}
3345
3346static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3347{
3348 int rc;
3349 unsigned int delay_secs;
3350 enum pqi_soft_reset_status reset_status;
3351
3352 if (ctrl_info->soft_reset_handshake_supported)
3353 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3354 else
3355 reset_status = RESET_INITIATE_FIRMWARE;
3356
3357 delay_secs = PQI_POST_RESET_DELAY_SECS;
3358
3359 switch (reset_status) {
3360 case RESET_TIMEDOUT:
3361 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3362 fallthrough;
3363 case RESET_INITIATE_DRIVER:
3364 dev_info(&ctrl_info->pci_dev->dev,
3365 "Online Firmware Activation: resetting controller\n");
3366 sis_soft_reset(ctrl_info);
3367 fallthrough;
3368 case RESET_INITIATE_FIRMWARE:
3369 ctrl_info->pqi_mode_enabled = false;
3370 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3371 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3372 pqi_ofa_free_host_buffer(ctrl_info);
3373 pqi_ctrl_ofa_done(ctrl_info);
3374 dev_info(&ctrl_info->pci_dev->dev,
3375 "Online Firmware Activation: %s\n",
3376 rc == 0 ? "SUCCESS" : "FAILED");
3377 break;
3378 case RESET_ABORT:
3379 dev_info(&ctrl_info->pci_dev->dev,
3380 "Online Firmware Activation ABORTED\n");
3381 if (ctrl_info->soft_reset_handshake_supported)
3382 pqi_clear_soft_reset_status(ctrl_info);
3383 pqi_ofa_free_host_buffer(ctrl_info);
3384 pqi_ctrl_ofa_done(ctrl_info);
3385 pqi_ofa_ctrl_unquiesce(ctrl_info);
3386 break;
3387 case RESET_NORESPONSE:
3388 fallthrough;
3389 default:
3390 dev_err(&ctrl_info->pci_dev->dev,
3391 "unexpected Online Firmware Activation reset status: 0x%x\n",
3392 reset_status);
3393 pqi_ofa_free_host_buffer(ctrl_info);
3394 pqi_ctrl_ofa_done(ctrl_info);
3395 pqi_ofa_ctrl_unquiesce(ctrl_info);
3396 pqi_take_ctrl_offline(ctrl_info);
3397 break;
3398 }
3399}
3400
3401static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3402{
3403 struct pqi_ctrl_info *ctrl_info;
3404
3405 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3406
3407 pqi_ctrl_ofa_start(ctrl_info);
3408 pqi_ofa_setup_host_buffer(ctrl_info);
3409 pqi_ofa_host_memory_update(ctrl_info);
3410}
3411
3412static void pqi_ofa_quiesce_worker(struct work_struct *work)
3413{
3414 struct pqi_ctrl_info *ctrl_info;
3415 struct pqi_event *event;
3416
3417 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3418
3419 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3420
3421 pqi_ofa_ctrl_quiesce(ctrl_info);
3422 pqi_acknowledge_event(ctrl_info, event);
3423 pqi_process_soft_reset(ctrl_info);
3424}
3425
3426static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3427 struct pqi_event *event)
3428{
3429 bool ack_event;
3430
3431 ack_event = true;
3432
3433 switch (event->event_id) {
3434 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3435 dev_info(&ctrl_info->pci_dev->dev,
3436 "received Online Firmware Activation memory allocation request\n");
3437 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3438 break;
3439 case PQI_EVENT_OFA_QUIESCE:
3440 dev_info(&ctrl_info->pci_dev->dev,
3441 "received Online Firmware Activation quiesce request\n");
3442 schedule_work(&ctrl_info->ofa_quiesce_work);
3443 ack_event = false;
3444 break;
3445 case PQI_EVENT_OFA_CANCELED:
3446 dev_info(&ctrl_info->pci_dev->dev,
3447 "received Online Firmware Activation cancel request: reason: %u\n",
3448 ctrl_info->ofa_cancel_reason);
3449 pqi_ofa_free_host_buffer(ctrl_info);
3450 pqi_ctrl_ofa_done(ctrl_info);
3451 break;
3452 default:
3453 dev_err(&ctrl_info->pci_dev->dev,
3454 "received unknown Online Firmware Activation request: event ID: %u\n",
3455 event->event_id);
3456 break;
3457 }
3458
3459 return ack_event;
3460}
3461
3462static void pqi_event_worker(struct work_struct *work)
3463{
3464 unsigned int i;
3465 bool rescan_needed;
3466 struct pqi_ctrl_info *ctrl_info;
3467 struct pqi_event *event;
3468 bool ack_event;
3469
3470 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3471
3472 pqi_ctrl_busy(ctrl_info);
3473 pqi_wait_if_ctrl_blocked(ctrl_info);
3474 if (pqi_ctrl_offline(ctrl_info))
3475 goto out;
3476
3477 rescan_needed = false;
3478 event = ctrl_info->events;
3479 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3480 if (event->pending) {
3481 event->pending = false;
3482 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3483 ack_event = pqi_ofa_process_event(ctrl_info, event);
3484 } else {
3485 ack_event = true;
3486 rescan_needed = true;
3487 }
3488 if (ack_event)
3489 pqi_acknowledge_event(ctrl_info, event);
3490 }
3491 event++;
3492 }
3493
3494 if (rescan_needed)
3495 pqi_schedule_rescan_worker_delayed(ctrl_info);
3496
3497out:
3498 pqi_ctrl_unbusy(ctrl_info);
3499}
3500
3501#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
3502
3503static void pqi_heartbeat_timer_handler(struct timer_list *t)
3504{
3505 int num_interrupts;
3506 u32 heartbeat_count;
3507 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3508
3509 pqi_check_ctrl_health(ctrl_info);
3510 if (pqi_ctrl_offline(ctrl_info))
3511 return;
3512
3513 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3514 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3515
3516 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3517 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3518 dev_err(&ctrl_info->pci_dev->dev,
3519 "no heartbeat detected - last heartbeat count: %u\n",
3520 heartbeat_count);
3521 pqi_take_ctrl_offline(ctrl_info);
3522 return;
3523 }
3524 } else {
3525 ctrl_info->previous_num_interrupts = num_interrupts;
3526 }
3527
3528 ctrl_info->previous_heartbeat_count = heartbeat_count;
3529 mod_timer(&ctrl_info->heartbeat_timer,
3530 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3531}
3532
3533static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3534{
3535 if (!ctrl_info->heartbeat_counter)
3536 return;
3537
3538 ctrl_info->previous_num_interrupts =
3539 atomic_read(&ctrl_info->num_interrupts);
3540 ctrl_info->previous_heartbeat_count =
3541 pqi_read_heartbeat_counter(ctrl_info);
3542
3543 ctrl_info->heartbeat_timer.expires =
3544 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3545 add_timer(&ctrl_info->heartbeat_timer);
3546}
3547
3548static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3549{
3550 del_timer_sync(&ctrl_info->heartbeat_timer);
3551}
3552
3553static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3554 struct pqi_event *event, struct pqi_event_response *response)
3555{
3556 switch (event->event_id) {
3557 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3558 ctrl_info->ofa_bytes_requested =
3559 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3560 break;
3561 case PQI_EVENT_OFA_CANCELED:
3562 ctrl_info->ofa_cancel_reason =
3563 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3564 break;
3565 }
3566}
3567
3568static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3569{
3570 int num_events;
3571 pqi_index_t oq_pi;
3572 pqi_index_t oq_ci;
3573 struct pqi_event_queue *event_queue;
3574 struct pqi_event_response *response;
3575 struct pqi_event *event;
3576 int event_index;
3577
3578 event_queue = &ctrl_info->event_queue;
3579 num_events = 0;
3580 oq_ci = event_queue->oq_ci_copy;
3581
3582 while (1) {
3583 oq_pi = readl(event_queue->oq_pi);
3584 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3585 pqi_invalid_response(ctrl_info);
3586 dev_err(&ctrl_info->pci_dev->dev,
3587 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3588 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3589 return -1;
3590 }
3591
3592 if (oq_pi == oq_ci)
3593 break;
3594
3595 num_events++;
3596 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3597
3598 event_index = pqi_event_type_to_event_index(response->event_type);
3599
3600 if (event_index >= 0 && response->request_acknowledge) {
3601 event = &ctrl_info->events[event_index];
3602 event->pending = true;
3603 event->event_type = response->event_type;
3604 event->event_id = get_unaligned_le16(&response->event_id);
3605 event->additional_event_id =
3606 get_unaligned_le32(&response->additional_event_id);
3607 if (event->event_type == PQI_EVENT_TYPE_OFA)
3608 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3609 }
3610
3611 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3612 }
3613
3614 if (num_events) {
3615 event_queue->oq_ci_copy = oq_ci;
3616 writel(oq_ci, event_queue->oq_ci);
3617 schedule_work(&ctrl_info->event_work);
3618 }
3619
3620 return num_events;
3621}
3622
3623#define PQI_LEGACY_INTX_MASK 0x1
3624
3625static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3626{
3627 u32 intx_mask;
3628 struct pqi_device_registers __iomem *pqi_registers;
3629 volatile void __iomem *register_addr;
3630
3631 pqi_registers = ctrl_info->pqi_registers;
3632
3633 if (enable_intx)
3634 register_addr = &pqi_registers->legacy_intx_mask_clear;
3635 else
3636 register_addr = &pqi_registers->legacy_intx_mask_set;
3637
3638 intx_mask = readl(register_addr);
3639 intx_mask |= PQI_LEGACY_INTX_MASK;
3640 writel(intx_mask, register_addr);
3641}
3642
3643static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3644 enum pqi_irq_mode new_mode)
3645{
3646 switch (ctrl_info->irq_mode) {
3647 case IRQ_MODE_MSIX:
3648 switch (new_mode) {
3649 case IRQ_MODE_MSIX:
3650 break;
3651 case IRQ_MODE_INTX:
3652 pqi_configure_legacy_intx(ctrl_info, true);
3653 sis_enable_intx(ctrl_info);
3654 break;
3655 case IRQ_MODE_NONE:
3656 break;
3657 }
3658 break;
3659 case IRQ_MODE_INTX:
3660 switch (new_mode) {
3661 case IRQ_MODE_MSIX:
3662 pqi_configure_legacy_intx(ctrl_info, false);
3663 sis_enable_msix(ctrl_info);
3664 break;
3665 case IRQ_MODE_INTX:
3666 break;
3667 case IRQ_MODE_NONE:
3668 pqi_configure_legacy_intx(ctrl_info, false);
3669 break;
3670 }
3671 break;
3672 case IRQ_MODE_NONE:
3673 switch (new_mode) {
3674 case IRQ_MODE_MSIX:
3675 sis_enable_msix(ctrl_info);
3676 break;
3677 case IRQ_MODE_INTX:
3678 pqi_configure_legacy_intx(ctrl_info, true);
3679 sis_enable_intx(ctrl_info);
3680 break;
3681 case IRQ_MODE_NONE:
3682 break;
3683 }
3684 break;
3685 }
3686
3687 ctrl_info->irq_mode = new_mode;
3688}
3689
3690#define PQI_LEGACY_INTX_PENDING 0x1
3691
3692static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3693{
3694 bool valid_irq;
3695 u32 intx_status;
3696
3697 switch (ctrl_info->irq_mode) {
3698 case IRQ_MODE_MSIX:
3699 valid_irq = true;
3700 break;
3701 case IRQ_MODE_INTX:
3702 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3703 if (intx_status & PQI_LEGACY_INTX_PENDING)
3704 valid_irq = true;
3705 else
3706 valid_irq = false;
3707 break;
3708 case IRQ_MODE_NONE:
3709 default:
3710 valid_irq = false;
3711 break;
3712 }
3713
3714 return valid_irq;
3715}
3716
3717static irqreturn_t pqi_irq_handler(int irq, void *data)
3718{
3719 struct pqi_ctrl_info *ctrl_info;
3720 struct pqi_queue_group *queue_group;
3721 int num_io_responses_handled;
3722 int num_events_handled;
3723
3724 queue_group = data;
3725 ctrl_info = queue_group->ctrl_info;
3726
3727 if (!pqi_is_valid_irq(ctrl_info))
3728 return IRQ_NONE;
3729
3730 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3731 if (num_io_responses_handled < 0)
3732 goto out;
3733
3734 if (irq == ctrl_info->event_irq) {
3735 num_events_handled = pqi_process_event_intr(ctrl_info);
3736 if (num_events_handled < 0)
3737 goto out;
3738 } else {
3739 num_events_handled = 0;
3740 }
3741
3742 if (num_io_responses_handled + num_events_handled > 0)
3743 atomic_inc(&ctrl_info->num_interrupts);
3744
3745 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3746 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3747
3748out:
3749 return IRQ_HANDLED;
3750}
3751
3752static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3753{
3754 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3755 int i;
3756 int rc;
3757
3758 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3759
3760 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3761 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3762 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3763 if (rc) {
3764 dev_err(&pci_dev->dev,
3765 "irq %u init failed with error %d\n",
3766 pci_irq_vector(pci_dev, i), rc);
3767 return rc;
3768 }
3769 ctrl_info->num_msix_vectors_initialized++;
3770 }
3771
3772 return 0;
3773}
3774
3775static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3776{
3777 int i;
3778
3779 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3780 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3781 &ctrl_info->queue_groups[i]);
3782
3783 ctrl_info->num_msix_vectors_initialized = 0;
3784}
3785
3786static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3787{
3788 int num_vectors_enabled;
3789
3790 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3791 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3792 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3793 if (num_vectors_enabled < 0) {
3794 dev_err(&ctrl_info->pci_dev->dev,
3795 "MSI-X init failed with error %d\n",
3796 num_vectors_enabled);
3797 return num_vectors_enabled;
3798 }
3799
3800 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3801 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3802 return 0;
3803}
3804
3805static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3806{
3807 if (ctrl_info->num_msix_vectors_enabled) {
3808 pci_free_irq_vectors(ctrl_info->pci_dev);
3809 ctrl_info->num_msix_vectors_enabled = 0;
3810 }
3811}
3812
3813static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3814{
3815 unsigned int i;
3816 size_t alloc_length;
3817 size_t element_array_length_per_iq;
3818 size_t element_array_length_per_oq;
3819 void *element_array;
3820 void __iomem *next_queue_index;
3821 void *aligned_pointer;
3822 unsigned int num_inbound_queues;
3823 unsigned int num_outbound_queues;
3824 unsigned int num_queue_indexes;
3825 struct pqi_queue_group *queue_group;
3826
3827 element_array_length_per_iq =
3828 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3829 ctrl_info->num_elements_per_iq;
3830 element_array_length_per_oq =
3831 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3832 ctrl_info->num_elements_per_oq;
3833 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3834 num_outbound_queues = ctrl_info->num_queue_groups;
3835 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3836
3837 aligned_pointer = NULL;
3838
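/*
 * First pass: walk the queue layout with a NULL base pointer to compute
 * the total allocation size, honoring the element array and queue index
 * alignment requirements.
 */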
3839 for (i = 0; i < num_inbound_queues; i++) {
3840 aligned_pointer = PTR_ALIGN(aligned_pointer,
3841 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3842 aligned_pointer += element_array_length_per_iq;
3843 }
3844
3845 for (i = 0; i < num_outbound_queues; i++) {
3846 aligned_pointer = PTR_ALIGN(aligned_pointer,
3847 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3848 aligned_pointer += element_array_length_per_oq;
3849 }
3850
3851 aligned_pointer = PTR_ALIGN(aligned_pointer,
3852 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3853 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3854 PQI_EVENT_OQ_ELEMENT_LENGTH;
3855
3856 for (i = 0; i < num_queue_indexes; i++) {
3857 aligned_pointer = PTR_ALIGN(aligned_pointer,
3858 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3859 aligned_pointer += sizeof(pqi_index_t);
3860 }
3861
3862 alloc_length = (size_t)aligned_pointer +
3863 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3864
3865 alloc_length += PQI_EXTRA_SGL_MEMORY;
3866
3867 ctrl_info->queue_memory_base =
3868 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3869 &ctrl_info->queue_memory_base_dma_handle,
3870 GFP_KERNEL);
3871
3872 if (!ctrl_info->queue_memory_base)
3873 return -ENOMEM;
3874
3875 ctrl_info->queue_memory_length = alloc_length;
3876
3877 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3878 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3879
3880 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3881 queue_group = &ctrl_info->queue_groups[i];
3882 queue_group->iq_element_array[RAID_PATH] = element_array;
3883 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3884 ctrl_info->queue_memory_base_dma_handle +
3885 (element_array - ctrl_info->queue_memory_base);
3886 element_array += element_array_length_per_iq;
3887 element_array = PTR_ALIGN(element_array,
3888 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3889 queue_group->iq_element_array[AIO_PATH] = element_array;
3890 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3891 ctrl_info->queue_memory_base_dma_handle +
3892 (element_array - ctrl_info->queue_memory_base);
3893 element_array += element_array_length_per_iq;
3894 element_array = PTR_ALIGN(element_array,
3895 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3896 }
3897
3898 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3899 queue_group = &ctrl_info->queue_groups[i];
3900 queue_group->oq_element_array = element_array;
3901 queue_group->oq_element_array_bus_addr =
3902 ctrl_info->queue_memory_base_dma_handle +
3903 (element_array - ctrl_info->queue_memory_base);
3904 element_array += element_array_length_per_oq;
3905 element_array = PTR_ALIGN(element_array,
3906 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3907 }
3908
3909 ctrl_info->event_queue.oq_element_array = element_array;
3910 ctrl_info->event_queue.oq_element_array_bus_addr =
3911 ctrl_info->queue_memory_base_dma_handle +
3912 (element_array - ctrl_info->queue_memory_base);
3913 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3914 PQI_EVENT_OQ_ELEMENT_LENGTH;
3915
3916 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3917 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3918
3919 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3920 queue_group = &ctrl_info->queue_groups[i];
3921 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3922 queue_group->iq_ci_bus_addr[RAID_PATH] =
3923 ctrl_info->queue_memory_base_dma_handle +
3924 (next_queue_index -
3925 (void __iomem *)ctrl_info->queue_memory_base);
3926 next_queue_index += sizeof(pqi_index_t);
3927 next_queue_index = PTR_ALIGN(next_queue_index,
3928 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3929 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3930 queue_group->iq_ci_bus_addr[AIO_PATH] =
3931 ctrl_info->queue_memory_base_dma_handle +
3932 (next_queue_index -
3933 (void __iomem *)ctrl_info->queue_memory_base);
3934 next_queue_index += sizeof(pqi_index_t);
3935 next_queue_index = PTR_ALIGN(next_queue_index,
3936 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3937 queue_group->oq_pi = next_queue_index;
3938 queue_group->oq_pi_bus_addr =
3939 ctrl_info->queue_memory_base_dma_handle +
3940 (next_queue_index -
3941 (void __iomem *)ctrl_info->queue_memory_base);
3942 next_queue_index += sizeof(pqi_index_t);
3943 next_queue_index = PTR_ALIGN(next_queue_index,
3944 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3945 }
3946
3947 ctrl_info->event_queue.oq_pi = next_queue_index;
3948 ctrl_info->event_queue.oq_pi_bus_addr =
3949 ctrl_info->queue_memory_base_dma_handle +
3950 (next_queue_index -
3951 (void __iomem *)ctrl_info->queue_memory_base);
3952
3953 return 0;
3954}
3955
3956static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3957{
3958 unsigned int i;
3959 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3960 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3961
3962 /*
3963  * Initialize the backpointers to the controller structure in
3964  * each operational queue group structure.
3965  */
3966 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3967 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3968
3969 /*
3970  * Assign IDs to all operational queues.  Note that the IDs
3971  * assigned to operational IQs are independent of the IDs
3972  * assigned to operational OQs.
3973  */
3974 ctrl_info->event_queue.oq_id = next_oq_id++;
3975 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3976 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3977 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3978 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3979 }
3980
3981 /*
3982  * Assign MSI-X table entry indexes to all queues.  Note that the
3983  * interrupt for the event queue is shared with the first queue group.
3984  */
3985 ctrl_info->event_queue.int_msg_num = 0;
3986 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3987 ctrl_info->queue_groups[i].int_msg_num = i;
3988
3989 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3990 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3991 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3992 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3993 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3994 }
3995}
3996
3997static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3998{
3999 size_t alloc_length;
4000 struct pqi_admin_queues_aligned *admin_queues_aligned;
4001 struct pqi_admin_queues *admin_queues;
4002
4003 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4004 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4005
4006 ctrl_info->admin_queue_memory_base =
4007 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4008 &ctrl_info->admin_queue_memory_base_dma_handle,
4009 GFP_KERNEL);
4010
4011 if (!ctrl_info->admin_queue_memory_base)
4012 return -ENOMEM;
4013
4014 ctrl_info->admin_queue_memory_length = alloc_length;
4015
4016 admin_queues = &ctrl_info->admin_queues;
4017 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4018 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4019 admin_queues->iq_element_array =
4020 &admin_queues_aligned->iq_element_array;
4021 admin_queues->oq_element_array =
4022 &admin_queues_aligned->oq_element_array;
4023 admin_queues->iq_ci =
4024 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4025 admin_queues->oq_pi =
4026 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4027
4028 admin_queues->iq_element_array_bus_addr =
4029 ctrl_info->admin_queue_memory_base_dma_handle +
4030 (admin_queues->iq_element_array -
4031 ctrl_info->admin_queue_memory_base);
4032 admin_queues->oq_element_array_bus_addr =
4033 ctrl_info->admin_queue_memory_base_dma_handle +
4034 (admin_queues->oq_element_array -
4035 ctrl_info->admin_queue_memory_base);
4036 admin_queues->iq_ci_bus_addr =
4037 ctrl_info->admin_queue_memory_base_dma_handle +
4038 ((void __iomem *)admin_queues->iq_ci -
4039 (void __iomem *)ctrl_info->admin_queue_memory_base);
4040 admin_queues->oq_pi_bus_addr =
4041 ctrl_info->admin_queue_memory_base_dma_handle +
4042 ((void __iomem *)admin_queues->oq_pi -
4043 (void __iomem *)ctrl_info->admin_queue_memory_base);
4044
4045 return 0;
4046}
4047
4048#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
4049#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4050
4051static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4052{
4053 struct pqi_device_registers __iomem *pqi_registers;
4054 struct pqi_admin_queues *admin_queues;
4055 unsigned long timeout;
4056 u8 status;
4057 u32 reg;
4058
4059 pqi_registers = ctrl_info->pqi_registers;
4060 admin_queues = &ctrl_info->admin_queues;
4061
4062 writeq((u64)admin_queues->iq_element_array_bus_addr,
4063 &pqi_registers->admin_iq_element_array_addr);
4064 writeq((u64)admin_queues->oq_element_array_bus_addr,
4065 &pqi_registers->admin_oq_element_array_addr);
4066 writeq((u64)admin_queues->iq_ci_bus_addr,
4067 &pqi_registers->admin_iq_ci_addr);
4068 writeq((u64)admin_queues->oq_pi_bus_addr,
4069 &pqi_registers->admin_oq_pi_addr);
4070
4071 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4072 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4073 (admin_queues->int_msg_num << 16);
4074 writel(reg, &pqi_registers->admin_iq_num_elements);
4075
4076 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4077 &pqi_registers->function_and_status_code);
4078
4079 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4080 while (1) {
4081 status = readb(&pqi_registers->function_and_status_code);
4082 if (status == PQI_STATUS_IDLE)
4083 break;
4084 if (time_after(jiffies, timeout))
4085 return -ETIMEDOUT;
4086 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4087 }
4088
4089 /*
4090  * The offset registers are not initialized to the correct
4091  * offsets until *after* the create admin queue pair command
4092  * completes successfully.
4093  */
4094 admin_queues->iq_pi = ctrl_info->iomem_base +
4095 PQI_DEVICE_REGISTERS_OFFSET +
4096 readq(&pqi_registers->admin_iq_pi_offset);
4097 admin_queues->oq_ci = ctrl_info->iomem_base +
4098 PQI_DEVICE_REGISTERS_OFFSET +
4099 readq(&pqi_registers->admin_oq_ci_offset);
4100
4101 return 0;
4102}
4103
4104static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4105 struct pqi_general_admin_request *request)
4106{
4107 struct pqi_admin_queues *admin_queues;
4108 void *next_element;
4109 pqi_index_t iq_pi;
4110
4111 admin_queues = &ctrl_info->admin_queues;
4112 iq_pi = admin_queues->iq_pi_copy;
4113
4114 next_element = admin_queues->iq_element_array +
4115 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4116
4117 memcpy(next_element, request, sizeof(*request));
4118
4119 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4120 admin_queues->iq_pi_copy = iq_pi;
4121
4122 /*
4123  * This write notifies the controller that an IU is available to be
4124  * processed.
4125  */
4126 writel(iq_pi, admin_queues->iq_pi);
4127}
4128
4129#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4130
4131static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4132 struct pqi_general_admin_response *response)
4133{
4134 struct pqi_admin_queues *admin_queues;
4135 pqi_index_t oq_pi;
4136 pqi_index_t oq_ci;
4137 unsigned long timeout;
4138
4139 admin_queues = &ctrl_info->admin_queues;
4140 oq_ci = admin_queues->oq_ci_copy;
4141
4142 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
4143
4144 while (1) {
4145 oq_pi = readl(admin_queues->oq_pi);
4146 if (oq_pi != oq_ci)
4147 break;
4148 if (time_after(jiffies, timeout)) {
4149 dev_err(&ctrl_info->pci_dev->dev,
4150 "timed out waiting for admin response\n");
4151 return -ETIMEDOUT;
4152 }
4153 if (!sis_is_firmware_running(ctrl_info))
4154 return -ENXIO;
4155 usleep_range(1000, 2000);
4156 }
4157
4158 memcpy(response, admin_queues->oq_element_array +
4159 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4160
4161 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4162 admin_queues->oq_ci_copy = oq_ci;
4163 writel(oq_ci, admin_queues->oq_ci);
4164
4165 return 0;
4166}
4167
4168static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4169 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4170 struct pqi_io_request *io_request)
4171{
4172 struct pqi_io_request *next;
4173 void *next_element;
4174 pqi_index_t iq_pi;
4175 pqi_index_t iq_ci;
4176 size_t iu_length;
4177 unsigned long flags;
4178 unsigned int num_elements_needed;
4179 unsigned int num_elements_to_end_of_queue;
4180 size_t copy_count;
4181 struct pqi_iu_header *request;
4182
4183 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4184
4185 if (io_request) {
4186 io_request->queue_group = queue_group;
4187 list_add_tail(&io_request->request_list_entry,
4188 &queue_group->request_list[path]);
4189 }
4190
4191 iq_pi = queue_group->iq_pi_copy[path];
4192
4193 list_for_each_entry_safe(io_request, next,
4194 &queue_group->request_list[path], request_list_entry) {
4195
4196 request = io_request->iu;
4197
4198 iu_length = get_unaligned_le16(&request->iu_length) +
4199 PQI_REQUEST_HEADER_LENGTH;
4200 num_elements_needed =
4201 DIV_ROUND_UP(iu_length,
4202 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4203
4204 iq_ci = readl(queue_group->iq_ci[path]);
4205
4206 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4207 ctrl_info->num_elements_per_iq))
4208 break;
4209
4210 put_unaligned_le16(queue_group->oq_id,
4211 &request->response_queue_id);
4212
4213 next_element = queue_group->iq_element_array[path] +
4214 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4215
4216 num_elements_to_end_of_queue =
4217 ctrl_info->num_elements_per_iq - iq_pi;
4218
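/*
 * The IU may wrap around the end of the circular element array; if so,
 * split the copy into two pieces.
 */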
4219 if (num_elements_needed <= num_elements_to_end_of_queue) {
4220 memcpy(next_element, request, iu_length);
4221 } else {
4222 copy_count = num_elements_to_end_of_queue *
4223 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4224 memcpy(next_element, request, copy_count);
4225 memcpy(queue_group->iq_element_array[path],
4226 (u8 *)request + copy_count,
4227 iu_length - copy_count);
4228 }
4229
4230 iq_pi = (iq_pi + num_elements_needed) %
4231 ctrl_info->num_elements_per_iq;
4232
4233 list_del(&io_request->request_list_entry);
4234 }
4235
4236 if (iq_pi != queue_group->iq_pi_copy[path]) {
4237 queue_group->iq_pi_copy[path] = iq_pi;
4238 /*
4239  * This write notifies the controller that one or more IUs are
4240  * available to be processed.
4241  */
4242 writel(iq_pi, queue_group->iq_pi[path]);
4243 }
4244
4245 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4246}
4247
4248#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4249
4250static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4251 struct completion *wait)
4252{
4253 int rc;
4254
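/*
 * Wait in bounded intervals so that a controller lockup can be detected
 * while the request is still outstanding.
 */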
4255 while (1) {
4256 if (wait_for_completion_io_timeout(wait,
4257 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
4258 rc = 0;
4259 break;
4260 }
4261
4262 pqi_check_ctrl_health(ctrl_info);
4263 if (pqi_ctrl_offline(ctrl_info)) {
4264 rc = -ENXIO;
4265 break;
4266 }
4267 }
4268
4269 return rc;
4270}
4271
4272static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4273 void *context)
4274{
4275 struct completion *waiting = context;
4276
4277 complete(waiting);
4278}
4279
4280static int pqi_process_raid_io_error_synchronous(
4281 struct pqi_raid_error_info *error_info)
4282{
4283 int rc = -EIO;
4284
4285 switch (error_info->data_out_result) {
4286 case PQI_DATA_IN_OUT_GOOD:
4287 if (error_info->status == SAM_STAT_GOOD)
4288 rc = 0;
4289 break;
4290 case PQI_DATA_IN_OUT_UNDERFLOW:
4291 if (error_info->status == SAM_STAT_GOOD ||
4292 error_info->status == SAM_STAT_CHECK_CONDITION)
4293 rc = 0;
4294 break;
4295 case PQI_DATA_IN_OUT_ABORTED:
4296 rc = PQI_CMD_STATUS_ABORTED;
4297 break;
4298 }
4299
4300 return rc;
4301}
4302
4303static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4304{
4305 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4306}
4307
4308static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4309 struct pqi_iu_header *request, unsigned int flags,
4310 struct pqi_raid_error_info *error_info)
4311{
4312 int rc = 0;
4313 struct pqi_io_request *io_request;
4314 size_t iu_length;
4315 DECLARE_COMPLETION_ONSTACK(wait);
4316
4317 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4318 if (down_interruptible(&ctrl_info->sync_request_sem))
4319 return -ERESTARTSYS;
4320 } else {
4321 down(&ctrl_info->sync_request_sem);
4322 }
4323
4324 pqi_ctrl_busy(ctrl_info);
4325 /*
4326  * Wait for other outstanding admin operations such as config table
4327  * changes and OFA host memory updates to complete first.
4328  */
4329 if (pqi_is_blockable_request(request))
4330 pqi_wait_if_ctrl_blocked(ctrl_info);
4331
4332 if (pqi_ctrl_offline(ctrl_info)) {
4333 rc = -ENXIO;
4334 goto out;
4335 }
4336
4337 io_request = pqi_alloc_io_request(ctrl_info);
4338
4339 put_unaligned_le16(io_request->index,
4340 &(((struct pqi_raid_path_request *)request)->request_id));
4341
4342 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4343 ((struct pqi_raid_path_request *)request)->error_index =
4344 ((struct pqi_raid_path_request *)request)->request_id;
4345
4346 iu_length = get_unaligned_le16(&request->iu_length) +
4347 PQI_REQUEST_HEADER_LENGTH;
4348 memcpy(io_request->iu, request, iu_length);
4349
4350 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4351 io_request->context = &wait;
4352
4353 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4354 io_request);
4355
4356 pqi_wait_for_completion_io(ctrl_info, &wait);
4357
4358 if (error_info) {
4359 if (io_request->error_info)
4360 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4361 else
4362 memset(error_info, 0, sizeof(*error_info));
4363 } else if (rc == 0 && io_request->error_info) {
4364 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4365 }
4366
4367 pqi_free_io_request(io_request);
4368
4369out:
4370 pqi_ctrl_unbusy(ctrl_info);
4371 up(&ctrl_info->sync_request_sem);
4372
4373 return rc;
4374}
4375
4376static int pqi_validate_admin_response(
4377 struct pqi_general_admin_response *response, u8 expected_function_code)
4378{
4379 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4380 return -EINVAL;
4381
4382 if (get_unaligned_le16(&response->header.iu_length) !=
4383 PQI_GENERAL_ADMIN_IU_LENGTH)
4384 return -EINVAL;
4385
4386 if (response->function_code != expected_function_code)
4387 return -EINVAL;
4388
4389 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4390 return -EINVAL;
4391
4392 return 0;
4393}
4394
4395static int pqi_submit_admin_request_synchronous(
4396 struct pqi_ctrl_info *ctrl_info,
4397 struct pqi_general_admin_request *request,
4398 struct pqi_general_admin_response *response)
4399{
4400 int rc;
4401
4402 pqi_submit_admin_request(ctrl_info, request);
4403
4404 rc = pqi_poll_for_admin_response(ctrl_info, response);
4405
4406 if (rc == 0)
4407 rc = pqi_validate_admin_response(response, request->function_code);
4408
4409 return rc;
4410}
4411
4412static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4413{
4414 int rc;
4415 struct pqi_general_admin_request request;
4416 struct pqi_general_admin_response response;
4417 struct pqi_device_capability *capability;
4418 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4419
4420 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4421 if (!capability)
4422 return -ENOMEM;
4423
4424 memset(&request, 0, sizeof(request));
4425
4426 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4427 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4428 &request.header.iu_length);
4429 request.function_code =
4430 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4431 put_unaligned_le32(sizeof(*capability),
4432 &request.data.report_device_capability.buffer_length);
4433
4434 rc = pqi_map_single(ctrl_info->pci_dev,
4435 &request.data.report_device_capability.sg_descriptor,
4436 capability, sizeof(*capability),
4437 DMA_FROM_DEVICE);
4438 if (rc)
4439 goto out;
4440
4441 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4442
4443 pqi_pci_unmap(ctrl_info->pci_dev,
4444 &request.data.report_device_capability.sg_descriptor, 1,
4445 DMA_FROM_DEVICE);
4446
4447 if (rc)
4448 goto out;
4449
4450 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4451 rc = -EIO;
4452 goto out;
4453 }
4454
4455 ctrl_info->max_inbound_queues =
4456 get_unaligned_le16(&capability->max_inbound_queues);
4457 ctrl_info->max_elements_per_iq =
4458 get_unaligned_le16(&capability->max_elements_per_iq);
4459 ctrl_info->max_iq_element_length =
4460 get_unaligned_le16(&capability->max_iq_element_length)
4461 * 16;
4462 ctrl_info->max_outbound_queues =
4463 get_unaligned_le16(&capability->max_outbound_queues);
4464 ctrl_info->max_elements_per_oq =
4465 get_unaligned_le16(&capability->max_elements_per_oq);
4466 ctrl_info->max_oq_element_length =
4467 get_unaligned_le16(&capability->max_oq_element_length)
4468 * 16;
4469
4470 sop_iu_layer_descriptor =
4471 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4472
4473 ctrl_info->max_inbound_iu_length_per_firmware =
4474 get_unaligned_le16(
4475 &sop_iu_layer_descriptor->max_inbound_iu_length);
4476 ctrl_info->inbound_spanning_supported =
4477 sop_iu_layer_descriptor->inbound_spanning_supported;
4478 ctrl_info->outbound_spanning_supported =
4479 sop_iu_layer_descriptor->outbound_spanning_supported;
4480
4481out:
4482 kfree(capability);
4483
4484 return rc;
4485}
4486
4487static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4488{
4489 if (ctrl_info->max_iq_element_length <
4490 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4491 dev_err(&ctrl_info->pci_dev->dev,
4492 "max. inbound queue element length of %d is less than the required length of %d\n",
4493 ctrl_info->max_iq_element_length,
4494 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4495 return -EINVAL;
4496 }
4497
4498 if (ctrl_info->max_oq_element_length <
4499 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4500 dev_err(&ctrl_info->pci_dev->dev,
4501 "max. outbound queue element length of %d is less than the required length of %d\n",
4502 ctrl_info->max_oq_element_length,
4503 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4504 return -EINVAL;
4505 }
4506
4507 if (ctrl_info->max_inbound_iu_length_per_firmware <
4508 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4509 dev_err(&ctrl_info->pci_dev->dev,
4510 "max. inbound IU length of %u is less than the min. required length of %d\n",
4511 ctrl_info->max_inbound_iu_length_per_firmware,
4512 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4513 return -EINVAL;
4514 }
4515
4516 if (!ctrl_info->inbound_spanning_supported) {
4517 dev_err(&ctrl_info->pci_dev->dev,
4518 "the controller does not support inbound spanning\n");
4519 return -EINVAL;
4520 }
4521
4522 if (ctrl_info->outbound_spanning_supported) {
4523 dev_err(&ctrl_info->pci_dev->dev,
4524 "the controller supports outbound spanning but this driver does not\n");
4525 return -EINVAL;
4526 }
4527
4528 return 0;
4529}
4530
4531static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4532{
4533 int rc;
4534 struct pqi_event_queue *event_queue;
4535 struct pqi_general_admin_request request;
4536 struct pqi_general_admin_response response;
4537
4538 event_queue = &ctrl_info->event_queue;
4539
4540 /*
4541  * Create OQ (Outbound Queue - device to host queue) dedicated
4542  * to events.
4543  */
4544 memset(&request, 0, sizeof(request));
4545 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4546 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4547 &request.header.iu_length);
4548 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4549 put_unaligned_le16(event_queue->oq_id,
4550 &request.data.create_operational_oq.queue_id);
4551 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4552 &request.data.create_operational_oq.element_array_addr);
4553 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4554 &request.data.create_operational_oq.pi_addr);
4555 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4556 &request.data.create_operational_oq.num_elements);
4557 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4558 &request.data.create_operational_oq.element_length);
4559 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4560 put_unaligned_le16(event_queue->int_msg_num,
4561 &request.data.create_operational_oq.int_msg_num);
4562
4563 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4564 &response);
4565 if (rc)
4566 return rc;
4567
4568 event_queue->oq_ci = ctrl_info->iomem_base +
4569 PQI_DEVICE_REGISTERS_OFFSET +
4570 get_unaligned_le64(
4571 &response.data.create_operational_oq.oq_ci_offset);
4572
4573 return 0;
4574}
4575
4576static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4577 unsigned int group_number)
4578{
4579 int rc;
4580 struct pqi_queue_group *queue_group;
4581 struct pqi_general_admin_request request;
4582 struct pqi_general_admin_response response;
4583
4584 queue_group = &ctrl_info->queue_groups[group_number];
4585
4586 /*
4587  * Create IQ (Inbound Queue - host to device queue) for the
4588  * RAID path.
4589  */
4590 memset(&request, 0, sizeof(request));
4591 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4592 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4593 &request.header.iu_length);
4594 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4595 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4596 &request.data.create_operational_iq.queue_id);
4597 put_unaligned_le64(
4598 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4599 &request.data.create_operational_iq.element_array_addr);
4600 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4601 &request.data.create_operational_iq.ci_addr);
4602 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4603 &request.data.create_operational_iq.num_elements);
4604 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4605 &request.data.create_operational_iq.element_length);
4606 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4607
4608 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4609 &response);
4610 if (rc) {
4611 dev_err(&ctrl_info->pci_dev->dev,
4612 "error creating inbound RAID queue\n");
4613 return rc;
4614 }
4615
4616 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4617 PQI_DEVICE_REGISTERS_OFFSET +
4618 get_unaligned_le64(
4619 &response.data.create_operational_iq.iq_pi_offset);
4620
4621 /*
4622  * Create IQ (Inbound Queue - host to device queue) for the
4623  * Advanced I/O (AIO) path.
4624  */
4625 memset(&request, 0, sizeof(request));
4626 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4627 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4628 &request.header.iu_length);
4629 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4630 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4631 &request.data.create_operational_iq.queue_id);
4632 put_unaligned_le64((u64)queue_group->
4633 iq_element_array_bus_addr[AIO_PATH],
4634 &request.data.create_operational_iq.element_array_addr);
4635 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4636 &request.data.create_operational_iq.ci_addr);
4637 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4638 &request.data.create_operational_iq.num_elements);
4639 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4640 &request.data.create_operational_iq.element_length);
4641 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4642
4643 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4644 &response);
4645 if (rc) {
4646 dev_err(&ctrl_info->pci_dev->dev,
4647 "error creating inbound AIO queue\n");
4648 return rc;
4649 }
4650
4651 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4652 PQI_DEVICE_REGISTERS_OFFSET +
4653 get_unaligned_le64(
4654 &response.data.create_operational_iq.iq_pi_offset);
4655
4656 /*
4657  * Designate the 2nd IQ as the AIO path.  By default, all IQs are
4658  * assumed to be for RAID path I/O unless we change the queue's
4659  * property.
4660  */
4661 memset(&request, 0, sizeof(request));
4662 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4663 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4664 &request.header.iu_length);
4665 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4666 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4667 &request.data.change_operational_iq_properties.queue_id);
4668 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4669 &request.data.change_operational_iq_properties.vendor_specific);
4670
4671 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4672 &response);
4673 if (rc) {
4674 dev_err(&ctrl_info->pci_dev->dev,
4675 "error changing queue property\n");
4676 return rc;
4677 }
4678
4679 /*
4680  * Create OQ (Outbound Queue - device to host queue).
4681  */
4682 memset(&request, 0, sizeof(request));
4683 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4684 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4685 &request.header.iu_length);
4686 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4687 put_unaligned_le16(queue_group->oq_id,
4688 &request.data.create_operational_oq.queue_id);
4689 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4690 &request.data.create_operational_oq.element_array_addr);
4691 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4692 &request.data.create_operational_oq.pi_addr);
4693 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4694 &request.data.create_operational_oq.num_elements);
4695 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4696 &request.data.create_operational_oq.element_length);
4697 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4698 put_unaligned_le16(queue_group->int_msg_num,
4699 &request.data.create_operational_oq.int_msg_num);
4700
4701 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4702 &response);
4703 if (rc) {
4704 dev_err(&ctrl_info->pci_dev->dev,
4705 "error creating outbound queue\n");
4706 return rc;
4707 }
4708
4709 queue_group->oq_ci = ctrl_info->iomem_base +
4710 PQI_DEVICE_REGISTERS_OFFSET +
4711 get_unaligned_le64(
4712 &response.data.create_operational_oq.oq_ci_offset);
4713
4714 return 0;
4715}
4716
4717static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4718{
4719 int rc;
4720 unsigned int i;
4721
4722 rc = pqi_create_event_queue(ctrl_info);
4723 if (rc) {
4724 dev_err(&ctrl_info->pci_dev->dev,
4725 "error creating event queue\n");
4726 return rc;
4727 }
4728
4729 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4730 rc = pqi_create_queue_group(ctrl_info, i);
4731 if (rc) {
4732 dev_err(&ctrl_info->pci_dev->dev,
4733 "error creating queue group number %u/%u\n",
4734 i, ctrl_info->num_queue_groups);
4735 return rc;
4736 }
4737 }
4738
4739 return 0;
4740}
4741
4742#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4743 (offsetof(struct pqi_event_config, descriptors) + \
4744 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4745
4746static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4747 bool enable_events)
4748{
4749 int rc;
4750 unsigned int i;
4751 struct pqi_event_config *event_config;
4752 struct pqi_event_descriptor *event_descriptor;
4753 struct pqi_general_management_request request;
4754
4755 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4756 GFP_KERNEL);
4757 if (!event_config)
4758 return -ENOMEM;
4759
4760 memset(&request, 0, sizeof(request));
4761
4762 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4763 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4764 data.report_event_configuration.sg_descriptors[1]) -
4765 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4766 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4767 &request.data.report_event_configuration.buffer_length);
4768
4769 rc = pqi_map_single(ctrl_info->pci_dev,
4770 request.data.report_event_configuration.sg_descriptors,
4771 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4772 DMA_FROM_DEVICE);
4773 if (rc)
4774 goto out;
4775
4776 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
4777
4778 pqi_pci_unmap(ctrl_info->pci_dev,
4779 request.data.report_event_configuration.sg_descriptors, 1,
4780 DMA_FROM_DEVICE);
4781
4782 if (rc)
4783 goto out;
4784
4785 for (i = 0; i < event_config->num_event_descriptors; i++) {
4786 event_descriptor = &event_config->descriptors[i];
4787 if (enable_events &&
4788 pqi_is_supported_event(event_descriptor->event_type))
4789 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4790 &event_descriptor->oq_id);
4791 else
4792 put_unaligned_le16(0, &event_descriptor->oq_id);
4793 }
4794
4795 memset(&request, 0, sizeof(request));
4796
4797 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4798 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4799 data.report_event_configuration.sg_descriptors[1]) -
4800 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4801 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4802 &request.data.report_event_configuration.buffer_length);
4803
4804 rc = pqi_map_single(ctrl_info->pci_dev,
4805 request.data.report_event_configuration.sg_descriptors,
4806 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4807 DMA_TO_DEVICE);
4808 if (rc)
4809 goto out;
4810
4811 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
4812
4813 pqi_pci_unmap(ctrl_info->pci_dev,
4814 request.data.report_event_configuration.sg_descriptors, 1,
4815 DMA_TO_DEVICE);
4816
4817out:
4818 kfree(event_config);
4819
4820 return rc;
4821}
4822
4823static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4824{
4825 return pqi_configure_events(ctrl_info, true);
4826}
4827
4828static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4829{
4830 unsigned int i;
4831 struct device *dev;
4832 size_t sg_chain_buffer_length;
4833 struct pqi_io_request *io_request;
4834
4835 if (!ctrl_info->io_request_pool)
4836 return;
4837
4838 dev = &ctrl_info->pci_dev->dev;
4839 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4840 io_request = ctrl_info->io_request_pool;
4841
4842 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4843 kfree(io_request->iu);
4844 if (!io_request->sg_chain_buffer)
4845 break;
4846 dma_free_coherent(dev, sg_chain_buffer_length,
4847 io_request->sg_chain_buffer,
4848 io_request->sg_chain_buffer_dma_handle);
4849 io_request++;
4850 }
4851
4852 kfree(ctrl_info->io_request_pool);
4853 ctrl_info->io_request_pool = NULL;
4854}
4855
4856static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4857{
4858 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4859 ctrl_info->error_buffer_length,
4860 &ctrl_info->error_buffer_dma_handle,
4861 GFP_KERNEL);
4862 if (!ctrl_info->error_buffer)
4863 return -ENOMEM;
4864
4865 return 0;
4866}
4867
4868static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4869{
4870 unsigned int i;
4871 void *sg_chain_buffer;
4872 size_t sg_chain_buffer_length;
4873 dma_addr_t sg_chain_buffer_dma_handle;
4874 struct device *dev;
4875 struct pqi_io_request *io_request;
4876
4877 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
4878 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4879
4880 if (!ctrl_info->io_request_pool) {
4881 dev_err(&ctrl_info->pci_dev->dev,
4882 "failed to allocate I/O request pool\n");
4883 goto error;
4884 }
4885
4886 dev = &ctrl_info->pci_dev->dev;
4887 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4888 io_request = ctrl_info->io_request_pool;
4889
4890 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4891 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4892
4893 if (!io_request->iu) {
4894 dev_err(&ctrl_info->pci_dev->dev,
4895 "failed to allocate IU buffers\n");
4896 goto error;
4897 }
4898
4899 sg_chain_buffer = dma_alloc_coherent(dev,
4900 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4901 GFP_KERNEL);
4902
4903 if (!sg_chain_buffer) {
4904 dev_err(&ctrl_info->pci_dev->dev,
4905 "failed to allocate PQI scatter-gather chain buffers\n");
4906 goto error;
4907 }
4908
4909 io_request->index = i;
4910 io_request->sg_chain_buffer = sg_chain_buffer;
4911 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
4912 io_request++;
4913 }
4914
4915 return 0;
4916
4917error:
4918 pqi_free_all_io_requests(ctrl_info);
4919
4920 return -ENOMEM;
4921}
4922
4923 /*
4924  * Calculate required resources that are sized based on max. outstanding
4925  * requests and max. transfer size.
4926  */
4927
4928static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4929{
4930 u32 max_transfer_size;
4931 u32 max_sg_entries;
4932
4933 ctrl_info->scsi_ml_can_queue =
4934 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4935 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4936
4937 ctrl_info->error_buffer_length =
4938 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4939
4940 if (reset_devices)
4941 max_transfer_size = min(ctrl_info->max_transfer_size,
4942 PQI_MAX_TRANSFER_SIZE_KDUMP);
4943 else
4944 max_transfer_size = min(ctrl_info->max_transfer_size,
4945 PQI_MAX_TRANSFER_SIZE);
4946
4947 max_sg_entries = max_transfer_size / PAGE_SIZE;
4948
4949 /* Add 1 to cover the case where the buffer is not page-aligned. */
4950 max_sg_entries++;
4951
4952 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4953
4954 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4955
4956 ctrl_info->sg_chain_buffer_length =
4957 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4958 PQI_EXTRA_SGL_MEMORY;
4959 ctrl_info->sg_tablesize = max_sg_entries;
4960 ctrl_info->max_sectors = max_transfer_size / 512;
4961}
4962
4963static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4964{
4965 int num_queue_groups;
4966 u16 num_elements_per_iq;
4967 u16 num_elements_per_oq;
4968
4969 if (reset_devices) {
4970 num_queue_groups = 1;
4971 } else {
4972 int num_cpus;
4973 int max_queue_groups;
4974
4975 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4976 ctrl_info->max_outbound_queues - 1);
4977 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4978
4979 num_cpus = num_online_cpus();
4980 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4981 num_queue_groups = min(num_queue_groups, max_queue_groups);
4982 }
4983
4984 ctrl_info->num_queue_groups = num_queue_groups;
4985 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4986
4987 /*
4988  * Make sure that the max. inbound IU length is an even multiple
4989  * of our inbound element length.
4990  */
4991 ctrl_info->max_inbound_iu_length =
4992 (ctrl_info->max_inbound_iu_length_per_firmware /
4993 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4994 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4995
4996 num_elements_per_iq =
4997 (ctrl_info->max_inbound_iu_length /
4998 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4999
5000 /* Add one because one element in each queue is unusable. */
5001 num_elements_per_iq++;
5002
5003 num_elements_per_iq = min(num_elements_per_iq,
5004 ctrl_info->max_elements_per_iq);
5005
5006 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5007 num_elements_per_oq = min(num_elements_per_oq,
5008 ctrl_info->max_elements_per_oq);
5009
5010 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5011 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5012
5013 ctrl_info->max_sg_per_iu =
5014 ((ctrl_info->max_inbound_iu_length -
5015 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5016 sizeof(struct pqi_sg_descriptor)) +
5017 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5018
5019 ctrl_info->max_sg_per_r56_iu =
5020 ((ctrl_info->max_inbound_iu_length -
5021 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5022 sizeof(struct pqi_sg_descriptor)) +
5023 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5024}
5025
5026static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5027 struct scatterlist *sg)
5028{
5029 u64 address = (u64)sg_dma_address(sg);
5030 unsigned int length = sg_dma_len(sg);
5031
5032 put_unaligned_le64(address, &sg_descriptor->address);
5033 put_unaligned_le32(length, &sg_descriptor->length);
5034 put_unaligned_le32(0, &sg_descriptor->flags);
5035}
5036
5037static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5038 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5039 int max_sg_per_iu, bool *chained)
5040{
5041 int i;
5042 unsigned int num_sg_in_iu;
5043
5044 *chained = false;
5045 i = 0;
5046 num_sg_in_iu = 0;
5047 max_sg_per_iu--;
5048
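/*
 * Copy descriptors into the IU until the embedded space is exhausted,
 * then continue in the chain buffer referenced by a CISS_SG_CHAIN
 * descriptor.
 */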
5049 while (1) {
5050 pqi_set_sg_descriptor(sg_descriptor, sg);
5051 if (!*chained)
5052 num_sg_in_iu++;
5053 i++;
5054 if (i == sg_count)
5055 break;
5056 sg_descriptor++;
5057 if (i == max_sg_per_iu) {
5058 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5059 &sg_descriptor->address);
5060 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5061 &sg_descriptor->length);
5062 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5063 *chained = true;
5064 num_sg_in_iu++;
5065 sg_descriptor = io_request->sg_chain_buffer;
5066 }
5067 sg = sg_next(sg);
5068 }
5069
5070 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5071
5072 return num_sg_in_iu;
5073}
5074
5075static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5076 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5077 struct pqi_io_request *io_request)
5078{
5079 u16 iu_length;
5080 int sg_count;
5081 bool chained;
5082 unsigned int num_sg_in_iu;
5083 struct scatterlist *sg;
5084 struct pqi_sg_descriptor *sg_descriptor;
5085
5086 sg_count = scsi_dma_map(scmd);
5087 if (sg_count < 0)
5088 return sg_count;
5089
5090 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5091 PQI_REQUEST_HEADER_LENGTH;
5092
5093 if (sg_count == 0)
5094 goto out;
5095
5096 sg = scsi_sglist(scmd);
5097 sg_descriptor = request->sg_descriptors;
5098
5099 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5100 ctrl_info->max_sg_per_iu, &chained);
5101
5102 request->partial = chained;
5103 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5104
5105out:
5106 put_unaligned_le16(iu_length, &request->header.iu_length);
5107
5108 return 0;
5109}
5110
5111static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5112 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5113 struct pqi_io_request *io_request)
5114{
5115 u16 iu_length;
5116 int sg_count;
5117 bool chained;
5118 unsigned int num_sg_in_iu;
5119 struct scatterlist *sg;
5120 struct pqi_sg_descriptor *sg_descriptor;
5121
5122 sg_count = scsi_dma_map(scmd);
5123 if (sg_count < 0)
5124 return sg_count;
5125
5126 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5127 PQI_REQUEST_HEADER_LENGTH;
5128 num_sg_in_iu = 0;
5129
5130 if (sg_count == 0)
5131 goto out;
5132
5133 sg = scsi_sglist(scmd);
5134 sg_descriptor = request->sg_descriptors;
5135
5136 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5137 ctrl_info->max_sg_per_iu, &chained);
5138
5139 request->partial = chained;
5140 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5141
5142out:
5143 put_unaligned_le16(iu_length, &request->header.iu_length);
5144 request->num_sg_descriptors = num_sg_in_iu;
5145
5146 return 0;
5147}
5148
5149static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5150 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5151 struct pqi_io_request *io_request)
5152{
5153 u16 iu_length;
5154 int sg_count;
5155 bool chained;
5156 unsigned int num_sg_in_iu;
5157 struct scatterlist *sg;
5158 struct pqi_sg_descriptor *sg_descriptor;
5159
5160 sg_count = scsi_dma_map(scmd);
5161 if (sg_count < 0)
5162 return sg_count;
5163
5164 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5165 PQI_REQUEST_HEADER_LENGTH;
5166 num_sg_in_iu = 0;
5167
5168 if (sg_count != 0) {
5169 sg = scsi_sglist(scmd);
5170 sg_descriptor = request->sg_descriptors;
5171
5172 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5173 ctrl_info->max_sg_per_r56_iu, &chained);
5174
5175 request->partial = chained;
5176 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5177 }
5178
5179 put_unaligned_le16(iu_length, &request->header.iu_length);
5180 request->num_sg_descriptors = num_sg_in_iu;
5181
5182 return 0;
5183}
5184
5185static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5186 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5187 struct pqi_io_request *io_request)
5188{
5189 u16 iu_length;
5190 int sg_count;
5191 bool chained;
5192 unsigned int num_sg_in_iu;
5193 struct scatterlist *sg;
5194 struct pqi_sg_descriptor *sg_descriptor;
5195
5196 sg_count = scsi_dma_map(scmd);
5197 if (sg_count < 0)
5198 return sg_count;
5199
5200 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5201 PQI_REQUEST_HEADER_LENGTH;
5202 num_sg_in_iu = 0;
5203
5204 if (sg_count == 0)
5205 goto out;
5206
5207 sg = scsi_sglist(scmd);
5208 sg_descriptor = request->sg_descriptors;
5209
5210 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5211 ctrl_info->max_sg_per_iu, &chained);
5212
5213 request->partial = chained;
5214 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5215
5216out:
5217 put_unaligned_le16(iu_length, &request->header.iu_length);
5218 request->num_sg_descriptors = num_sg_in_iu;
5219
5220 return 0;
5221}
5222
5223static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5224 void *context)
5225{
5226 struct scsi_cmnd *scmd;
5227
5228 scmd = io_request->scmd;
5229 pqi_free_io_request(io_request);
5230 scsi_dma_unmap(scmd);
5231 pqi_scsi_done(scmd);
5232}
5233
5234static int pqi_raid_submit_scsi_cmd_with_io_request(
5235 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
5236 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5237 struct pqi_queue_group *queue_group)
5238{
5239 int rc;
5240 size_t cdb_length;
5241 struct pqi_raid_path_request *request;
5242
5243 io_request->io_complete_callback = pqi_raid_io_complete;
5244 io_request->scmd = scmd;
5245
5246 request = io_request->iu;
5247 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5248
5249 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5250 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5251 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5252 put_unaligned_le16(io_request->index, &request->request_id);
5253 request->error_index = request->request_id;
5254 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5255
5256 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5257 memcpy(request->cdb, scmd->cmnd, cdb_length);
5258
5259 switch (cdb_length) {
5260 case 6:
5261 case 10:
5262 case 12:
5263 case 16:
5264 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5265 break;
5266 case 20:
5267 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5268 break;
5269 case 24:
5270 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5271 break;
5272 case 28:
5273 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5274 break;
5275 case 32:
5276 default:
5277 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5278 break;
5279 }
5280
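/*
 * The SOP data direction is expressed from the controller's point of
 * view: a host write (DMA_TO_DEVICE) is a controller read, and vice versa.
 */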
5281 switch (scmd->sc_data_direction) {
5282 case DMA_TO_DEVICE:
5283 request->data_direction = SOP_READ_FLAG;
5284 break;
5285 case DMA_FROM_DEVICE:
5286 request->data_direction = SOP_WRITE_FLAG;
5287 break;
5288 case DMA_NONE:
5289 request->data_direction = SOP_NO_DIRECTION_FLAG;
5290 break;
5291 case DMA_BIDIRECTIONAL:
5292 request->data_direction = SOP_BIDIRECTIONAL;
5293 break;
5294 default:
5295 dev_err(&ctrl_info->pci_dev->dev,
5296 "unknown data direction: %d\n",
5297 scmd->sc_data_direction);
5298 break;
5299 }
5300
5301 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5302 if (rc) {
5303 pqi_free_io_request(io_request);
5304 return SCSI_MLQUEUE_HOST_BUSY;
5305 }
5306
5307 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5308
5309 return 0;
5310}
5311
5312static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5313 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5314 struct pqi_queue_group *queue_group)
5315{
5316 struct pqi_io_request *io_request;
5317
5318 io_request = pqi_alloc_io_request(ctrl_info);
5319
5320 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5321 device, scmd, queue_group);
5322}
5323
5324static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5325{
5326 struct scsi_cmnd *scmd;
5327 struct pqi_scsi_dev *device;
5328 struct pqi_ctrl_info *ctrl_info;
5329
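/*
 * Retry down the normal RAID path only if the command failed on the AIO
 * bypass path and both the device and the controller are still usable.
 */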
5330 if (!io_request->raid_bypass)
5331 return false;
5332
5333 scmd = io_request->scmd;
5334 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5335 return false;
5336 if (host_byte(scmd->result) == DID_NO_CONNECT)
5337 return false;
5338
5339 device = scmd->device->hostdata;
5340 if (pqi_device_offline(device) || pqi_device_in_remove(device))
5341 return false;
5342
5343 ctrl_info = shost_to_hba(scmd->device->host);
5344 if (pqi_ctrl_offline(ctrl_info))
5345 return false;
5346
5347 return true;
5348}
5349
5350static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5351 void *context)
5352{
5353 struct scsi_cmnd *scmd;
5354
5355 scmd = io_request->scmd;
5356 scsi_dma_unmap(scmd);
5357 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5358 set_host_byte(scmd, DID_IMM_RETRY);
5359 scmd->SCp.this_residual++;
5360 }
5361
5362 pqi_free_io_request(io_request);
5363 pqi_scsi_done(scmd);
5364}
5365
5366static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5367 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5368 struct pqi_queue_group *queue_group)
5369{
5370 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5371 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
5372}
5373
5374static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5375 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5376 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5377 struct pqi_encryption_info *encryption_info, bool raid_bypass)
5378{
5379 int rc;
5380 struct pqi_io_request *io_request;
5381 struct pqi_aio_path_request *request;
5382
5383 io_request = pqi_alloc_io_request(ctrl_info);
5384 io_request->io_complete_callback = pqi_aio_io_complete;
5385 io_request->scmd = scmd;
5386 io_request->raid_bypass = raid_bypass;
5387
5388 request = io_request->iu;
5389 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5390
5391 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5392 put_unaligned_le32(aio_handle, &request->nexus_id);
5393 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5394 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5395 put_unaligned_le16(io_request->index, &request->request_id);
5396 request->error_index = request->request_id;
5397 if (cdb_length > sizeof(request->cdb))
5398 cdb_length = sizeof(request->cdb);
5399 request->cdb_length = cdb_length;
5400 memcpy(request->cdb, cdb, cdb_length);
5401
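/* As on the RAID path, the SOP data direction is from the controller's point of view. */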
5402 switch (scmd->sc_data_direction) {
5403 case DMA_TO_DEVICE:
5404 request->data_direction = SOP_READ_FLAG;
5405 break;
5406 case DMA_FROM_DEVICE:
5407 request->data_direction = SOP_WRITE_FLAG;
5408 break;
5409 case DMA_NONE:
5410 request->data_direction = SOP_NO_DIRECTION_FLAG;
5411 break;
5412 case DMA_BIDIRECTIONAL:
5413 request->data_direction = SOP_BIDIRECTIONAL;
5414 break;
5415 default:
5416 dev_err(&ctrl_info->pci_dev->dev,
5417 "unknown data direction: %d\n",
5418 scmd->sc_data_direction);
5419 break;
5420 }
5421
5422 if (encryption_info) {
5423 request->encryption_enable = true;
5424 put_unaligned_le16(encryption_info->data_encryption_key_index,
5425 &request->data_encryption_key_index);
5426 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5427 &request->encrypt_tweak_lower);
5428 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5429 &request->encrypt_tweak_upper);
5430 }
5431
5432 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5433 if (rc) {
5434 pqi_free_io_request(io_request);
5435 return SCSI_MLQUEUE_HOST_BUSY;
5436 }
5437
5438 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5439
5440 return 0;
5441}
5442
5443static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5444 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5445 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5446 struct pqi_scsi_dev_raid_map_data *rmd)
5447{
5448 int rc;
5449 struct pqi_io_request *io_request;
5450 struct pqi_aio_r1_path_request *r1_request;
5451
5452 io_request = pqi_alloc_io_request(ctrl_info);
5453 io_request->io_complete_callback = pqi_aio_io_complete;
5454 io_request->scmd = scmd;
5455 io_request->raid_bypass = true;
5456
5457 r1_request = io_request->iu;
5458 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5459
5460 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5461 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5462 r1_request->num_drives = rmd->num_it_nexus_entries;
5463 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5464 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5465 if (rmd->num_it_nexus_entries == 3)
5466 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5467
5468 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5469 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5470 put_unaligned_le16(io_request->index, &r1_request->request_id);
5471 r1_request->error_index = r1_request->request_id;
5472 if (rmd->cdb_length > sizeof(r1_request->cdb))
5473 rmd->cdb_length = sizeof(r1_request->cdb);
5474 r1_request->cdb_length = rmd->cdb_length;
5475 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5476
5477 /* The data direction is always write for RAID 1 bypass requests. */
5478 r1_request->data_direction = SOP_READ_FLAG;
5479
5480 if (encryption_info) {
5481 r1_request->encryption_enable = true;
5482 put_unaligned_le16(encryption_info->data_encryption_key_index,
5483 &r1_request->data_encryption_key_index);
5484 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5485 &r1_request->encrypt_tweak_lower);
5486 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5487 &r1_request->encrypt_tweak_upper);
5488 }
5489
5490 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5491 if (rc) {
5492 pqi_free_io_request(io_request);
5493 return SCSI_MLQUEUE_HOST_BUSY;
5494 }
5495
5496 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5497
5498 return 0;
5499}
5500
5501static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5502 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5503 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5504 struct pqi_scsi_dev_raid_map_data *rmd)
5505{
5506 int rc;
5507 struct pqi_io_request *io_request;
5508 struct pqi_aio_r56_path_request *r56_request;
5509
5510 io_request = pqi_alloc_io_request(ctrl_info);
5511 io_request->io_complete_callback = pqi_aio_io_complete;
5512 io_request->scmd = scmd;
5513 io_request->raid_bypass = true;
5514
5515 r56_request = io_request->iu;
5516 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5517
5518 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5519 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5520 else
5521 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5522
5523 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5524 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5525 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5526 if (rmd->raid_level == SA_RAID_6) {
5527 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5528 r56_request->xor_multiplier = rmd->xor_mult;
5529 }
5530 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5531 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5532 put_unaligned_le64(rmd->row, &r56_request->row);
5533
5534 put_unaligned_le16(io_request->index, &r56_request->request_id);
5535 r56_request->error_index = r56_request->request_id;
5536
5537 if (rmd->cdb_length > sizeof(r56_request->cdb))
5538 rmd->cdb_length = sizeof(r56_request->cdb);
5539 r56_request->cdb_length = rmd->cdb_length;
5540 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5541
5542 /* The data direction is always write for RAID 5/6 bypass requests. */
5543 r56_request->data_direction = SOP_READ_FLAG;
5544
5545 if (encryption_info) {
5546 r56_request->encryption_enable = true;
5547 put_unaligned_le16(encryption_info->data_encryption_key_index,
5548 &r56_request->data_encryption_key_index);
5549 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5550 &r56_request->encrypt_tweak_lower);
5551 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5552 &r56_request->encrypt_tweak_upper);
5553 }
5554
5555 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5556 if (rc) {
5557 pqi_free_io_request(io_request);
5558 return SCSI_MLQUEUE_HOST_BUSY;
5559 }
5560
5561 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5562
5563 return 0;
5564}
5565
5566static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5567 struct scsi_cmnd *scmd)
5568{
5569 u16 hw_queue;
5570
5571 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5572 if (hw_queue > ctrl_info->max_hw_queue_index)
5573 hw_queue = 0;
5574
5575 return hw_queue;
5576}
5577
5578static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5579{
5580 if (blk_rq_is_passthrough(scmd->request))
5581 return false;
5582
5583 return scmd->SCp.this_residual == 0;
5584}
5585
5587 /*
5588  * This function gets called just before we hand the completed SCSI
5589  * request back to the SML.
5590  */
5591void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5592{
5593 struct pqi_scsi_dev *device;
5594
5595 if (!scmd->device) {
5596 set_host_byte(scmd, DID_NO_CONNECT);
5597 return;
5598 }
5599
5600 device = scmd->device->hostdata;
5601 if (!device) {
5602 set_host_byte(scmd, DID_NO_CONNECT);
5603 return;
5604 }
5605
5606 atomic_dec(&device->scsi_cmds_outstanding);
5607}
5608
5609static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5610 struct scsi_cmnd *scmd)
5611{
5612 u32 oldest_jiffies;
5613 u8 lru_index;
5614 int i;
5615 int rc;
5616 struct pqi_scsi_dev *device;
5617 struct pqi_stream_data *pqi_stream_data;
5618 struct pqi_scsi_dev_raid_map_data rmd;
5619
5620 if (!ctrl_info->enable_stream_detection)
5621 return false;
5622
5623 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5624 if (rc)
5625 return false;
5626
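	/* Stream detection only applies to writes. */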
5628 if (!rmd.is_write)
5629 return false;
5630
5631 device = scmd->device->hostdata;
5632
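	/* Only RAID 5 and RAID 6 volumes are candidates for stream detection. */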
5634 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5635 return false;
5636
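	/*
	 * If the controller does not have AIO RAID 5/6 writes enabled, the
	 * request has to go down the normal RAID path anyway, so report it
	 * as a write stream.
	 */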
5641 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5642 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5643 return true;
5644
5645 lru_index = 0;
5646 oldest_jiffies = INT_MAX;
5647 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5648 pqi_stream_data = &device->stream_data[i];
5649
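		/*
		 * If this request starts at or just past the LBA predicted for
		 * this stream, treat it as part of the stream and advance the
		 * prediction.
		 */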
5653 if ((pqi_stream_data->next_lba &&
5654 rmd.first_block >= pqi_stream_data->next_lba) &&
5655 rmd.first_block <= pqi_stream_data->next_lba +
5656 rmd.block_cnt) {
5657 pqi_stream_data->next_lba = rmd.first_block +
5658 rmd.block_cnt;
5659 pqi_stream_data->last_accessed = jiffies;
5660 return true;
5661 }
5662
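		/* A never-used entry: claim it immediately. */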
5664 if (pqi_stream_data->last_accessed == 0) {
5665 lru_index = i;
5666 break;
5667 }
5668
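		/* Track the least recently used entry. */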
5670 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5671 oldest_jiffies = pqi_stream_data->last_accessed;
5672 lru_index = i;
5673 }
5674 }
5675
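	/* Recycle the LRU entry to track this new potential stream. */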
5677 pqi_stream_data = &device->stream_data[lru_index];
5678 pqi_stream_data->last_accessed = jiffies;
5679 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5680
5681 return false;
5682}
5683
5684static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5685{
5686 int rc;
5687 struct pqi_ctrl_info *ctrl_info;
5688 struct pqi_scsi_dev *device;
5689 u16 hw_queue;
5690 struct pqi_queue_group *queue_group;
5691 bool raid_bypassed;
5692
5693 device = scmd->device->hostdata;
5694
5695 if (!device) {
5696 set_host_byte(scmd, DID_NO_CONNECT);
5697 pqi_scsi_done(scmd);
5698 return 0;
5699 }
5700
5701 atomic_inc(&device->scsi_cmds_outstanding);
5702
5703 ctrl_info = shost_to_hba(shost);
5704
5705 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5706 set_host_byte(scmd, DID_NO_CONNECT);
5707 pqi_scsi_done(scmd);
5708 return 0;
5709 }
5710
5711 if (pqi_ctrl_blocked(ctrl_info)) {
5712 rc = SCSI_MLQUEUE_HOST_BUSY;
5713 goto out;
5714 }
5715
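	/*
	 * Clear any stale result; the SCSI midlayer does not zero this field
	 * when a command is retried during error recovery.
	 */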
5720 scmd->result = 0;
5721
5722 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5723 queue_group = &ctrl_info->queue_groups[hw_queue];
5724
5725 if (pqi_is_logical_device(device)) {
5726 raid_bypassed = false;
5727 if (device->raid_bypass_enabled &&
5728 pqi_is_bypass_eligible_request(scmd) &&
5729 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5730 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5731 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5732 raid_bypassed = true;
5733 atomic_inc(&device->raid_bypass_cnt);
5734 }
5735 }
5736 if (!raid_bypassed)
5737 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5738 } else {
5739 if (device->aio_enabled)
5740 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5741 else
5742 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5743 }
5744
5745out:
5746 if (rc)
5747 atomic_dec(&device->scsi_cmds_outstanding);
5748
5749 return rc;
5750}
5751
5752static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5753 struct pqi_queue_group *queue_group)
5754{
5755 unsigned int path;
5756 unsigned long flags;
5757 bool list_is_empty;
5758
5759 for (path = 0; path < 2; path++) {
5760 while (1) {
5761 spin_lock_irqsave(
5762 &queue_group->submit_lock[path], flags);
5763 list_is_empty =
5764 list_empty(&queue_group->request_list[path]);
5765 spin_unlock_irqrestore(
5766 &queue_group->submit_lock[path], flags);
5767 if (list_is_empty)
5768 break;
5769 pqi_check_ctrl_health(ctrl_info);
5770 if (pqi_ctrl_offline(ctrl_info))
5771 return -ENXIO;
5772 usleep_range(1000, 2000);
5773 }
5774 }
5775
5776 return 0;
5777}
5778
5779static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5780{
5781 int rc;
5782 unsigned int i;
5783 unsigned int path;
5784 struct pqi_queue_group *queue_group;
5785 pqi_index_t iq_pi;
5786 pqi_index_t iq_ci;
5787
5788 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5789 queue_group = &ctrl_info->queue_groups[i];
5790
5791 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5792 if (rc)
5793 return rc;
5794
5795 for (path = 0; path < 2; path++) {
5796 iq_pi = queue_group->iq_pi_copy[path];
5797
5798 while (1) {
5799 iq_ci = readl(queue_group->iq_ci[path]);
5800 if (iq_ci == iq_pi)
5801 break;
5802 pqi_check_ctrl_health(ctrl_info);
5803 if (pqi_ctrl_offline(ctrl_info))
5804 return -ENXIO;
5805 usleep_range(1000, 2000);
5806 }
5807 }
5808 }
5809
5810 return 0;
5811}
5812
5813static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5814 struct pqi_scsi_dev *device)
5815{
5816 unsigned int i;
5817 unsigned int path;
5818 struct pqi_queue_group *queue_group;
5819 unsigned long flags;
5820 struct pqi_io_request *io_request;
5821 struct pqi_io_request *next;
5822 struct scsi_cmnd *scmd;
5823 struct pqi_scsi_dev *scsi_device;
5824
5825 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5826 queue_group = &ctrl_info->queue_groups[i];
5827
5828 for (path = 0; path < 2; path++) {
5829 spin_lock_irqsave(
5830 &queue_group->submit_lock[path], flags);
5831
5832 list_for_each_entry_safe(io_request, next,
5833 &queue_group->request_list[path],
5834 request_list_entry) {
5835
5836 scmd = io_request->scmd;
5837 if (!scmd)
5838 continue;
5839
5840 scsi_device = scmd->device->hostdata;
5841 if (scsi_device != device)
5842 continue;
5843
5844 list_del(&io_request->request_list_entry);
5845 set_host_byte(scmd, DID_RESET);
5846 pqi_free_io_request(io_request);
5847 scsi_dma_unmap(scmd);
5848 pqi_scsi_done(scmd);
5849 }
5850
5851 spin_unlock_irqrestore(
5852 &queue_group->submit_lock[path], flags);
5853 }
5854 }
5855}
5856
5857#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
5858
5859static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5860 struct pqi_scsi_dev *device, unsigned long timeout_msecs)
5861{
5862 int cmds_outstanding;
5863 unsigned long start_jiffies;
5864 unsigned long warning_timeout;
5865 unsigned long msecs_waiting;
5866
5867 start_jiffies = jiffies;
5868 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
5869
5870 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
5871 pqi_check_ctrl_health(ctrl_info);
5872 if (pqi_ctrl_offline(ctrl_info))
5873 return -ENXIO;
5874 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
5875 if (msecs_waiting > timeout_msecs) {
5876 dev_err(&ctrl_info->pci_dev->dev,
5877 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
5878 ctrl_info->scsi_host->host_no, device->bus, device->target,
5879 device->lun, msecs_waiting / 1000, cmds_outstanding);
5880 return -ETIMEDOUT;
5881 }
5882 if (time_after(jiffies, warning_timeout)) {
5883 dev_warn(&ctrl_info->pci_dev->dev,
5884 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
5885 ctrl_info->scsi_host->host_no, device->bus, device->target,
5886 device->lun, msecs_waiting / 1000, cmds_outstanding);
5887 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
5888 }
5889 usleep_range(1000, 2000);
5890 }
5891
5892 return 0;
5893}
5894
5895static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5896 void *context)
5897{
5898 struct completion *waiting = context;
5899
5900 complete(waiting);
5901}
5902
5903#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
5904
5905static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5906 struct pqi_scsi_dev *device, struct completion *wait)
5907{
5908 int rc;
5909 unsigned int wait_secs;
5910
5911 wait_secs = 0;
5912
5913 while (1) {
5914 if (wait_for_completion_io_timeout(wait,
5915 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
5916 rc = 0;
5917 break;
5918 }
5919
5920 pqi_check_ctrl_health(ctrl_info);
5921 if (pqi_ctrl_offline(ctrl_info)) {
5922 rc = -ENXIO;
5923 break;
5924 }
5925
5926 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
5927
5928 dev_warn(&ctrl_info->pci_dev->dev,
5929 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n",
5930 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun,
5931 wait_secs);
5932 }
5933
5934 return rc;
5935}
5936
5937#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
5938
5939static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
5940{
5941 int rc;
5942 struct pqi_io_request *io_request;
5943 DECLARE_COMPLETION_ONSTACK(wait);
5944 struct pqi_task_management_request *request;
5945
5946 io_request = pqi_alloc_io_request(ctrl_info);
5947 io_request->io_complete_callback = pqi_lun_reset_complete;
5948 io_request->context = &wait;
5949
5950 request = io_request->iu;
5951 memset(request, 0, sizeof(*request));
5952
5953 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5954 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5955 &request->header.iu_length);
5956 put_unaligned_le16(io_request->index, &request->request_id);
5957 memcpy(request->lun_number, device->scsi3addr,
5958 sizeof(request->lun_number));
5959 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5960 if (ctrl_info->tmf_iu_timeout_supported)
5961 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
5962
5963 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5964 io_request);
5965
5966 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5967 if (rc == 0)
5968 rc = io_request->status;
5969
5970 pqi_free_io_request(io_request);
5971
5972 return rc;
5973}
5974
5975#define PQI_LUN_RESET_RETRIES 3
5976#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
5977#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
5978#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
5979
5980static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
5981{
5982 int reset_rc;
5983 int wait_rc;
5984 unsigned int retries;
5985 unsigned long timeout_msecs;
5986
5987 for (retries = 0;;) {
5988 reset_rc = pqi_lun_reset(ctrl_info, device);
5989 if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
5990 break;
5991 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5992 }
5993
5994 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
5995 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
5996
5997 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
5998 if (wait_rc && reset_rc == 0)
5999 reset_rc = wait_rc;
6000
6001 return reset_rc == 0 ? SUCCESS : FAILED;
6002}
6003
6004static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
6005 struct pqi_scsi_dev *device)
6006{
6007 int rc;
6008
6009 pqi_ctrl_block_requests(ctrl_info);
6010 pqi_ctrl_wait_until_quiesced(ctrl_info);
6011 pqi_fail_io_queued_for_device(ctrl_info, device);
6012 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6013 if (rc)
6014 rc = FAILED;
6015 else
6016 rc = pqi_lun_reset_with_retries(ctrl_info, device);
6017 pqi_ctrl_unblock_requests(ctrl_info);
6018
6019 return rc;
6020}
6021
6022static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6023{
6024 int rc;
6025 struct Scsi_Host *shost;
6026 struct pqi_ctrl_info *ctrl_info;
6027 struct pqi_scsi_dev *device;
6028
6029 shost = scmd->device->host;
6030 ctrl_info = shost_to_hba(shost);
6031 device = scmd->device->hostdata;
6032
6033 mutex_lock(&ctrl_info->lun_reset_mutex);
6034
6035 dev_err(&ctrl_info->pci_dev->dev,
6036 "resetting scsi %d:%d:%d:%d\n",
6037 shost->host_no, device->bus, device->target, device->lun);
6038
6039 pqi_check_ctrl_health(ctrl_info);
6040 if (pqi_ctrl_offline(ctrl_info))
6041 rc = FAILED;
6042 else
6043 rc = pqi_device_reset(ctrl_info, device);
6044
6045 dev_err(&ctrl_info->pci_dev->dev,
6046 "reset of scsi %d:%d:%d:%d: %s\n",
6047 shost->host_no, device->bus, device->target, device->lun,
6048 rc == SUCCESS ? "SUCCESS" : "FAILED");
6049
6050 mutex_unlock(&ctrl_info->lun_reset_mutex);
6051
6052 return rc;
6053}
6054
6055static int pqi_slave_alloc(struct scsi_device *sdev)
6056{
6057 struct pqi_scsi_dev *device;
6058 unsigned long flags;
6059 struct pqi_ctrl_info *ctrl_info;
6060 struct scsi_target *starget;
6061 struct sas_rphy *rphy;
6062
6063 ctrl_info = shost_to_hba(sdev->host);
6064
6065 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6066
6067 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6068 starget = scsi_target(sdev);
6069 rphy = target_to_rphy(starget);
6070 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6071 if (device) {
6072 device->target = sdev_id(sdev);
6073 device->lun = sdev->lun;
6074 device->target_lun_valid = true;
6075 }
6076 } else {
6077 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6078 sdev_id(sdev), sdev->lun);
6079 }
6080
6081 if (device) {
6082 sdev->hostdata = device;
6083 device->sdev = sdev;
6084 if (device->queue_depth) {
6085 device->advertised_queue_depth = device->queue_depth;
6086 scsi_change_queue_depth(sdev,
6087 device->advertised_queue_depth);
6088 }
6089 if (pqi_is_logical_device(device)) {
6090 pqi_disable_write_same(sdev);
6091 } else {
6092 sdev->allow_restart = 1;
6093 if (device->device_type == SA_DEVICE_TYPE_NVME)
6094 pqi_disable_write_same(sdev);
6095 }
6096 }
6097
6098 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6099
6100 return 0;
6101}
6102
6103static int pqi_map_queues(struct Scsi_Host *shost)
6104{
6105 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6106
6107 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6108 ctrl_info->pci_dev, 0);
6109}
6110
6111static int pqi_slave_configure(struct scsi_device *sdev)
6112{
6113 struct pqi_scsi_dev *device;
6114
6115 device = sdev->hostdata;
6116 device->devtype = sdev->type;
6117
6118 return 0;
6119}
6120
6121static void pqi_slave_destroy(struct scsi_device *sdev)
6122{
6123 unsigned long flags;
6124 struct pqi_scsi_dev *device;
6125 struct pqi_ctrl_info *ctrl_info;
6126
6127 ctrl_info = shost_to_hba(sdev->host);
6128
6129 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6130
6131 device = sdev->hostdata;
6132 if (device) {
6133 sdev->hostdata = NULL;
6134 if (!list_empty(&device->scsi_device_list_entry))
6135 list_del(&device->scsi_device_list_entry);
6136 }
6137
6138 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6139
6140 if (device) {
6141 pqi_dev_info(ctrl_info, "removed", device);
6142 pqi_free_device(device);
6143 }
6144}
6145
6146static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6147{
6148 struct pci_dev *pci_dev;
6149 u32 subsystem_vendor;
6150 u32 subsystem_device;
6151 cciss_pci_info_struct pciinfo;
6152
6153 if (!arg)
6154 return -EINVAL;
6155
6156 pci_dev = ctrl_info->pci_dev;
6157
6158 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6159 pciinfo.bus = pci_dev->bus->number;
6160 pciinfo.dev_fn = pci_dev->devfn;
6161 subsystem_vendor = pci_dev->subsystem_vendor;
6162 subsystem_device = pci_dev->subsystem_device;
6163 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6164
6165 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6166 return -EFAULT;
6167
6168 return 0;
6169}
6170
6171static int pqi_getdrivver_ioctl(void __user *arg)
6172{
6173 u32 version;
6174
6175 if (!arg)
6176 return -EINVAL;
6177
6178 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6179 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6180
6181 if (copy_to_user(arg, &version, sizeof(version)))
6182 return -EFAULT;
6183
6184 return 0;
6185}
6186
6187struct ciss_error_info {
6188 u8 scsi_status;
6189 int command_status;
6190 size_t sense_data_length;
6191};
6192
6193static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6194 struct ciss_error_info *ciss_error_info)
6195{
6196 int ciss_cmd_status;
6197 size_t sense_data_length;
6198
6199 switch (pqi_error_info->data_out_result) {
6200 case PQI_DATA_IN_OUT_GOOD:
6201 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6202 break;
6203 case PQI_DATA_IN_OUT_UNDERFLOW:
6204 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6205 break;
6206 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6207 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6208 break;
6209 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6210 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6211 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6212 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6213 case PQI_DATA_IN_OUT_ERROR:
6214 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6215 break;
6216 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6217 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6218 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6219 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6220 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6221 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6222 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6223 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6224 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6225 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6226 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6227 break;
6228 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6229 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6230 break;
6231 case PQI_DATA_IN_OUT_ABORTED:
6232 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6233 break;
6234 case PQI_DATA_IN_OUT_TIMEOUT:
6235 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6236 break;
6237 default:
6238 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6239 break;
6240 }
6241
6242 sense_data_length =
6243 get_unaligned_le16(&pqi_error_info->sense_data_length);
6244 if (sense_data_length == 0)
6245 sense_data_length =
6246 get_unaligned_le16(&pqi_error_info->response_data_length);
6247 if (sense_data_length)
6248 if (sense_data_length > sizeof(pqi_error_info->data))
6249 sense_data_length = sizeof(pqi_error_info->data);
6250
6251 ciss_error_info->scsi_status = pqi_error_info->status;
6252 ciss_error_info->command_status = ciss_cmd_status;
6253 ciss_error_info->sense_data_length = sense_data_length;
6254}
6255
6256static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6257{
6258 int rc;
6259 char *kernel_buffer = NULL;
6260 u16 iu_length;
6261 size_t sense_data_length;
6262 IOCTL_Command_struct iocommand;
6263 struct pqi_raid_path_request request;
6264 struct pqi_raid_error_info pqi_error_info;
6265 struct ciss_error_info ciss_error_info;
6266
6267 if (pqi_ctrl_offline(ctrl_info))
6268 return -ENXIO;
6269 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6270 return -EBUSY;
6271 if (!arg)
6272 return -EINVAL;
6273 if (!capable(CAP_SYS_RAWIO))
6274 return -EPERM;
6275 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6276 return -EFAULT;
6277 if (iocommand.buf_size < 1 &&
6278 iocommand.Request.Type.Direction != XFER_NONE)
6279 return -EINVAL;
6280 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6281 return -EINVAL;
6282 if (iocommand.Request.Type.Type != TYPE_CMD)
6283 return -EINVAL;
6284
6285 switch (iocommand.Request.Type.Direction) {
6286 case XFER_NONE:
6287 case XFER_WRITE:
6288 case XFER_READ:
6289 case XFER_READ | XFER_WRITE:
6290 break;
6291 default:
6292 return -EINVAL;
6293 }
6294
6295 if (iocommand.buf_size > 0) {
6296 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6297 if (!kernel_buffer)
6298 return -ENOMEM;
6299 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6300 if (copy_from_user(kernel_buffer, iocommand.buf,
6301 iocommand.buf_size)) {
6302 rc = -EFAULT;
6303 goto out;
6304 }
6305 } else {
6306 memset(kernel_buffer, 0, iocommand.buf_size);
6307 }
6308 }
6309
6310 memset(&request, 0, sizeof(request));
6311
6312 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6313 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6314 PQI_REQUEST_HEADER_LENGTH;
6315 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6316 sizeof(request.lun_number));
6317 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6318 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6319
6320 switch (iocommand.Request.Type.Direction) {
6321 case XFER_NONE:
6322 request.data_direction = SOP_NO_DIRECTION_FLAG;
6323 break;
6324 case XFER_WRITE:
6325 request.data_direction = SOP_WRITE_FLAG;
6326 break;
6327 case XFER_READ:
6328 request.data_direction = SOP_READ_FLAG;
6329 break;
6330 case XFER_READ | XFER_WRITE:
6331 request.data_direction = SOP_BIDIRECTIONAL;
6332 break;
6333 }
6334
6335 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6336
6337 if (iocommand.buf_size > 0) {
6338 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6339
6340 rc = pqi_map_single(ctrl_info->pci_dev,
6341 &request.sg_descriptors[0], kernel_buffer,
6342 iocommand.buf_size, DMA_BIDIRECTIONAL);
6343 if (rc)
6344 goto out;
6345
6346 iu_length += sizeof(request.sg_descriptors[0]);
6347 }
6348
6349 put_unaligned_le16(iu_length, &request.header.iu_length);
6350
6351 if (ctrl_info->raid_iu_timeout_supported)
6352 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6353
6354 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6355 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6356
6357 if (iocommand.buf_size > 0)
6358 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6359 DMA_BIDIRECTIONAL);
6360
6361 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6362
6363 if (rc == 0) {
6364 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6365 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6366 iocommand.error_info.CommandStatus =
6367 ciss_error_info.command_status;
6368 sense_data_length = ciss_error_info.sense_data_length;
6369 if (sense_data_length) {
6370 if (sense_data_length >
6371 sizeof(iocommand.error_info.SenseInfo))
6372 sense_data_length =
6373 sizeof(iocommand.error_info.SenseInfo);
6374 memcpy(iocommand.error_info.SenseInfo,
6375 pqi_error_info.data, sense_data_length);
6376 iocommand.error_info.SenseLen = sense_data_length;
6377 }
6378 }
6379
6380 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6381 rc = -EFAULT;
6382 goto out;
6383 }
6384
6385 if (rc == 0 && iocommand.buf_size > 0 &&
6386 (iocommand.Request.Type.Direction & XFER_READ)) {
6387 if (copy_to_user(iocommand.buf, kernel_buffer,
6388 iocommand.buf_size)) {
6389 rc = -EFAULT;
6390 }
6391 }
6392
6393out:
6394 kfree(kernel_buffer);
6395
6396 return rc;
6397}
6398
6399static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6400 void __user *arg)
6401{
6402 int rc;
6403 struct pqi_ctrl_info *ctrl_info;
6404
6405 ctrl_info = shost_to_hba(sdev->host);
6406
6407 switch (cmd) {
6408 case CCISS_DEREGDISK:
6409 case CCISS_REGNEWDISK:
6410 case CCISS_REGNEWD:
6411 rc = pqi_scan_scsi_devices(ctrl_info);
6412 break;
6413 case CCISS_GETPCIINFO:
6414 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6415 break;
6416 case CCISS_GETDRIVVER:
6417 rc = pqi_getdrivver_ioctl(arg);
6418 break;
6419 case CCISS_PASSTHRU:
6420 rc = pqi_passthru_ioctl(ctrl_info, arg);
6421 break;
6422 default:
6423 rc = -EINVAL;
6424 break;
6425 }
6426
6427 return rc;
6428}
6429
6430static ssize_t pqi_firmware_version_show(struct device *dev,
6431 struct device_attribute *attr, char *buffer)
6432{
6433 struct Scsi_Host *shost;
6434 struct pqi_ctrl_info *ctrl_info;
6435
6436 shost = class_to_shost(dev);
6437 ctrl_info = shost_to_hba(shost);
6438
6439 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6440}
6441
6442static ssize_t pqi_driver_version_show(struct device *dev,
6443 struct device_attribute *attr, char *buffer)
6444{
6445 return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6446}
6447
6448static ssize_t pqi_serial_number_show(struct device *dev,
6449 struct device_attribute *attr, char *buffer)
6450{
6451 struct Scsi_Host *shost;
6452 struct pqi_ctrl_info *ctrl_info;
6453
6454 shost = class_to_shost(dev);
6455 ctrl_info = shost_to_hba(shost);
6456
6457 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6458}
6459
6460static ssize_t pqi_model_show(struct device *dev,
6461 struct device_attribute *attr, char *buffer)
6462{
6463 struct Scsi_Host *shost;
6464 struct pqi_ctrl_info *ctrl_info;
6465
6466 shost = class_to_shost(dev);
6467 ctrl_info = shost_to_hba(shost);
6468
6469 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6470}
6471
6472static ssize_t pqi_vendor_show(struct device *dev,
6473 struct device_attribute *attr, char *buffer)
6474{
6475 struct Scsi_Host *shost;
6476 struct pqi_ctrl_info *ctrl_info;
6477
6478 shost = class_to_shost(dev);
6479 ctrl_info = shost_to_hba(shost);
6480
6481 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6482}
6483
6484static ssize_t pqi_host_rescan_store(struct device *dev,
6485 struct device_attribute *attr, const char *buffer, size_t count)
6486{
6487 struct Scsi_Host *shost = class_to_shost(dev);
6488
6489 pqi_scan_start(shost);
6490
6491 return count;
6492}
6493
6494static ssize_t pqi_lockup_action_show(struct device *dev,
6495 struct device_attribute *attr, char *buffer)
6496{
6497 int count = 0;
6498 unsigned int i;
6499
6500 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6501 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6502 count += scnprintf(buffer + count, PAGE_SIZE - count,
6503 "[%s] ", pqi_lockup_actions[i].name);
6504 else
6505 count += scnprintf(buffer + count, PAGE_SIZE - count,
6506 "%s ", pqi_lockup_actions[i].name);
6507 }
6508
6509 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6510
6511 return count;
6512}
6513
6514static ssize_t pqi_lockup_action_store(struct device *dev,
6515 struct device_attribute *attr, const char *buffer, size_t count)
6516{
6517 unsigned int i;
6518 char *action_name;
6519 char action_name_buffer[32];
6520
6521 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6522 action_name = strstrip(action_name_buffer);
6523
6524 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6525 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6526 pqi_lockup_action = pqi_lockup_actions[i].action;
6527 return count;
6528 }
6529 }
6530
6531 return -EINVAL;
6532}
6533
6534static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6535 struct device_attribute *attr, char *buffer)
6536{
6537 struct Scsi_Host *shost = class_to_shost(dev);
6538 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6539
6540 return scnprintf(buffer, 10, "%x\n",
6541 ctrl_info->enable_stream_detection);
6542}
6543
6544static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6545 struct device_attribute *attr, const char *buffer, size_t count)
6546{
6547 struct Scsi_Host *shost = class_to_shost(dev);
6548 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6549 u8 set_stream_detection = 0;
6550
6551 if (kstrtou8(buffer, 0, &set_stream_detection))
6552 return -EINVAL;
6553
6554 if (set_stream_detection > 0)
6555 set_stream_detection = 1;
6556
6557 ctrl_info->enable_stream_detection = set_stream_detection;
6558
6559 return count;
6560}
6561
6562static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6563 struct device_attribute *attr, char *buffer)
6564{
6565 struct Scsi_Host *shost = class_to_shost(dev);
6566 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6567
6568 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6569}
6570
6571static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6572 struct device_attribute *attr, const char *buffer, size_t count)
6573{
6574 struct Scsi_Host *shost = class_to_shost(dev);
6575 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6576 u8 set_r5_writes = 0;
6577
6578 if (kstrtou8(buffer, 0, &set_r5_writes))
6579 return -EINVAL;
6580
6581 if (set_r5_writes > 0)
6582 set_r5_writes = 1;
6583
6584 ctrl_info->enable_r5_writes = set_r5_writes;
6585
6586 return count;
6587}
6588
6589static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6590 struct device_attribute *attr, char *buffer)
6591{
6592 struct Scsi_Host *shost = class_to_shost(dev);
6593 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6594
6595 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6596}
6597
6598static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6599 struct device_attribute *attr, const char *buffer, size_t count)
6600{
6601 struct Scsi_Host *shost = class_to_shost(dev);
6602 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6603 u8 set_r6_writes = 0;
6604
6605 if (kstrtou8(buffer, 0, &set_r6_writes))
6606 return -EINVAL;
6607
6608 if (set_r6_writes > 0)
6609 set_r6_writes = 1;
6610
6611 ctrl_info->enable_r6_writes = set_r6_writes;
6612
6613 return count;
6614}
6615
6616static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6617static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6618static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6619static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6620static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6621static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6622static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6623 pqi_lockup_action_store);
6624static DEVICE_ATTR(enable_stream_detection, 0644,
6625 pqi_host_enable_stream_detection_show,
6626 pqi_host_enable_stream_detection_store);
6627static DEVICE_ATTR(enable_r5_writes, 0644,
6628 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6629static DEVICE_ATTR(enable_r6_writes, 0644,
6630 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6631
6632static struct device_attribute *pqi_shost_attrs[] = {
6633 &dev_attr_driver_version,
6634 &dev_attr_firmware_version,
6635 &dev_attr_model,
6636 &dev_attr_serial_number,
6637 &dev_attr_vendor,
6638 &dev_attr_rescan,
6639 &dev_attr_lockup_action,
6640 &dev_attr_enable_stream_detection,
6641 &dev_attr_enable_r5_writes,
6642 &dev_attr_enable_r6_writes,
6643 NULL
6644};
6645
6646static ssize_t pqi_unique_id_show(struct device *dev,
6647 struct device_attribute *attr, char *buffer)
6648{
6649 struct pqi_ctrl_info *ctrl_info;
6650 struct scsi_device *sdev;
6651 struct pqi_scsi_dev *device;
6652 unsigned long flags;
6653 u8 unique_id[16];
6654
6655 sdev = to_scsi_device(dev);
6656 ctrl_info = shost_to_hba(sdev->host);
6657
6658 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6659
6660 device = sdev->hostdata;
6661 if (!device) {
6662 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6663 return -ENODEV;
6664 }
6665
6666 if (device->is_physical_device) {
6667 memset(unique_id, 0, 8);
6668 memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
6669 } else {
6670 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6671 }
6672
6673 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6674
6675 return scnprintf(buffer, PAGE_SIZE,
6676 "%02X%02X%02X%02X%02X%02X%02X%02X"
6677 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
6678 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6679 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6680 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6681 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6682}
6683
6684static ssize_t pqi_lunid_show(struct device *dev,
6685 struct device_attribute *attr, char *buffer)
6686{
6687 struct pqi_ctrl_info *ctrl_info;
6688 struct scsi_device *sdev;
6689 struct pqi_scsi_dev *device;
6690 unsigned long flags;
6691 u8 lunid[8];
6692
6693 sdev = to_scsi_device(dev);
6694 ctrl_info = shost_to_hba(sdev->host);
6695
6696 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6697
6698 device = sdev->hostdata;
6699 if (!device) {
6700 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6701 return -ENODEV;
6702 }
6703
6704 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6705
6706 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6707
6708 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6709}
6710
6711#define MAX_PATHS 8
6712
6713static ssize_t pqi_path_info_show(struct device *dev,
6714 struct device_attribute *attr, char *buf)
6715{
6716 struct pqi_ctrl_info *ctrl_info;
6717 struct scsi_device *sdev;
6718 struct pqi_scsi_dev *device;
6719 unsigned long flags;
6720 int i;
6721 int output_len = 0;
6722 u8 box;
6723 u8 bay;
6724 u8 path_map_index;
6725 char *active;
6726 u8 phys_connector[2];
6727
6728 sdev = to_scsi_device(dev);
6729 ctrl_info = shost_to_hba(sdev->host);
6730
6731 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6732
6733 device = sdev->hostdata;
6734 if (!device) {
6735 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6736 return -ENODEV;
6737 }
6738
6739 bay = device->bay;
6740 for (i = 0; i < MAX_PATHS; i++) {
6741 path_map_index = 1 << i;
6742 if (i == device->active_path_index)
6743 active = "Active";
6744 else if (device->path_map & path_map_index)
6745 active = "Inactive";
6746 else
6747 continue;
6748
6749 output_len += scnprintf(buf + output_len,
6750 PAGE_SIZE - output_len,
6751 "[%d:%d:%d:%d] %20.20s ",
6752 ctrl_info->scsi_host->host_no,
6753 device->bus, device->target,
6754 device->lun,
6755 scsi_device_type(device->devtype));
6756
6757 if (device->devtype == TYPE_RAID ||
6758 pqi_is_logical_device(device))
6759 goto end_buffer;
6760
6761 memcpy(&phys_connector, &device->phys_connector[i],
6762 sizeof(phys_connector));
6763 if (phys_connector[0] < '0')
6764 phys_connector[0] = '0';
6765 if (phys_connector[1] < '0')
6766 phys_connector[1] = '0';
6767
6768 output_len += scnprintf(buf + output_len,
6769 PAGE_SIZE - output_len,
6770 "PORT: %.2s ", phys_connector);
6771
6772 box = device->box[i];
6773 if (box != 0 && box != 0xFF)
6774 output_len += scnprintf(buf + output_len,
6775 PAGE_SIZE - output_len,
6776 "BOX: %hhu ", box);
6777
6778 if ((device->devtype == TYPE_DISK ||
6779 device->devtype == TYPE_ZBC) &&
6780 pqi_expose_device(device))
6781 output_len += scnprintf(buf + output_len,
6782 PAGE_SIZE - output_len,
6783 "BAY: %hhu ", bay);
6784
6785end_buffer:
6786 output_len += scnprintf(buf + output_len,
6787 PAGE_SIZE - output_len,
6788 "%s\n", active);
6789 }
6790
6791 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6792
6793 return output_len;
6794}
6795
6796static ssize_t pqi_sas_address_show(struct device *dev,
6797 struct device_attribute *attr, char *buffer)
6798{
6799 struct pqi_ctrl_info *ctrl_info;
6800 struct scsi_device *sdev;
6801 struct pqi_scsi_dev *device;
6802 unsigned long flags;
6803 u64 sas_address;
6804
6805 sdev = to_scsi_device(dev);
6806 ctrl_info = shost_to_hba(sdev->host);
6807
6808 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6809
6810 device = sdev->hostdata;
6811 if (!device || !pqi_is_device_with_sas_address(device)) {
6812 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6813 return -ENODEV;
6814 }
6815
6816 sas_address = device->sas_address;
6817
6818 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6819
6820 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6821}
6822
6823static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6824 struct device_attribute *attr, char *buffer)
6825{
6826 struct pqi_ctrl_info *ctrl_info;
6827 struct scsi_device *sdev;
6828 struct pqi_scsi_dev *device;
6829 unsigned long flags;
6830
6831 sdev = to_scsi_device(dev);
6832 ctrl_info = shost_to_hba(sdev->host);
6833
6834 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6835
6836 device = sdev->hostdata;
6837 if (!device) {
6838 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6839 return -ENODEV;
6840 }
6841
6842 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6843 buffer[1] = '\n';
6844 buffer[2] = '\0';
6845
6846 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6847
6848 return 2;
6849}
6850
6851static ssize_t pqi_raid_level_show(struct device *dev,
6852 struct device_attribute *attr, char *buffer)
6853{
6854 struct pqi_ctrl_info *ctrl_info;
6855 struct scsi_device *sdev;
6856 struct pqi_scsi_dev *device;
6857 unsigned long flags;
6858 char *raid_level;
6859
6860 sdev = to_scsi_device(dev);
6861 ctrl_info = shost_to_hba(sdev->host);
6862
6863 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6864
6865 device = sdev->hostdata;
6866 if (!device) {
6867 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6868 return -ENODEV;
6869 }
6870
6871 if (pqi_is_logical_device(device))
6872 raid_level = pqi_raid_level_to_string(device->raid_level);
6873 else
6874 raid_level = "N/A";
6875
6876 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6877
6878 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6879}
6880
6881static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
6882 struct device_attribute *attr, char *buffer)
6883{
6884 struct pqi_ctrl_info *ctrl_info;
6885 struct scsi_device *sdev;
6886 struct pqi_scsi_dev *device;
6887 unsigned long flags;
6888 int raid_bypass_cnt;
6889
6890 sdev = to_scsi_device(dev);
6891 ctrl_info = shost_to_hba(sdev->host);
6892
6893 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6894
6895 device = sdev->hostdata;
6896 if (!device) {
6897 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6898 return -ENODEV;
6899 }
6900
6901 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
6902
6903 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6904
6905 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
6906}
6907
6908static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6909static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6910static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
6911static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6912static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
6913static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6914static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
6915
6916static struct device_attribute *pqi_sdev_attrs[] = {
6917 &dev_attr_lunid,
6918 &dev_attr_unique_id,
6919 &dev_attr_path_info,
6920 &dev_attr_sas_address,
6921 &dev_attr_ssd_smart_path_enabled,
6922 &dev_attr_raid_level,
6923 &dev_attr_raid_bypass_cnt,
6924 NULL
6925};
6926
6927static struct scsi_host_template pqi_driver_template = {
6928 .module = THIS_MODULE,
6929 .name = DRIVER_NAME_SHORT,
6930 .proc_name = DRIVER_NAME_SHORT,
6931 .queuecommand = pqi_scsi_queue_command,
6932 .scan_start = pqi_scan_start,
6933 .scan_finished = pqi_scan_finished,
6934 .this_id = -1,
6935 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6936 .ioctl = pqi_ioctl,
6937 .slave_alloc = pqi_slave_alloc,
6938 .slave_configure = pqi_slave_configure,
6939 .slave_destroy = pqi_slave_destroy,
6940 .map_queues = pqi_map_queues,
6941 .sdev_attrs = pqi_sdev_attrs,
6942 .shost_attrs = pqi_shost_attrs,
6943};
6944
6945static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6946{
6947 int rc;
6948 struct Scsi_Host *shost;
6949
6950 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6951 if (!shost) {
6952 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
6953 return -ENOMEM;
6954 }
6955
6956 shost->io_port = 0;
6957 shost->n_io_port = 0;
6958 shost->this_id = -1;
6959 shost->max_channel = PQI_MAX_BUS;
6960 shost->max_cmd_len = MAX_COMMAND_SIZE;
6961 shost->max_lun = ~0;
6962 shost->max_id = ~0;
6963 shost->max_sectors = ctrl_info->max_sectors;
6964 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6965 shost->cmd_per_lun = shost->can_queue;
6966 shost->sg_tablesize = ctrl_info->sg_tablesize;
6967 shost->transportt = pqi_sas_transport_template;
6968 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6969 shost->unique_id = shost->irq;
6970 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6971 shost->host_tagset = 1;
6972 shost->hostdata[0] = (unsigned long)ctrl_info;
6973
6974 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6975 if (rc) {
6976 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
6977 goto free_host;
6978 }
6979
6980 rc = pqi_add_sas_host(shost, ctrl_info);
6981 if (rc) {
6982 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
6983 goto remove_host;
6984 }
6985
6986 ctrl_info->scsi_host = shost;
6987
6988 return 0;
6989
6990remove_host:
6991 scsi_remove_host(shost);
6992free_host:
6993 scsi_host_put(shost);
6994
6995 return rc;
6996}
6997
6998static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6999{
7000 struct Scsi_Host *shost;
7001
7002 pqi_delete_sas_host(ctrl_info);
7003
7004 shost = ctrl_info->scsi_host;
7005 if (!shost)
7006 return;
7007
7008 scsi_remove_host(shost);
7009 scsi_host_put(shost);
7010}
7011
7012static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7013{
7014 int rc = 0;
7015 struct pqi_device_registers __iomem *pqi_registers;
7016 unsigned long timeout;
7017 unsigned int timeout_msecs;
7018 union pqi_reset_register reset_reg;
7019
7020 pqi_registers = ctrl_info->pqi_registers;
7021 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7022 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7023
7024 while (1) {
7025 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7026 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7027 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7028 break;
7029 pqi_check_ctrl_health(ctrl_info);
7030 if (pqi_ctrl_offline(ctrl_info)) {
7031 rc = -ENXIO;
7032 break;
7033 }
7034 if (time_after(jiffies, timeout)) {
7035 rc = -ETIMEDOUT;
7036 break;
7037 }
7038 }
7039
7040 return rc;
7041}
7042
7043static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7044{
7045 int rc;
7046 union pqi_reset_register reset_reg;
7047
7048 if (ctrl_info->pqi_reset_quiesce_supported) {
7049 rc = sis_pqi_reset_quiesce(ctrl_info);
7050 if (rc) {
7051 dev_err(&ctrl_info->pci_dev->dev,
7052 "PQI reset failed during quiesce with error %d\n", rc);
7053 return rc;
7054 }
7055 }
7056
7057 reset_reg.all_bits = 0;
7058 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7059 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7060
7061 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7062
7063 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7064 if (rc)
7065 dev_err(&ctrl_info->pci_dev->dev,
7066 "PQI reset failed with error %d\n", rc);
7067
7068 return rc;
7069}
7070
7071static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7072{
7073 int rc;
7074 struct bmic_sense_subsystem_info *sense_info;
7075
7076 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7077 if (!sense_info)
7078 return -ENOMEM;
7079
7080 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7081 if (rc)
7082 goto out;
7083
7084 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7085 sizeof(sense_info->ctrl_serial_number));
7086 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7087
7088out:
7089 kfree(sense_info);
7090
7091 return rc;
7092}
7093
7094static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7095{
7096 int rc;
7097 struct bmic_identify_controller *identify;
7098
7099 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7100 if (!identify)
7101 return -ENOMEM;
7102
7103 rc = pqi_identify_controller(ctrl_info, identify);
7104 if (rc)
7105 goto out;
7106
7107 if (get_unaligned_le32(&identify->extra_controller_flags) &
7108 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7109 memcpy(ctrl_info->firmware_version,
7110 identify->firmware_version_long,
7111 sizeof(identify->firmware_version_long));
7112 } else {
7113 memcpy(ctrl_info->firmware_version,
7114 identify->firmware_version_short,
7115 sizeof(identify->firmware_version_short));
7116 ctrl_info->firmware_version
7117 [sizeof(identify->firmware_version_short)] = '\0';
7118 snprintf(ctrl_info->firmware_version +
7119 strlen(ctrl_info->firmware_version),
7120 sizeof(ctrl_info->firmware_version) -
7121 sizeof(identify->firmware_version_short),
7122 "-%u",
7123 get_unaligned_le16(&identify->firmware_build_number));
7124 }
7125
7126 memcpy(ctrl_info->model, identify->product_id,
7127 sizeof(identify->product_id));
7128 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7129
7130 memcpy(ctrl_info->vendor, identify->vendor_id,
7131 sizeof(identify->vendor_id));
7132 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7133
7134out:
7135 kfree(identify);
7136
7137 return rc;
7138}
7139
7140struct pqi_config_table_section_info {
7141 struct pqi_ctrl_info *ctrl_info;
7142 void *section;
7143 u32 section_offset;
7144 void __iomem *section_iomem_addr;
7145};
7146
7147static inline bool pqi_is_firmware_feature_supported(
7148 struct pqi_config_table_firmware_features *firmware_features,
7149 unsigned int bit_position)
7150{
7151 unsigned int byte_index;
7152
7153 byte_index = bit_position / BITS_PER_BYTE;
7154
7155 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7156 return false;
7157
7158 return firmware_features->features_supported[byte_index] &
7159 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7160}
7161
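/*
 * In the config table, the features_supported[] byte array is followed by two
 * more arrays of the same length: the host-requested bitmap and the
 * firmware-enabled bitmap. The helpers below compute offsets relative to
 * features_supported[], which is why num_elements shows up in the indexing.
 */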
7162static inline bool pqi_is_firmware_feature_enabled(
7163 struct pqi_config_table_firmware_features *firmware_features,
7164 void __iomem *firmware_features_iomem_addr,
7165 unsigned int bit_position)
7166{
7167 unsigned int byte_index;
7168 u8 __iomem *features_enabled_iomem_addr;
7169
7170 byte_index = (bit_position / BITS_PER_BYTE) +
7171 (le16_to_cpu(firmware_features->num_elements) * 2);
7172
7173 features_enabled_iomem_addr = firmware_features_iomem_addr +
7174 offsetof(struct pqi_config_table_firmware_features,
7175 features_supported) + byte_index;
7176
7177 return *((__force u8 *)features_enabled_iomem_addr) &
7178 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7179}
7180
7181static inline void pqi_request_firmware_feature(
7182 struct pqi_config_table_firmware_features *firmware_features,
7183 unsigned int bit_position)
7184{
7185 unsigned int byte_index;
7186
7187 byte_index = (bit_position / BITS_PER_BYTE) +
7188 le16_to_cpu(firmware_features->num_elements);
7189
7190 firmware_features->features_supported[byte_index] |=
7191 (1 << (bit_position % BITS_PER_BYTE));
7192}
7193
7194static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7195 u16 first_section, u16 last_section)
7196{
7197 struct pqi_vendor_general_request request;
7198
7199 memset(&request, 0, sizeof(request));
7200
7201 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7202 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7203 &request.header.iu_length);
7204 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7205 &request.function_code);
7206 put_unaligned_le16(first_section,
7207 &request.data.config_table_update.first_section);
7208 put_unaligned_le16(last_section,
7209 &request.data.config_table_update.last_section);
7210
7211 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7212}
7213
7214static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7215 struct pqi_config_table_firmware_features *firmware_features,
7216 void __iomem *firmware_features_iomem_addr)
7217{
7218 void *features_requested;
7219 void __iomem *features_requested_iomem_addr;
7220 void __iomem *host_max_known_feature_iomem_addr;
7221
7222 features_requested = firmware_features->features_supported +
7223 le16_to_cpu(firmware_features->num_elements);
7224
7225 features_requested_iomem_addr = firmware_features_iomem_addr +
7226 (features_requested - (void *)firmware_features);
7227
7228 memcpy_toio(features_requested_iomem_addr, features_requested,
7229 le16_to_cpu(firmware_features->num_elements));
7230
7231 if (pqi_is_firmware_feature_supported(firmware_features,
7232 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7233 host_max_known_feature_iomem_addr =
7234 features_requested_iomem_addr +
7235 (le16_to_cpu(firmware_features->num_elements) * 2) +
7236 sizeof(__le16);
7237 writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7238 host_max_known_feature_iomem_addr);
7239 }
7240
7241 return pqi_config_table_update(ctrl_info,
7242 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7243 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7244}
7245
7246struct pqi_firmware_feature {
7247 char *feature_name;
7248 unsigned int feature_bit;
7249 bool supported;
7250 bool enabled;
7251 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7252 struct pqi_firmware_feature *firmware_feature);
7253};
7254
7255static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7256 struct pqi_firmware_feature *firmware_feature)
7257{
7258 if (!firmware_feature->supported) {
7259 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7260 firmware_feature->feature_name);
7261 return;
7262 }
7263
7264 if (firmware_feature->enabled) {
7265 dev_info(&ctrl_info->pci_dev->dev,
7266 "%s enabled\n", firmware_feature->feature_name);
7267 return;
7268 }
7269
7270 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7271 firmware_feature->feature_name);
7272}
7273
7274static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7275 struct pqi_firmware_feature *firmware_feature)
7276{
7277 switch (firmware_feature->feature_bit) {
7278 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7279 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7280 break;
7281 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7282 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7283 break;
7284 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7285 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7286 break;
7287 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7288 ctrl_info->soft_reset_handshake_supported =
7289 firmware_feature->enabled &&
7290 pqi_read_soft_reset_status(ctrl_info);
7291 break;
7292 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7293 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7294 break;
7295 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7296 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7297 break;
7298 case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
7299 ctrl_info->unique_wwid_in_report_phys_lun_supported =
7300 firmware_feature->enabled;
7301 break;
7302 }
7303
7304 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7305}
7306
7307static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7308 struct pqi_firmware_feature *firmware_feature)
7309{
7310 if (firmware_feature->feature_status)
7311 firmware_feature->feature_status(ctrl_info, firmware_feature);
7312}
7313
7314static DEFINE_MUTEX(pqi_firmware_features_mutex);
7315
7316static struct pqi_firmware_feature pqi_firmware_features[] = {
7317 {
7318 .feature_name = "Online Firmware Activation",
7319 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7320 .feature_status = pqi_firmware_feature_status,
7321 },
7322 {
7323 .feature_name = "Serial Management Protocol",
7324 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7325 .feature_status = pqi_firmware_feature_status,
7326 },
7327 {
7328 .feature_name = "Maximum Known Feature",
7329 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7330 .feature_status = pqi_firmware_feature_status,
7331 },
7332 {
7333 .feature_name = "RAID 0 Read Bypass",
7334 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7335 .feature_status = pqi_firmware_feature_status,
7336 },
7337 {
7338 .feature_name = "RAID 1 Read Bypass",
7339 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7340 .feature_status = pqi_firmware_feature_status,
7341 },
7342 {
7343 .feature_name = "RAID 5 Read Bypass",
7344 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7345 .feature_status = pqi_firmware_feature_status,
7346 },
7347 {
7348 .feature_name = "RAID 6 Read Bypass",
7349 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7350 .feature_status = pqi_firmware_feature_status,
7351 },
7352 {
7353 .feature_name = "RAID 0 Write Bypass",
7354 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7355 .feature_status = pqi_firmware_feature_status,
7356 },
7357 {
7358 .feature_name = "RAID 1 Write Bypass",
7359 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7360 .feature_status = pqi_ctrl_update_feature_flags,
7361 },
7362 {
7363 .feature_name = "RAID 5 Write Bypass",
7364 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7365 .feature_status = pqi_ctrl_update_feature_flags,
7366 },
7367 {
7368 .feature_name = "RAID 6 Write Bypass",
7369 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7370 .feature_status = pqi_ctrl_update_feature_flags,
7371 },
7372 {
7373 .feature_name = "New Soft Reset Handshake",
7374 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7375 .feature_status = pqi_ctrl_update_feature_flags,
7376 },
7377 {
7378 .feature_name = "RAID IU Timeout",
7379 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7380 .feature_status = pqi_ctrl_update_feature_flags,
7381 },
7382 {
7383 .feature_name = "TMF IU Timeout",
7384 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7385 .feature_status = pqi_ctrl_update_feature_flags,
7386 },
7387 {
7388 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7389 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7390 .feature_status = pqi_firmware_feature_status,
7391 },
7392 {
7393 .feature_name = "Unique WWID in Report Physical LUN",
7394 .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
7395 .feature_status = pqi_ctrl_update_feature_flags,
7396 },
7397};
7398
7399static void pqi_process_firmware_features(
7400 struct pqi_config_table_section_info *section_info)
7401{
7402 int rc;
7403 struct pqi_ctrl_info *ctrl_info;
7404 struct pqi_config_table_firmware_features *firmware_features;
7405 void __iomem *firmware_features_iomem_addr;
7406 unsigned int i;
7407 unsigned int num_features_supported;
7408
7409 ctrl_info = section_info->ctrl_info;
7410 firmware_features = section_info->section;
7411 firmware_features_iomem_addr = section_info->section_iomem_addr;
7412
7413 for (i = 0, num_features_supported = 0;
7414 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7415 if (pqi_is_firmware_feature_supported(firmware_features,
7416 pqi_firmware_features[i].feature_bit)) {
7417 pqi_firmware_features[i].supported = true;
7418 num_features_supported++;
7419 } else {
7420 pqi_firmware_feature_update(ctrl_info,
7421 &pqi_firmware_features[i]);
7422 }
7423 }
7424
7425 if (num_features_supported == 0)
7426 return;
7427
7428 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7429 if (!pqi_firmware_features[i].supported)
7430 continue;
7431 pqi_request_firmware_feature(firmware_features,
7432 pqi_firmware_features[i].feature_bit);
7433 }
7434
7435 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7436 firmware_features_iomem_addr);
7437 if (rc) {
7438 dev_err(&ctrl_info->pci_dev->dev,
7439 "failed to enable firmware features in PQI configuration table\n");
7440 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7441 if (!pqi_firmware_features[i].supported)
7442 continue;
7443 pqi_firmware_feature_update(ctrl_info,
7444 &pqi_firmware_features[i]);
7445 }
7446 return;
7447 }
7448
7449 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7450 if (!pqi_firmware_features[i].supported)
7451 continue;
7452 if (pqi_is_firmware_feature_enabled(firmware_features,
7453 firmware_features_iomem_addr,
7454 pqi_firmware_features[i].feature_bit)) {
7455 pqi_firmware_features[i].enabled = true;
7456 }
7457 pqi_firmware_feature_update(ctrl_info,
7458 &pqi_firmware_features[i]);
7459 }
7460}
7461
7462static void pqi_init_firmware_features(void)
7463{
7464 unsigned int i;
7465
7466 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7467 pqi_firmware_features[i].supported = false;
7468 pqi_firmware_features[i].enabled = false;
7469 }
7470}
7471
7472static void pqi_process_firmware_features_section(
7473 struct pqi_config_table_section_info *section_info)
7474{
7475 mutex_lock(&pqi_firmware_features_mutex);
7476 pqi_init_firmware_features();
7477 pqi_process_firmware_features(section_info);
7478 mutex_unlock(&pqi_firmware_features_mutex);
7479}
7480
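/*
 * Reset the controller settings that get (re)initialized while processing the
 * PQI configuration table back to their default values.
 */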
7486static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7487{
7488 ctrl_info->heartbeat_counter = NULL;
7489 ctrl_info->soft_reset_status = NULL;
7490 ctrl_info->soft_reset_handshake_supported = false;
7491 ctrl_info->enable_r1_writes = false;
7492 ctrl_info->enable_r5_writes = false;
7493 ctrl_info->enable_r6_writes = false;
7494 ctrl_info->raid_iu_timeout_supported = false;
7495 ctrl_info->tmf_iu_timeout_supported = false;
7496 ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
7497}
7498
7499static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7500{
7501 u32 table_length;
7502 u32 section_offset;
7503 bool firmware_feature_section_present;
7504 void __iomem *table_iomem_addr;
7505 struct pqi_config_table *config_table;
7506 struct pqi_config_table_section_header *section;
7507 struct pqi_config_table_section_info section_info;
7508 struct pqi_config_table_section_info feature_section_info;
7509
7510 table_length = ctrl_info->config_table_length;
7511 if (table_length == 0)
7512 return 0;
7513
7514 config_table = kmalloc(table_length, GFP_KERNEL);
7515 if (!config_table) {
7516 dev_err(&ctrl_info->pci_dev->dev,
7517 "failed to allocate memory for PQI configuration table\n");
7518 return -ENOMEM;
7519 }
7520
7521 /*
7522 * Copy the config table contents from I/O memory space into the
7523 * temporary buffer.
7524 */
7525 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7526 memcpy_fromio(config_table, table_iomem_addr, table_length);
7527
7528 firmware_feature_section_present = false;
7529 section_info.ctrl_info = ctrl_info;
7530 section_offset = get_unaligned_le32(&config_table->first_section_offset);
7531
7532 while (section_offset) {
7533 section = (void *)config_table + section_offset;
7534
7535 section_info.section = section;
7536 section_info.section_offset = section_offset;
7537 section_info.section_iomem_addr = table_iomem_addr + section_offset;
7538
7539 switch (get_unaligned_le16(&section->section_id)) {
7540 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7541 firmware_feature_section_present = true;
7542 feature_section_info = section_info;
7543 break;
7544 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7545 if (pqi_disable_heartbeat)
7546 dev_warn(&ctrl_info->pci_dev->dev,
7547 "heartbeat disabled by module parameter\n");
7548 else
7549 ctrl_info->heartbeat_counter =
7550 table_iomem_addr +
7551 section_offset +
7552 offsetof(struct pqi_config_table_heartbeat,
7553 heartbeat_counter);
7554 break;
7555 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7556 ctrl_info->soft_reset_status =
7557 table_iomem_addr +
7558 section_offset +
7559 offsetof(struct pqi_config_table_soft_reset,
7560 soft_reset_status);
7561 break;
7562 }
7563
7564 section_offset = get_unaligned_le16(&section->next_section_offset);
7565 }
7566
7567 /*
7568 * We process the firmware feature section after all other sections
7569 * have been processed in case the firmware feature section was
7570 * dependent on any other section.
7571 */
7572 if (firmware_feature_section_present)
7573 pqi_process_firmware_features_section(&feature_section_info);
7574
7575 kfree(config_table);
7576
7577 return 0;
7578}
7579
7580
7581/* Switches the controller from PQI mode back into SIS mode. */
7582static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7583{
7584 int rc;
7585
7586 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7587 rc = pqi_reset(ctrl_info);
7588 if (rc)
7589 return rc;
7590 rc = sis_reenable_sis_mode(ctrl_info);
7591 if (rc) {
7592 dev_err(&ctrl_info->pci_dev->dev,
7593 "re-enabling SIS mode failed with error %d\n", rc);
7594 return rc;
7595 }
7596 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7597
7598 return 0;
7599}
7600
7601
7602/*
7603 * If the controller isn't already in SIS mode, this function forces it
7604 * into SIS mode.
7605 */
7606static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7607{
7608 if (!sis_is_firmware_running(ctrl_info))
7609 return -ENXIO;
7610
7611 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7612 return 0;
7613
7614 if (sis_is_kernel_up(ctrl_info)) {
7615 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7616 return 0;
7617 }
7618
7619 return pqi_revert_to_sis_mode(ctrl_info);
7620}
7621
7622static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7623{
7624 int rc;
7625 u32 product_id;
7626
7627 if (reset_devices) {
7628 sis_soft_reset(ctrl_info);
7629 msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
7630 } else {
7631 rc = pqi_force_sis_mode(ctrl_info);
7632 if (rc)
7633 return rc;
7634 }
7635
7636 /*
7637 * Wait until the controller is ready to start accepting SIS
7638 * commands.
7639 */
7640 rc = sis_wait_for_ctrl_ready(ctrl_info);
7641 if (rc)
7642 return rc;
7643
7644 /*
7645 * Get the controller properties. This allows us to determine
7646 * whether or not it supports PQI mode.
7647 */
7648 rc = sis_get_ctrl_properties(ctrl_info);
7649 if (rc) {
7650 dev_err(&ctrl_info->pci_dev->dev,
7651 "error obtaining controller properties\n");
7652 return rc;
7653 }
7654
7655 rc = sis_get_pqi_capabilities(ctrl_info);
7656 if (rc) {
7657 dev_err(&ctrl_info->pci_dev->dev,
7658 "error obtaining controller capabilities\n");
7659 return rc;
7660 }
7661
7662 product_id = sis_get_product_id(ctrl_info);
7663 ctrl_info->product_id = (u8)product_id;
7664 ctrl_info->product_revision = (u8)(product_id >> 8);
7665
7666 if (reset_devices) {
7667 if (ctrl_info->max_outstanding_requests >
7668 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
7669 ctrl_info->max_outstanding_requests =
7670 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
7671 } else {
7672 if (ctrl_info->max_outstanding_requests >
7673 PQI_MAX_OUTSTANDING_REQUESTS)
7674 ctrl_info->max_outstanding_requests =
7675 PQI_MAX_OUTSTANDING_REQUESTS;
7676 }
7677
7678 pqi_calculate_io_resources(ctrl_info);
7679
7680 rc = pqi_alloc_error_buffer(ctrl_info);
7681 if (rc) {
7682 dev_err(&ctrl_info->pci_dev->dev,
7683 "failed to allocate PQI error buffer\n");
7684 return rc;
7685 }
7686
7687 /*
7688 * If the function we are about to call succeeds, the
7689 * controller will transition from legacy SIS mode
7690 * into PQI mode.
7691 */
7692 rc = sis_init_base_struct_addr(ctrl_info);
7693 if (rc) {
7694 dev_err(&ctrl_info->pci_dev->dev,
7695 "error initializing PQI mode\n");
7696 return rc;
7697 }
7698
7699 /* Wait for the controller to complete the SIS -> PQI transition. */
7700 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7701 if (rc) {
7702 dev_err(&ctrl_info->pci_dev->dev,
7703 "transition to PQI mode failed\n");
7704 return rc;
7705 }
7706
7707 /* From here on, we are running in PQI mode. */
7708 ctrl_info->pqi_mode_enabled = true;
7709 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7710
7711 rc = pqi_alloc_admin_queues(ctrl_info);
7712 if (rc) {
7713 dev_err(&ctrl_info->pci_dev->dev,
7714 "failed to allocate admin queues\n");
7715 return rc;
7716 }
7717
7718 rc = pqi_create_admin_queues(ctrl_info);
7719 if (rc) {
7720 dev_err(&ctrl_info->pci_dev->dev,
7721 "error creating admin queues\n");
7722 return rc;
7723 }
7724
7725 rc = pqi_report_device_capability(ctrl_info);
7726 if (rc) {
7727 dev_err(&ctrl_info->pci_dev->dev,
7728 "obtaining device capability failed\n");
7729 return rc;
7730 }
7731
7732 rc = pqi_validate_device_capability(ctrl_info);
7733 if (rc)
7734 return rc;
7735
7736 pqi_calculate_queue_resources(ctrl_info);
7737
7738 rc = pqi_enable_msix_interrupts(ctrl_info);
7739 if (rc)
7740 return rc;
7741
7742 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7743 ctrl_info->max_msix_vectors =
7744 ctrl_info->num_msix_vectors_enabled;
7745 pqi_calculate_queue_resources(ctrl_info);
7746 }
7747
7748 rc = pqi_alloc_io_resources(ctrl_info);
7749 if (rc)
7750 return rc;
7751
7752 rc = pqi_alloc_operational_queues(ctrl_info);
7753 if (rc) {
7754 dev_err(&ctrl_info->pci_dev->dev,
7755 "failed to allocate operational queues\n");
7756 return rc;
7757 }
7758
7759 pqi_init_operational_queues(ctrl_info);
7760
7761 rc = pqi_request_irqs(ctrl_info);
7762 if (rc)
7763 return rc;
7764
7765 rc = pqi_create_queues(ctrl_info);
7766 if (rc)
7767 return rc;
7768
7769 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7770
7771 ctrl_info->controller_online = true;
7772
7773 rc = pqi_process_config_table(ctrl_info);
7774 if (rc)
7775 return rc;
7776
7777 pqi_start_heartbeat_timer(ctrl_info);
7778
7779 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
7780 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
7781 if (rc) {
7782 dev_err(&ctrl_info->pci_dev->dev,
7783 "error obtaining advanced RAID bypass configuration\n");
7784 return rc;
7785 }
7786 ctrl_info->ciss_report_log_flags |=
7787 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
7788 }
7789
7790 rc = pqi_enable_events(ctrl_info);
7791 if (rc) {
7792 dev_err(&ctrl_info->pci_dev->dev,
7793 "error enabling events\n");
7794 return rc;
7795 }
7796
7797 /* Register with the SCSI subsystem. */
7798 rc = pqi_register_scsi(ctrl_info);
7799 if (rc)
7800 return rc;
7801
7802 rc = pqi_get_ctrl_product_details(ctrl_info);
7803 if (rc) {
7804 dev_err(&ctrl_info->pci_dev->dev,
7805 "error obtaining product details\n");
7806 return rc;
7807 }
7808
7809 rc = pqi_get_ctrl_serial_number(ctrl_info);
7810 if (rc) {
7811 dev_err(&ctrl_info->pci_dev->dev,
7812 "error obtaining ctrl serial number\n");
7813 return rc;
7814 }
7815
7816 rc = pqi_set_diag_rescan(ctrl_info);
7817 if (rc) {
7818 dev_err(&ctrl_info->pci_dev->dev,
7819 "error enabling multi-lun rescan\n");
7820 return rc;
7821 }
7822
7823 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7824 if (rc) {
7825 dev_err(&ctrl_info->pci_dev->dev,
7826 "error updating host wellness\n");
7827 return rc;
7828 }
7829
7830 pqi_schedule_update_time_worker(ctrl_info);
7831
7832 pqi_scan_scsi_devices(ctrl_info);
7833
7834 return 0;
7835}
7836
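/*
 * Reset the cached producer/consumer indexes and the corresponding
 * hardware registers for the admin, operational, and event queues so the
 * queues start out empty when the controller is re-initialized.
 */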
7837static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7838{
7839 unsigned int i;
7840 struct pqi_admin_queues *admin_queues;
7841 struct pqi_event_queue *event_queue;
7842
7843 admin_queues = &ctrl_info->admin_queues;
7844 admin_queues->iq_pi_copy = 0;
7845 admin_queues->oq_ci_copy = 0;
7846 writel(0, admin_queues->oq_pi);
7847
7848 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7849 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7850 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7851 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7852
7853 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7854 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7855 writel(0, ctrl_info->queue_groups[i].oq_pi);
7856 }
7857
7858 event_queue = &ctrl_info->event_queue;
7859 writel(0, event_queue->oq_pi);
7860 event_queue->oq_ci_copy = 0;
7861}
7862
7863static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7864{
7865 int rc;
7866
7867 rc = pqi_force_sis_mode(ctrl_info);
7868 if (rc)
7869 return rc;
7870
7871 /*
7872 * Wait until the controller is ready to start accepting SIS
7873 * commands.
7874 */
7875 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7876 if (rc)
7877 return rc;
7878
7879 /*
7880 * Get the controller properties. This allows us to determine
7881 * whether or not it supports PQI mode.
7882 */
7883 rc = sis_get_ctrl_properties(ctrl_info);
7884 if (rc) {
7885 dev_err(&ctrl_info->pci_dev->dev,
7886 "error obtaining controller properties\n");
7887 return rc;
7888 }
7889
7890 rc = sis_get_pqi_capabilities(ctrl_info);
7891 if (rc) {
7892 dev_err(&ctrl_info->pci_dev->dev,
7893 "error obtaining controller capabilities\n");
7894 return rc;
7895 }
7896
7897 /*
7898 * If the function we are about to call succeeds, the
7899 * controller will transition from legacy SIS mode
7900 * into PQI mode.
7901 */
7902 rc = sis_init_base_struct_addr(ctrl_info);
7903 if (rc) {
7904 dev_err(&ctrl_info->pci_dev->dev,
7905 "error initializing PQI mode\n");
7906 return rc;
7907 }
7908
7909 /* Wait for the controller to complete the SIS -> PQI transition. */
7910 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7911 if (rc) {
7912 dev_err(&ctrl_info->pci_dev->dev,
7913 "transition to PQI mode failed\n");
7914 return rc;
7915 }
7916
7917 /* From here on, we are running in PQI mode. */
7918 ctrl_info->pqi_mode_enabled = true;
7919 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7920
7921 pqi_reinit_queues(ctrl_info);
7922
7923 rc = pqi_create_admin_queues(ctrl_info);
7924 if (rc) {
7925 dev_err(&ctrl_info->pci_dev->dev,
7926 "error creating admin queues\n");
7927 return rc;
7928 }
7929
7930 rc = pqi_create_queues(ctrl_info);
7931 if (rc)
7932 return rc;
7933
7934 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7935
7936 ctrl_info->controller_online = true;
7937 pqi_ctrl_unblock_requests(ctrl_info);
7938
7939 pqi_ctrl_reset_config(ctrl_info);
7940
7941 rc = pqi_process_config_table(ctrl_info);
7942 if (rc)
7943 return rc;
7944
7945 pqi_start_heartbeat_timer(ctrl_info);
7946
7947 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
7948 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
7949 if (rc) {
7950 dev_err(&ctrl_info->pci_dev->dev,
7951 "error obtaining advanced RAID bypass configuration\n");
7952 return rc;
7953 }
7954 ctrl_info->ciss_report_log_flags |=
7955 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
7956 }
7957
7958 rc = pqi_enable_events(ctrl_info);
7959 if (rc) {
7960 dev_err(&ctrl_info->pci_dev->dev,
7961 "error enabling events\n");
7962 return rc;
7963 }
7964
7965 rc = pqi_get_ctrl_product_details(ctrl_info);
7966 if (rc) {
7967 dev_err(&ctrl_info->pci_dev->dev,
7968 "error obtaining product details\n");
7969 return rc;
7970 }
7971
7972 rc = pqi_set_diag_rescan(ctrl_info);
7973 if (rc) {
7974 dev_err(&ctrl_info->pci_dev->dev,
7975 "error enabling multi-lun rescan\n");
7976 return rc;
7977 }
7978
7979 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7980 if (rc) {
7981 dev_err(&ctrl_info->pci_dev->dev,
7982 "error updating host wellness\n");
7983 return rc;
7984 }
7985
7986 if (pqi_ofa_in_progress(ctrl_info))
7987 pqi_ctrl_unblock_scan(ctrl_info);
7988
7989 pqi_scan_scsi_devices(ctrl_info);
7990
7991 return 0;
7992}
7993
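/* Program the PCIe completion timeout range in the Device Control 2 register. */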
7994static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
7995{
7996 int rc;
7997
7998 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7999 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8000
8001 return pcibios_err_to_errno(rc);
8002}
8003
8004static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8005{
8006 int rc;
8007 u64 mask;
8008
8009 rc = pci_enable_device(ctrl_info->pci_dev);
8010 if (rc) {
8011 dev_err(&ctrl_info->pci_dev->dev,
8012 "failed to enable PCI device\n");
8013 return rc;
8014 }
8015
8016 if (sizeof(dma_addr_t) > 4)
8017 mask = DMA_BIT_MASK(64);
8018 else
8019 mask = DMA_BIT_MASK(32);
8020
8021 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8022 if (rc) {
8023 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8024 goto disable_device;
8025 }
8026
8027 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8028 if (rc) {
8029 dev_err(&ctrl_info->pci_dev->dev,
8030 "failed to obtain PCI resources\n");
8031 goto disable_device;
8032 }
8033
8034 ctrl_info->iomem_base = ioremap(pci_resource_start(
8035 ctrl_info->pci_dev, 0),
8036 sizeof(struct pqi_ctrl_registers));
8037 if (!ctrl_info->iomem_base) {
8038 dev_err(&ctrl_info->pci_dev->dev,
8039 "failed to map memory for controller registers\n");
8040 rc = -ENOMEM;
8041 goto release_regions;
8042 }
8043
8044#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8045
8046 /* Increase the PCIe completion timeout. */
8047 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8048 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8049 if (rc) {
8050 dev_err(&ctrl_info->pci_dev->dev,
8051 "failed to set PCIe completion timeout\n");
8052 goto release_regions;
8053 }
8054
8055 /* Enable bus mastering. */
8056 pci_set_master(ctrl_info->pci_dev);
8057
8058 ctrl_info->registers = ctrl_info->iomem_base;
8059 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8060
8061 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8062
8063 return 0;
8064
8065release_regions:
8066 pci_release_regions(ctrl_info->pci_dev);
8067disable_device:
8068 pci_disable_device(ctrl_info->pci_dev);
8069
8070 return rc;
8071}
8072
8073static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8074{
8075 iounmap(ctrl_info->iomem_base);
8076 pci_release_regions(ctrl_info->pci_dev);
8077 if (pci_is_enabled(ctrl_info->pci_dev))
8078 pci_disable_device(ctrl_info->pci_dev);
8079 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8080}
8081
8082static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8083{
8084 struct pqi_ctrl_info *ctrl_info;
8085
8086 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8087 GFP_KERNEL, numa_node);
8088 if (!ctrl_info)
8089 return NULL;
8090
8091 mutex_init(&ctrl_info->scan_mutex);
8092 mutex_init(&ctrl_info->lun_reset_mutex);
8093 mutex_init(&ctrl_info->ofa_mutex);
8094
8095 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8096 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8097
8098 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8099 atomic_set(&ctrl_info->num_interrupts, 0);
8100
8101 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8102 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8103
8104 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8105 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8106
8107 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8108 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8109
8110 sema_init(&ctrl_info->sync_request_sem,
8111 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8112 init_waitqueue_head(&ctrl_info->block_requests_wait);
8113
8114 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8115 ctrl_info->irq_mode = IRQ_MODE_NONE;
8116 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8117
8118 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8119 ctrl_info->max_transfer_encrypted_sas_sata =
8120 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8121 ctrl_info->max_transfer_encrypted_nvme =
8122 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8123 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8124 ctrl_info->max_write_raid_1_10_2drive = ~0;
8125 ctrl_info->max_write_raid_1_10_3drive = ~0;
8126
8127 return ctrl_info;
8128}
8129
8130static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8131{
8132 kfree(ctrl_info);
8133}
8134
8135static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8136{
8137 pqi_free_irqs(ctrl_info);
8138 pqi_disable_msix_interrupts(ctrl_info);
8139}
8140
8141static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8142{
8143 pqi_stop_heartbeat_timer(ctrl_info);
8144 pqi_free_interrupts(ctrl_info);
8145 if (ctrl_info->queue_memory_base)
8146 dma_free_coherent(&ctrl_info->pci_dev->dev,
8147 ctrl_info->queue_memory_length,
8148 ctrl_info->queue_memory_base,
8149 ctrl_info->queue_memory_base_dma_handle);
8150 if (ctrl_info->admin_queue_memory_base)
8151 dma_free_coherent(&ctrl_info->pci_dev->dev,
8152 ctrl_info->admin_queue_memory_length,
8153 ctrl_info->admin_queue_memory_base,
8154 ctrl_info->admin_queue_memory_base_dma_handle);
8155 pqi_free_all_io_requests(ctrl_info);
8156 if (ctrl_info->error_buffer)
8157 dma_free_coherent(&ctrl_info->pci_dev->dev,
8158 ctrl_info->error_buffer_length,
8159 ctrl_info->error_buffer,
8160 ctrl_info->error_buffer_dma_handle);
8161 if (ctrl_info->iomem_base)
8162 pqi_cleanup_pci_init(ctrl_info);
8163 pqi_free_ctrl_info(ctrl_info);
8164}
8165
8166static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8167{
8168 pqi_cancel_rescan_worker(ctrl_info);
8169 pqi_cancel_update_time_worker(ctrl_info);
8170 pqi_unregister_scsi(ctrl_info);
8171 if (ctrl_info->pqi_mode_enabled)
8172 pqi_revert_to_sis_mode(ctrl_info);
8173 pqi_free_ctrl_resources(ctrl_info);
8174}
8175
8176static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8177{
8178 pqi_ctrl_block_scan(ctrl_info);
8179 pqi_scsi_block_requests(ctrl_info);
8180 pqi_ctrl_block_device_reset(ctrl_info);
8181 pqi_ctrl_block_requests(ctrl_info);
8182 pqi_ctrl_wait_until_quiesced(ctrl_info);
8183 pqi_stop_heartbeat_timer(ctrl_info);
8184}
8185
8186static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8187{
8188 pqi_start_heartbeat_timer(ctrl_info);
8189 pqi_ctrl_unblock_requests(ctrl_info);
8190 pqi_ctrl_unblock_device_reset(ctrl_info);
8191 pqi_scsi_unblock_requests(ctrl_info);
8192 pqi_ctrl_unblock_scan(ctrl_info);
8193}
8194
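/*
 * Allocate the OFA host buffer as a series of DMA-coherent chunks and
 * describe each chunk with an SG descriptor in the OFA memory structure;
 * on any failure, free the chunks allocated so far.
 */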
8195static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
8196{
8197 int i;
8198 u32 sg_count;
8199 struct device *dev;
8200 struct pqi_ofa_memory *ofap;
8201 struct pqi_sg_descriptor *mem_descriptor;
8202 dma_addr_t dma_handle;
8203
8204 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8205
8206 sg_count = DIV_ROUND_UP(total_size, chunk_size);
8207 if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
8208 goto out;
8209
8210 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8211 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8212 goto out;
8213
8214 dev = &ctrl_info->pci_dev->dev;
8215
8216 for (i = 0; i < sg_count; i++) {
8217 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
8218 dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8219 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
8220 goto out_free_chunks;
8221 mem_descriptor = &ofap->sg_descriptor[i];
8222 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8223 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8224 }
8225
8226 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8227 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
8228 put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8229
8230 return 0;
8231
8232out_free_chunks:
8233 while (--i >= 0) {
8234 mem_descriptor = &ofap->sg_descriptor[i];
8235 dma_free_coherent(dev, chunk_size,
8236 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8237 get_unaligned_le64(&mem_descriptor->address));
8238 }
8239 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8240
8241out:
8242 return -ENOMEM;
8243}
8244
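/*
 * Try to satisfy the firmware's OFA buffer request with as few chunks as
 * possible: start with one chunk covering the whole request and halve the
 * (page-aligned) chunk size on each failure until the minimum chunk size
 * is reached.
 */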
8245static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8246{
8247 u32 total_size;
8248 u32 chunk_size;
8249 u32 min_chunk_size;
8250
8251 if (ctrl_info->ofa_bytes_requested == 0)
8252 return 0;
8253
8254 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8255 min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8256 min_chunk_size = PAGE_ALIGN(min_chunk_size);
8257
8258 for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8259 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
8260 return 0;
8261 chunk_size /= 2;
8262 chunk_size = PAGE_ALIGN(chunk_size);
8263 }
8264
8265 return -ENOMEM;
8266}
8267
8268static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
8269{
8270 struct device *dev;
8271 struct pqi_ofa_memory *ofap;
8272
8273 dev = &ctrl_info->pci_dev->dev;
8274
8275 ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8276 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8277 if (!ofap)
8278 return;
8279
8280 ctrl_info->pqi_ofa_mem_virt_addr = ofap;
8281
8282 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8283 dev_err(dev,
8284 "failed to allocate host buffer for Online Firmware Activation\n");
8285 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8286 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8287 return;
8288 }
8289
8290 put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8291 memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
8292}
8293
8294static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8295{
8296 unsigned int i;
8297 struct device *dev;
8298 struct pqi_ofa_memory *ofap;
8299 struct pqi_sg_descriptor *mem_descriptor;
8300 unsigned int num_memory_descriptors;
8301
8302 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8303 if (!ofap)
8304 return;
8305
8306 dev = &ctrl_info->pci_dev->dev;
8307
8308 if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8309 goto out;
8310
8311 mem_descriptor = ofap->sg_descriptor;
8312 num_memory_descriptors =
8313 get_unaligned_le16(&ofap->num_memory_descriptors);
8314
8315 for (i = 0; i < num_memory_descriptors; i++) {
8316 dma_free_coherent(dev,
8317 get_unaligned_le32(&mem_descriptor[i].length),
8318 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8319 get_unaligned_le64(&mem_descriptor[i].address));
8320 }
8321 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8322
8323out:
8324 dma_free_coherent(dev, sizeof(*ofap), ofap,
8325 ctrl_info->pqi_ofa_mem_dma_handle);
8326 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8327}
8328
8329static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8330{
8331 u32 buffer_length;
8332 struct pqi_vendor_general_request request;
8333 struct pqi_ofa_memory *ofap;
8334
8335 memset(&request, 0, sizeof(request));
8336
8337 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8338 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8339 &request.header.iu_length);
8340 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8341 &request.function_code);
8342
8343 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8344
8345 if (ofap) {
8346 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8347 get_unaligned_le16(&ofap->num_memory_descriptors) *
8348 sizeof(struct pqi_sg_descriptor);
8349
8350 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8351 &request.data.ofa_memory_allocation.buffer_address);
8352 put_unaligned_le32(buffer_length,
8353 &request.data.ofa_memory_allocation.buffer_length);
8354 }
8355
8356 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8357}
8358
8359static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8360{
8361 ssleep(delay_secs);
8362
8363 return pqi_ctrl_init_resume(ctrl_info);
8364}
8365
8366static void pqi_perform_lockup_action(void)
8367{
8368 switch (pqi_lockup_action) {
8369 case PANIC:
8370 panic("FATAL: Smart Family Controller lockup detected");
8371 break;
8372 case REBOOT:
8373 emergency_restart();
8374 break;
8375 case NONE:
8376 default:
8377 break;
8378 }
8379}
8380
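/*
 * Canned error information assigned to outstanding requests that have no
 * associated SCSI command when the controller is taken offline.
 */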
8381static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8382 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8383 .status = SAM_STAT_CHECK_CONDITION,
8384};
8385
8386static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8387{
8388 unsigned int i;
8389 struct pqi_io_request *io_request;
8390 struct scsi_cmnd *scmd;
8391
8392 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8393 io_request = &ctrl_info->io_request_pool[i];
8394 if (atomic_read(&io_request->refcount) == 0)
8395 continue;
8396
8397 scmd = io_request->scmd;
8398 if (scmd) {
8399 set_host_byte(scmd, DID_NO_CONNECT);
8400 } else {
8401 io_request->status = -ENXIO;
8402 io_request->error_info =
8403 &pqi_ctrl_offline_raid_error_info;
8404 }
8405
8406 io_request->io_complete_callback(io_request,
8407 io_request->context);
8408 }
8409}
8410
8411static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
8412{
8413 pqi_perform_lockup_action();
8414 pqi_stop_heartbeat_timer(ctrl_info);
8415 pqi_free_interrupts(ctrl_info);
8416 pqi_cancel_rescan_worker(ctrl_info);
8417 pqi_cancel_update_time_worker(ctrl_info);
8418 pqi_ctrl_wait_until_quiesced(ctrl_info);
8419 pqi_fail_all_outstanding_requests(ctrl_info);
8420 pqi_ctrl_unblock_requests(ctrl_info);
8421}
8422
8423static void pqi_ctrl_offline_worker(struct work_struct *work)
8424{
8425 struct pqi_ctrl_info *ctrl_info;
8426
8427 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8428 pqi_take_ctrl_offline_deferred(ctrl_info);
8429}
8430
8431static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
8432{
8433 if (!ctrl_info->controller_online)
8434 return;
8435
8436 ctrl_info->controller_online = false;
8437 ctrl_info->pqi_mode_enabled = false;
8438 pqi_ctrl_block_requests(ctrl_info);
8439 if (!pqi_disable_ctrl_shutdown)
8440 sis_shutdown_ctrl(ctrl_info);
8441 pci_disable_device(ctrl_info->pci_dev);
8442 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
8443 schedule_work(&ctrl_info->ctrl_offline_work);
8444}
8445
8446static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
8447 const struct pci_device_id *id)
8448{
8449 char *ctrl_description;
8450
8451 if (id->driver_data)
8452 ctrl_description = (char *)id->driver_data;
8453 else
8454 ctrl_description = "Microsemi Smart Family Controller";
8455
8456 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
8457}
8458
8459static int pqi_pci_probe(struct pci_dev *pci_dev,
8460 const struct pci_device_id *id)
8461{
8462 int rc;
8463 int node, cp_node;
8464 struct pqi_ctrl_info *ctrl_info;
8465
8466 pqi_print_ctrl_info(pci_dev, id);
8467
8468 if (pqi_disable_device_id_wildcards &&
8469 id->subvendor == PCI_ANY_ID &&
8470 id->subdevice == PCI_ANY_ID) {
8471 dev_warn(&pci_dev->dev,
8472 "controller not probed because device ID wildcards are disabled\n");
8473 return -ENODEV;
8474 }
8475
8476 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
8477 dev_warn(&pci_dev->dev,
8478 "controller device ID matched using wildcards\n");
8479
8480 node = dev_to_node(&pci_dev->dev);
8481 if (node == NUMA_NO_NODE) {
8482 cp_node = cpu_to_node(0);
8483 if (cp_node == NUMA_NO_NODE)
8484 cp_node = 0;
8485 set_dev_node(&pci_dev->dev, cp_node);
8486 }
8487
8488 ctrl_info = pqi_alloc_ctrl_info(node);
8489 if (!ctrl_info) {
8490 dev_err(&pci_dev->dev,
8491 "failed to allocate controller info block\n");
8492 return -ENOMEM;
8493 }
8494
8495 ctrl_info->pci_dev = pci_dev;
8496
8497 rc = pqi_pci_init(ctrl_info);
8498 if (rc)
8499 goto error;
8500
8501 rc = pqi_ctrl_init(ctrl_info);
8502 if (rc)
8503 goto error;
8504
8505 return 0;
8506
8507error:
8508 pqi_remove_ctrl(ctrl_info);
8509
8510 return rc;
8511}
8512
8513static void pqi_pci_remove(struct pci_dev *pci_dev)
8514{
8515 struct pqi_ctrl_info *ctrl_info;
8516
8517 ctrl_info = pci_get_drvdata(pci_dev);
8518 if (!ctrl_info)
8519 return;
8520
8521 pqi_remove_ctrl(ctrl_info);
8522}
8523
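/*
 * Warn if any I/O request is still outstanding when the controller is
 * being shut down or suspended.
 */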
8524static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8525{
8526 unsigned int i;
8527 struct pqi_io_request *io_request;
8528 struct scsi_cmnd *scmd;
8529
8530 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8531 io_request = &ctrl_info->io_request_pool[i];
8532 if (atomic_read(&io_request->refcount) == 0)
8533 continue;
8534 scmd = io_request->scmd;
8535 WARN_ON(scmd != NULL); /* outstanding SCSI command from the midlayer */
8536 WARN_ON(scmd == NULL); /* outstanding driver-initiated request */
8537 }
8538}
8539
8540static void pqi_shutdown(struct pci_dev *pci_dev)
8541{
8542 int rc;
8543 struct pqi_ctrl_info *ctrl_info;
8544
8545 ctrl_info = pci_get_drvdata(pci_dev);
8546 if (!ctrl_info) {
8547 dev_err(&pci_dev->dev,
8548 "cache could not be flushed\n");
8549 return;
8550 }
8551
8552 pqi_wait_until_ofa_finished(ctrl_info);
8553
8554 pqi_scsi_block_requests(ctrl_info);
8555 pqi_ctrl_block_device_reset(ctrl_info);
8556 pqi_ctrl_block_requests(ctrl_info);
8557 pqi_ctrl_wait_until_quiesced(ctrl_info);
8558
8559 /*
8560 * Write all data in the controller's battery-backed cache to
8561 * storage.
8562 */
8563 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
8564 if (rc)
8565 dev_err(&pci_dev->dev,
8566 "unable to flush controller cache\n");
8567
8568 pqi_crash_if_pending_command(ctrl_info);
8569 pqi_reset(ctrl_info);
8570}
8571
8572static void pqi_process_lockup_action_param(void)
8573{
8574 unsigned int i;
8575
8576 if (!pqi_lockup_action_param)
8577 return;
8578
8579 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
8580 if (strcmp(pqi_lockup_action_param,
8581 pqi_lockup_actions[i].name) == 0) {
8582 pqi_lockup_action = pqi_lockup_actions[i].action;
8583 return;
8584 }
8585 }
8586
8587 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
8588 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8589}
8590
8591static void pqi_process_module_params(void)
8592{
8593 pqi_process_lockup_action_param();
8594}
8595
8596static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
8597{
8598 struct pqi_ctrl_info *ctrl_info;
8599
8600 ctrl_info = pci_get_drvdata(pci_dev);
8601
8602 pqi_wait_until_ofa_finished(ctrl_info);
8603
8604 pqi_ctrl_block_scan(ctrl_info);
8605 pqi_scsi_block_requests(ctrl_info);
8606 pqi_ctrl_block_device_reset(ctrl_info);
8607 pqi_ctrl_block_requests(ctrl_info);
8608 pqi_ctrl_wait_until_quiesced(ctrl_info);
8609 pqi_flush_cache(ctrl_info, SUSPEND);
8610 pqi_stop_heartbeat_timer(ctrl_info);
8611
8612 pqi_crash_if_pending_command(ctrl_info);
8613
8614 if (state.event == PM_EVENT_FREEZE)
8615 return 0;
8616
8617 pci_save_state(pci_dev);
8618 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
8619
8620 ctrl_info->controller_online = false;
8621 ctrl_info->pqi_mode_enabled = false;
8622
8623 return 0;
8624}
8625
8626static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
8627{
8628 int rc;
8629 struct pqi_ctrl_info *ctrl_info;
8630
8631 ctrl_info = pci_get_drvdata(pci_dev);
8632
8633 if (pci_dev->current_state != PCI_D0) {
8634 ctrl_info->max_hw_queue_index = 0;
8635 pqi_free_interrupts(ctrl_info);
8636 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
8637 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
8638 IRQF_SHARED, DRIVER_NAME_SHORT,
8639 &ctrl_info->queue_groups[0]);
8640 if (rc) {
8641 dev_err(&ctrl_info->pci_dev->dev,
8642 "irq %u init failed with error %d\n",
8643 pci_dev->irq, rc);
8644 return rc;
8645 }
8646 pqi_ctrl_unblock_device_reset(ctrl_info);
8647 pqi_ctrl_unblock_requests(ctrl_info);
8648 pqi_scsi_unblock_requests(ctrl_info);
8649 pqi_ctrl_unblock_scan(ctrl_info);
8650 return 0;
8651 }
8652
8653 pci_set_power_state(pci_dev, PCI_D0);
8654 pci_restore_state(pci_dev);
8655
8656 pqi_ctrl_unblock_device_reset(ctrl_info);
8657 pqi_ctrl_unblock_requests(ctrl_info);
8658 pqi_scsi_unblock_requests(ctrl_info);
8659 pqi_ctrl_unblock_scan(ctrl_info);
8660
8661 return pqi_ctrl_init_resume(ctrl_info);
8662}
8663
8664/* Define the PCI IDs for the controllers that we support. */
8665static const struct pci_device_id pqi_pci_id_table[] = {
8666 {
8667 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8668 0x105b, 0x1211)
8669 },
8670 {
8671 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8672 0x105b, 0x1321)
8673 },
8674 {
8675 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8676 0x152d, 0x8a22)
8677 },
8678 {
8679 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8680 0x152d, 0x8a23)
8681 },
8682 {
8683 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8684 0x152d, 0x8a24)
8685 },
8686 {
8687 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8688 0x152d, 0x8a36)
8689 },
8690 {
8691 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8692 0x152d, 0x8a37)
8693 },
8694 {
8695 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8696 0x193d, 0x8460)
8697 },
8698 {
8699 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8700 0x193d, 0x1104)
8701 },
8702 {
8703 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8704 0x193d, 0x1105)
8705 },
8706 {
8707 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8708 0x193d, 0x1106)
8709 },
8710 {
8711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8712 0x193d, 0x1107)
8713 },
8718 {
8719 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8720 0x193d, 0x8461)
8721 },
8722 {
8723 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8724 0x193d, 0xc460)
8725 },
8726 {
8727 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8728 0x193d, 0xc461)
8729 },
8730 {
8731 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8732 0x193d, 0xf460)
8733 },
8734 {
8735 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8736 0x193d, 0xf461)
8737 },
8738 {
8739 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8740 0x1bd4, 0x0045)
8741 },
8742 {
8743 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8744 0x1bd4, 0x0046)
8745 },
8746 {
8747 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8748 0x1bd4, 0x0047)
8749 },
8750 {
8751 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8752 0x1bd4, 0x0048)
8753 },
8754 {
8755 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8756 0x1bd4, 0x004a)
8757 },
8758 {
8759 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8760 0x1bd4, 0x004b)
8761 },
8762 {
8763 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8764 0x1bd4, 0x004c)
8765 },
8766 {
8767 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8768 0x1bd4, 0x004f)
8769 },
8770 {
8771 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8772 0x1bd4, 0x0051)
8773 },
8774 {
8775 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8776 0x1bd4, 0x0052)
8777 },
8778 {
8779 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8780 0x1bd4, 0x0053)
8781 },
8782 {
8783 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8784 0x1bd4, 0x0054)
8785 },
8786 {
8787 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8788 0x19e5, 0xd227)
8789 },
8790 {
8791 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8792 0x19e5, 0xd228)
8793 },
8794 {
8795 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8796 0x19e5, 0xd229)
8797 },
8798 {
8799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8800 0x19e5, 0xd22a)
8801 },
8802 {
8803 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8804 0x19e5, 0xd22b)
8805 },
8806 {
8807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8808 0x19e5, 0xd22c)
8809 },
8810 {
8811 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8812 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
8813 },
8814 {
8815 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8816 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
8817 },
8818 {
8819 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8820 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
8821 },
8822 {
8823 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8824 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
8825 },
8826 {
8827 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8828 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
8829 },
8830 {
8831 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8832 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
8833 },
8834 {
8835 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8836 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
8837 },
8838 {
8839 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8840 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
8841 },
8842 {
8843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8844 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
8845 },
8846 {
8847 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8848 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8849 },
8850 {
8851 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8852 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
8853 },
8854 {
8855 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8856 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
8857 },
8858 {
8859 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8860 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
8861 },
8862 {
8863 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8864 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8865 },
8866 {
8867 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8868 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
8869 },
8870 {
8871 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8872 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
8873 },
8874 {
8875 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8876 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
8877 },
8878 {
8879 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8880 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
8881 },
8882 {
8883 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8884 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
8885 },
8886 {
8887 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8888 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
8889 },
8890 {
8891 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8892 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
8893 },
8894 {
8895 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8896 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
8897 },
8898 {
8899 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8900 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
8901 },
8902 {
8903 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8904 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
8905 },
8906 {
8907 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8908 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
8909 },
8910 {
8911 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8912 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
8913 },
8914 {
8915 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8916 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
8917 },
8918 {
8919 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8920 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
8921 },
8922 {
8923 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8924 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
8925 },
8926 {
8927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8928 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
8929 },
8930 {
8931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8932 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
8933 },
8934 {
8935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8936 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
8937 },
8938 {
8939 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8940 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
8941 },
8942 {
8943 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8944 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
8945 },
8946 {
8947 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8948 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
8949 },
8950 {
8951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8952 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
8953 },
8954 {
8955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8956 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
8957 },
8958 {
8959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8960 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
8961 },
8962 {
8963 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8964 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
8965 },
8966 {
8967 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8968 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
8969 },
8970 {
8971 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8972 PCI_VENDOR_ID_ADAPTEC2, 0x1430)
8973 },
8974 {
8975 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8976 PCI_VENDOR_ID_ADAPTEC2, 0x1440)
8977 },
8978 {
8979 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8980 PCI_VENDOR_ID_ADAPTEC2, 0x1441)
8981 },
8982 {
8983 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8984 PCI_VENDOR_ID_ADAPTEC2, 0x1450)
8985 },
8986 {
8987 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8988 PCI_VENDOR_ID_ADAPTEC2, 0x1452)
8989 },
8990 {
8991 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8992 PCI_VENDOR_ID_ADAPTEC2, 0x1460)
8993 },
8994 {
8995 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8996 PCI_VENDOR_ID_ADAPTEC2, 0x1461)
8997 },
8998 {
8999 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9000 PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9001 },
9002 {
9003 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9004 PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9005 },
9006 {
9007 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9008 PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9009 },
9010 {
9011 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9012 PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9013 },
9014 {
9015 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9016 PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9017 },
9018 {
9019 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9020 PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9021 },
9022 {
9023 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9024 PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9025 },
9026 {
9027 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9028 PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9029 },
9030 {
9031 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9032 PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9033 },
9034 {
9035 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9036 PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9037 },
9038 {
9039 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9040 PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9041 },
9042 {
9043 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9044 PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9045 },
9046 {
9047 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9048 PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9049 },
9050 {
9051 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9052 PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9053 },
9054 {
9055 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9056 PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9057 },
9058 {
9059 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9060 PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9061 },
9062 {
9063 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9064 PCI_VENDOR_ID_ADVANTECH, 0x8312)
9065 },
9066 {
9067 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9068 PCI_VENDOR_ID_DELL, 0x1fe0)
9069 },
9070 {
9071 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9072 PCI_VENDOR_ID_HP, 0x0600)
9073 },
9074 {
9075 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9076 PCI_VENDOR_ID_HP, 0x0601)
9077 },
9078 {
9079 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9080 PCI_VENDOR_ID_HP, 0x0602)
9081 },
9082 {
9083 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9084 PCI_VENDOR_ID_HP, 0x0603)
9085 },
9086 {
9087 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9088 PCI_VENDOR_ID_HP, 0x0609)
9089 },
9090 {
9091 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9092 PCI_VENDOR_ID_HP, 0x0650)
9093 },
9094 {
9095 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9096 PCI_VENDOR_ID_HP, 0x0651)
9097 },
9098 {
9099 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9100 PCI_VENDOR_ID_HP, 0x0652)
9101 },
9102 {
9103 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9104 PCI_VENDOR_ID_HP, 0x0653)
9105 },
9106 {
9107 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9108 PCI_VENDOR_ID_HP, 0x0654)
9109 },
9110 {
9111 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9112 PCI_VENDOR_ID_HP, 0x0655)
9113 },
9114 {
9115 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9116 PCI_VENDOR_ID_HP, 0x0700)
9117 },
9118 {
9119 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9120 PCI_VENDOR_ID_HP, 0x0701)
9121 },
9122 {
9123 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9124 PCI_VENDOR_ID_HP, 0x1001)
9125 },
9126 {
9127 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9128 PCI_VENDOR_ID_HP, 0x1002)
9129 },
9130 {
9131 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9132 PCI_VENDOR_ID_HP, 0x1100)
9133 },
9134 {
9135 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9136 PCI_VENDOR_ID_HP, 0x1101)
9137 },
9138 {
9139 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9140 0x1590, 0x0294)
9141 },
9142 {
9143 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9144 0x1590, 0x02db)
9145 },
9146 {
9147 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9148 0x1590, 0x02dc)
9149 },
9150 {
9151 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9152 0x1590, 0x032e)
9153 },
9154 {
9155 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9156 0x1d8d, 0x0800)
9157 },
9158 {
9159 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9160 0x1d8d, 0x0908)
9161 },
9162 {
9163 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9164 0x1d8d, 0x0806)
9165 },
9166 {
9167 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9168 0x1d8d, 0x0916)
9169 },
9170 {
9171 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9172 PCI_VENDOR_ID_GIGABYTE, 0x1000)
9173 },
9174 {
9175 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9176 PCI_ANY_ID, PCI_ANY_ID)
9177 },
9178 { 0 }
9179};
9180
9181MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
9182
9183static struct pci_driver pqi_pci_driver = {
9184 .name = DRIVER_NAME_SHORT,
9185 .id_table = pqi_pci_id_table,
9186 .probe = pqi_pci_probe,
9187 .remove = pqi_pci_remove,
9188 .shutdown = pqi_shutdown,
9189#if defined(CONFIG_PM)
9190 .suspend = pqi_suspend,
9191 .resume = pqi_resume,
9192#endif
9193};
9194
9195static int __init pqi_init(void)
9196{
9197 int rc;
9198
9199 pr_info(DRIVER_NAME "\n");
9200
9201 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
9202 if (!pqi_sas_transport_template)
9203 return -ENODEV;
9204
9205 pqi_process_module_params();
9206
9207 rc = pci_register_driver(&pqi_pci_driver);
9208 if (rc)
9209 sas_release_transport(pqi_sas_transport_template);
9210
9211 return rc;
9212}
9213
9214static void __exit pqi_cleanup(void)
9215{
9216 pci_unregister_driver(&pqi_pci_driver);
9217 sas_release_transport(pqi_sas_transport_template);
9218}
9219
9220module_init(pqi_init);
9221module_exit(pqi_cleanup);
9222
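/*
 * Compile-time checks that the driver's structure layouts match the
 * register and IU offsets expected by the controller firmware.
 */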
9223static void __attribute__((unused)) verify_structures(void)
9224{
9225 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9226 sis_host_to_ctrl_doorbell) != 0x20);
9227 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9228 sis_interrupt_mask) != 0x34);
9229 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9230 sis_ctrl_to_host_doorbell) != 0x9c);
9231 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9232 sis_ctrl_to_host_doorbell_clear) != 0xa0);
9233 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9234 sis_driver_scratch) != 0xb0);
9235 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9236 sis_product_identifier) != 0xb4);
9237 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9238 sis_firmware_status) != 0xbc);
9239 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9240 sis_mailbox) != 0x1000);
9241 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9242 pqi_registers) != 0x4000);
9243
9244 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9245 iu_type) != 0x0);
9246 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9247 iu_length) != 0x2);
9248 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9249 response_queue_id) != 0x4);
9250 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9251 driver_flags) != 0x6);
9252 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
9253
9254 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9255 status) != 0x0);
9256 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9257 service_response) != 0x1);
9258 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9259 data_present) != 0x2);
9260 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9261 reserved) != 0x3);
9262 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9263 residual_count) != 0x4);
9264 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9265 data_length) != 0x8);
9266 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9267 reserved1) != 0xa);
9268 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9269 data) != 0xc);
9270 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
9271
9272 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9273 data_in_result) != 0x0);
9274 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9275 data_out_result) != 0x1);
9276 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9277 reserved) != 0x2);
9278 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9279 status) != 0x5);
9280 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9281 status_qualifier) != 0x6);
9282 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9283 sense_data_length) != 0x8);
9284 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9285 response_data_length) != 0xa);
9286 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9287 data_in_transferred) != 0xc);
9288 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9289 data_out_transferred) != 0x10);
9290 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9291 data) != 0x14);
9292 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
9293
9294 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9295 signature) != 0x0);
9296 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9297 function_and_status_code) != 0x8);
9298 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9299 max_admin_iq_elements) != 0x10);
9300 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9301 max_admin_oq_elements) != 0x11);
9302 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9303 admin_iq_element_length) != 0x12);
9304 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9305 admin_oq_element_length) != 0x13);
9306 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9307 max_reset_timeout) != 0x14);
9308 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9309 legacy_intx_status) != 0x18);
9310 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9311 legacy_intx_mask_set) != 0x1c);
9312 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9313 legacy_intx_mask_clear) != 0x20);
9314 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9315 device_status) != 0x40);
9316 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9317 admin_iq_pi_offset) != 0x48);
9318 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9319 admin_oq_ci_offset) != 0x50);
9320 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9321 admin_iq_element_array_addr) != 0x58);
9322 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9323 admin_oq_element_array_addr) != 0x60);
9324 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9325 admin_iq_ci_addr) != 0x68);
9326 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9327 admin_oq_pi_addr) != 0x70);
9328 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9329 admin_iq_num_elements) != 0x78);
9330 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9331 admin_oq_num_elements) != 0x79);
9332 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9333 admin_queue_int_msg_num) != 0x7a);
9334 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9335 device_error) != 0x80);
9336 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9337 error_details) != 0x88);
9338 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9339 device_reset) != 0x90);
9340 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9341 power_action) != 0x94);
9342 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
9343
9344 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9345 header.iu_type) != 0);
9346 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9347 header.iu_length) != 2);
9348 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9349 header.driver_flags) != 6);
9350 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9351 request_id) != 8);
9352 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9353 function_code) != 10);
9354 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9355 data.report_device_capability.buffer_length) != 44);
9356 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9357 data.report_device_capability.sg_descriptor) != 48);
9358 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9359 data.create_operational_iq.queue_id) != 12);
9360 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9361 data.create_operational_iq.element_array_addr) != 16);
9362 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9363 data.create_operational_iq.ci_addr) != 24);
9364 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9365 data.create_operational_iq.num_elements) != 32);
9366 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9367 data.create_operational_iq.element_length) != 34);
9368 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9369 data.create_operational_iq.queue_protocol) != 36);
9370 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9371 data.create_operational_oq.queue_id) != 12);
9372 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9373 data.create_operational_oq.element_array_addr) != 16);
9374 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9375 data.create_operational_oq.pi_addr) != 24);
9376 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9377 data.create_operational_oq.num_elements) != 32);
9378 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9379 data.create_operational_oq.element_length) != 34);
9380 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9381 data.create_operational_oq.queue_protocol) != 36);
9382 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9383 data.create_operational_oq.int_msg_num) != 40);
9384 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9385 data.create_operational_oq.coalescing_count) != 42);
9386 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9387 data.create_operational_oq.min_coalescing_time) != 44);
9388 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9389 data.create_operational_oq.max_coalescing_time) != 48);
9390 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9391 data.delete_operational_queue.queue_id) != 12);
9392 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
9393 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9394 data.create_operational_iq) != 64 - 11);
9395 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9396 data.create_operational_oq) != 64 - 11);
9397 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9398 data.delete_operational_queue) != 64 - 11);
9399
9400 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9401 header.iu_type) != 0);
9402 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9403 header.iu_length) != 2);
9404 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9405 header.driver_flags) != 6);
9406 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9407 request_id) != 8);
9408 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9409 function_code) != 10);
9410 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9411 status) != 11);
9412 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9413 data.create_operational_iq.status_descriptor) != 12);
9414 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9415 data.create_operational_iq.iq_pi_offset) != 16);
9416 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9417 data.create_operational_oq.status_descriptor) != 12);
9418 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9419 data.create_operational_oq.oq_ci_offset) != 16);
9420 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
9421
9422 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9423 header.iu_type) != 0);
9424 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9425 header.iu_length) != 2);
9426 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9427 header.response_queue_id) != 4);
9428 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9429 header.driver_flags) != 6);
9430 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9431 request_id) != 8);
9432 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9433 nexus_id) != 10);
9434 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9435 buffer_length) != 12);
9436 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9437 lun_number) != 16);
9438 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9439 protocol_specific) != 24);
9440 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9441 error_index) != 27);
9442 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9443 cdb) != 32);
9444 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9445 timeout) != 60);
9446 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9447 sg_descriptors) != 64);
9448 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
9449 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
9450
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

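	/* layout checks for struct pqi_io_response */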
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

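	/* layout checks for struct pqi_general_management_request */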
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

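	/* layout checks for struct pqi_iu_layer_descriptor */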
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

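	/* layout checks for struct pqi_device_capability */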
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

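	/* layout checks for struct pqi_event_descriptor and struct pqi_event_config */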
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

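	/* pqi_supported_event_types[] must match PQI_NUM_SUPPORTED_EVENTS */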
	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

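	/* layout checks for struct pqi_event_response */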
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

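	/* layout checks for struct pqi_event_acknowledge_request */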
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

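	/* layout checks for struct pqi_task_management_request */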
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

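	/* layout checks for struct pqi_task_management_response */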
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

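	/* layout checks for struct bmic_identify_controller */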
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

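	/* layout checks for struct bmic_identify_physical_device */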
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

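	/* layout checks for the BMIC sense feature buffer header, page header, and AIO subpage */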
	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

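	/* sanity checks on admin/operational queue element counts, lengths, and alignment */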
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

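	/* reserved I/O slots must not consume all outstanding request slots */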
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
