/*
 * 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
 */
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"

#define TW_DRIVER_VERSION "2.26.02.014"
static DEFINE_MUTEX(twa_chrdev_mutex);
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
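/* Example (assuming the module loads under its usual 3w-9xxx name): modprobe 3w-9xxx use_msi=1 */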

static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
				   unsigned char *cdb, int use_sg,
				   TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
151
152
153
154
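/* Show some statistics about the card */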
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted: %4d\n"
		       "Max commands posted: %4d\n"
		       "Current pending commands: %4d\n"
		       "Max pending commands: %4d\n"
		       "Last sgl length: %4d\n"
		       "Max sgl length: %4d\n"
		       "Last sector count: %4d\n"
		       "Max sector count: %4d\n"
		       "SCSI Host Resets: %4d\n"
		       "AEN's: %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
}
189
190
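/* Create sysfs 'stats' device attribute */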
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,
	},
	.show = twa_show_stats
};
198
199
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};
204
205
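/* File operations struct for character device */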
static const struct file_operations twa_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= twa_chrdev_ioctl,
	.open		= twa_chrdev_open,
	.release	= NULL,
	.llseek		= noop_llseek,
};
213
214
215
216
217
218
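/* Returns true when the command's data is DMA-mapped directly; single-segment buffers shorter than TW_MIN_SGL_LENGTH are bounced through the generic buffer instead */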
static bool twa_command_mapped(struct scsi_cmnd *cmd)
{
	return scsi_sg_count(cmd) != 1 ||
		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
}
224
225
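/* This function will complete an AEN request from the ISR */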
226static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
227{
228 TW_Command_Full *full_command_packet;
229 TW_Command *command_packet;
230 TW_Command_Apache_Header *header;
231 unsigned short aen;
232 int retval = 1;
233
234 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
235 tw_dev->posted_request_count--;
236 aen = le16_to_cpu(header->status_block.error);
237 full_command_packet = tw_dev->command_packet_virt[request_id];
238 command_packet = &full_command_packet->command.oldcommand;
239
240
241 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
242
243 if (twa_aen_read_queue(tw_dev, request_id))
244 goto out2;
245 else {
246 retval = 0;
247 goto out;
248 }
249 }
250
251 switch (aen) {
252 case TW_AEN_QUEUE_EMPTY:
253
254 break;
255 case TW_AEN_SYNC_TIME_WITH_HOST:
256 twa_aen_sync_time(tw_dev, request_id);
257 retval = 0;
258 goto out;
259 default:
260 twa_aen_queue_event(tw_dev, header);
261
262
263 if (twa_aen_read_queue(tw_dev, request_id))
264 goto out2;
265 else {
266 retval = 0;
267 goto out;
268 }
269 }
270 retval = 0;
271out2:
272 tw_dev->state[request_id] = TW_S_COMPLETED;
273 twa_free_request_id(tw_dev, request_id);
274 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
275out:
276 return retval;
277}
278
279
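/* This function will drain the AEN queue */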
280static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
281{
282 int request_id = 0;
283 unsigned char cdb[TW_MAX_CDB_LEN];
284 TW_SG_Entry sglist[1];
285 int finished = 0, count = 0;
286 TW_Command_Full *full_command_packet;
287 TW_Command_Apache_Header *header;
288 unsigned short aen;
289 int first_reset = 0, queue = 0, retval = 1;
290
291 if (no_check_reset)
292 first_reset = 0;
293 else
294 first_reset = 1;
295
296 full_command_packet = tw_dev->command_packet_virt[request_id];
297 memset(full_command_packet, 0, sizeof(TW_Command_Full));
298
299
300 memset(&cdb, 0, TW_MAX_CDB_LEN);
301 cdb[0] = REQUEST_SENSE;
302 cdb[4] = TW_ALLOCATION_LENGTH;
303
304
305 memset(&sglist, 0, sizeof(TW_SG_Entry));
306 sglist[0].length = TW_SECTOR_SIZE;
307 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
308
309 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
310 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
311 goto out;
312 }
313
314
315 tw_dev->srb[request_id] = NULL;
316
317 do {
318
319 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
320 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
321 goto out;
322 }
323
324
325 if (twa_poll_response(tw_dev, request_id, 30)) {
326 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
327 tw_dev->posted_request_count--;
328 goto out;
329 }
330
331 tw_dev->posted_request_count--;
332 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
333 aen = le16_to_cpu(header->status_block.error);
334 queue = 0;
335 count++;
336
337 switch (aen) {
338 case TW_AEN_QUEUE_EMPTY:
339 if (first_reset != 1)
340 goto out;
341 else
342 finished = 1;
343 break;
344 case TW_AEN_SOFT_RESET:
345 if (first_reset == 0)
346 first_reset = 1;
347 else
348 queue = 1;
349 break;
350 case TW_AEN_SYNC_TIME_WITH_HOST:
351 break;
352 default:
353 queue = 1;
354 }
355
356
357 if (queue)
358 twa_aen_queue_event(tw_dev, header);
359 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
360
361 if (count == TW_MAX_AEN_DRAIN)
362 goto out;
363
364 retval = 0;
365out:
366 tw_dev->state[request_id] = TW_S_INITIAL;
367 return retval;
368}
369
370
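/* This function will queue an event for later retrieval through the character device */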
371static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
372{
373 u32 local_time;
374 TW_Event *event;
375 unsigned short aen;
376 char host[16];
377 char *error_str;
378
379 tw_dev->aen_count++;
380
381
382 event = tw_dev->event_queue[tw_dev->error_index];
383
384
385 host[0] = '\0';
386 if (tw_dev->host) {
387 sprintf(host, " scsi%d:", tw_dev->host->host_no);
388 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
389 tw_dev->aen_clobber = 1;
390 }
391
392 aen = le16_to_cpu(header->status_block.error);
393 memset(event, 0, sizeof(TW_Event));
394
395 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
396
397 local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
398 event->time_stamp_sec = local_time;
399 event->aen_code = aen;
400 event->retrieved = TW_AEN_NOT_RETRIEVED;
401 event->sequence_id = tw_dev->error_sequence_id;
402 tw_dev->error_sequence_id++;
403
404
405 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
406
407 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
408 event->parameter_len = strlen(header->err_specific_desc);
409 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
410 if (event->severity != TW_AEN_SEVERITY_DEBUG)
411 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
412 host,
413 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
414 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
415 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
416 header->err_specific_desc);
417 else
418 tw_dev->aen_count--;
419
420 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
421 tw_dev->event_queue_wrapped = 1;
422 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
423}
424
425
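/* This function will read the AEN queue from the ISR by posting a REQUEST SENSE */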
426static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
427{
428 unsigned char cdb[TW_MAX_CDB_LEN];
429 TW_SG_Entry sglist[1];
430 TW_Command_Full *full_command_packet;
431 int retval = 1;
432
433 full_command_packet = tw_dev->command_packet_virt[request_id];
434 memset(full_command_packet, 0, sizeof(TW_Command_Full));
435
436
437 memset(&cdb, 0, TW_MAX_CDB_LEN);
438 cdb[0] = REQUEST_SENSE;
439 cdb[4] = TW_ALLOCATION_LENGTH;
440
441
442 memset(&sglist, 0, sizeof(TW_SG_Entry));
443 sglist[0].length = TW_SECTOR_SIZE;
444 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
445
446
447 tw_dev->srb[request_id] = NULL;
448
449
450 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
451 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
452 goto out;
453 }
454 retval = 0;
455out:
456 return retval;
457}
458
459
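/* This function will look up an AEN severity string */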
460static char *twa_aen_severity_lookup(unsigned char severity_code)
461{
462 char *retval = NULL;
463
464 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
465 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
466 goto out;
467
468 retval = twa_aen_severity_table[severity_code];
469out:
470 return retval;
471}
472
473
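/* This function will sync firmware time with the host time */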
474static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
475{
476 u32 schedulertime;
477 TW_Command_Full *full_command_packet;
478 TW_Command *command_packet;
479 TW_Param_Apache *param;
480 time64_t local_time;
481
482
483 full_command_packet = tw_dev->command_packet_virt[request_id];
484 memset(full_command_packet, 0, sizeof(TW_Command_Full));
485 command_packet = &full_command_packet->command.oldcommand;
486 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
487 command_packet->request_id = request_id;
488 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
489 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
490 command_packet->size = TW_COMMAND_SIZE;
491 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
492
493
494 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
495 memset(param, 0, TW_SECTOR_SIZE);
496 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000);
497 param->parameter_id = cpu_to_le16(0x3);
498 param->parameter_size_bytes = cpu_to_le16(4);
499
500
501
502 local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
503 div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
504 schedulertime = cpu_to_le32(schedulertime % 604800);
505
506 memcpy(param->data, &schedulertime, sizeof(u32));
507
508
509 tw_dev->srb[request_id] = NULL;
510
511
512 twa_post_command_packet(tw_dev, request_id, 1);
513}
514
515
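/* This function will allocate coherent DMA memory and check that it is correctly aligned */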
516static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
517{
518 int i;
519 dma_addr_t dma_handle;
520 unsigned long *cpu_addr;
521 int retval = 1;
522
523 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
524 size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
525 if (!cpu_addr) {
526 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
527 goto out;
528 }
529
530 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
531 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
532 dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
533 cpu_addr, dma_handle);
534 goto out;
535 }
536
537 memset(cpu_addr, 0, size*TW_Q_LENGTH);
538
539 for (i = 0; i < TW_Q_LENGTH; i++) {
540 switch(which) {
541 case 0:
542 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
543 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
544 break;
545 case 1:
546 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
547 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
548 break;
549 }
550 }
551 retval = 0;
552out:
553 return retval;
554}
555
556
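/* This function will check the status register for unexpected bits */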
static int twa_check_bits(u32 status_reg_value)
{
	int retval = 1;

	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
		goto out;
	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
		goto out;

	retval = 0;
out:
	return retval;
}
570
571
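/* This function will check the SRL and decide if we are compatible */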
572static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
573{
574 int retval = 1;
575 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
576 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
577 u32 init_connect_result = 0;
578
579 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
580 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
581 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
582 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
583 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
584 &fw_on_ctlr_build, &init_connect_result)) {
585 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
586 goto out;
587 }
588
589 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
590 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
591 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
592
593
594 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
595 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
596 TW_EXTENDED_INIT_CONNECT,
597 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
598 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
599 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
600 &fw_on_ctlr_branch, &fw_on_ctlr_build,
601 &init_connect_result)) {
602 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
603 goto out;
604 }
605 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
606 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
607 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
608 } else {
609 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
610 }
611 goto out;
612 }
613 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
614 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
615 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
616 }
617
618
619 strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
620 sizeof(tw_dev->tw_compat_info.driver_version));
621 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
622 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
623 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
624 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
625 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
626 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
627 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
628 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
629 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
630
631 retval = 0;
632out:
633 return retval;
634}
635
636
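/* This function handles ioctls for the character device */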
637static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
638{
639 struct inode *inode = file_inode(file);
640 long timeout;
641 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
642 dma_addr_t dma_handle;
643 int request_id = 0;
644 unsigned int sequence_id = 0;
645 unsigned char event_index, start_index;
646 TW_Ioctl_Driver_Command driver_command;
647 TW_Ioctl_Buf_Apache *tw_ioctl;
648 TW_Lock *tw_lock;
649 TW_Command_Full *full_command_packet;
650 TW_Compatibility_Info *tw_compat_info;
651 TW_Event *event;
652 ktime_t current_time;
653 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
654 int retval = TW_IOCTL_ERROR_OS_EFAULT;
655 void __user *argp = (void __user *)arg;
656
657 mutex_lock(&twa_chrdev_mutex);
658
659
660 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
661 retval = TW_IOCTL_ERROR_OS_EINTR;
662 goto out;
663 }
664
665
666 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
667 goto out2;
668
669
670 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
671 retval = TW_IOCTL_ERROR_OS_EINVAL;
672 goto out2;
673 }
674
675
676 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
677
678
679 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
680 if (!cpu_addr) {
681 retval = TW_IOCTL_ERROR_OS_ENOMEM;
682 goto out2;
683 }
684
685 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
686
687
688 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
689 goto out3;
690
691
692 switch (cmd) {
693 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
694 spin_lock_irqsave(tw_dev->host->host_lock, flags);
695 twa_get_request_id(tw_dev, &request_id);
696
697
698 tw_dev->srb[request_id] = NULL;
699
700
701 tw_dev->chrdev_request_id = request_id;
702
703 full_command_packet = &tw_ioctl->firmware_command;
704
705
706 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
707
708 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
709
710
711 twa_post_command_packet(tw_dev, request_id, 1);
712 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
713
714 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
715
716
717 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
718
719
720 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
721
722 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
723 tw_dev->host->host_no, TW_DRIVER, 0x37,
724 cmd);
725 retval = TW_IOCTL_ERROR_OS_EIO;
726 twa_reset_device_extension(tw_dev);
727 goto out3;
728 }
729
730
731 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
732
733
734 spin_lock_irqsave(tw_dev->host->host_lock, flags);
735 tw_dev->posted_request_count--;
736 tw_dev->state[request_id] = TW_S_COMPLETED;
737 twa_free_request_id(tw_dev, request_id);
738 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
739 break;
740 case TW_IOCTL_GET_COMPATIBILITY_INFO:
741 tw_ioctl->driver_command.status = 0;
742
743 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
744 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
745 break;
746 case TW_IOCTL_GET_LAST_EVENT:
747 if (tw_dev->event_queue_wrapped) {
748 if (tw_dev->aen_clobber) {
749 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
750 tw_dev->aen_clobber = 0;
751 } else
752 tw_ioctl->driver_command.status = 0;
753 } else {
754 if (!tw_dev->error_index) {
755 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
756 break;
757 }
758 tw_ioctl->driver_command.status = 0;
759 }
760 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
761 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
762 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
763 break;
764 case TW_IOCTL_GET_FIRST_EVENT:
765 if (tw_dev->event_queue_wrapped) {
766 if (tw_dev->aen_clobber) {
767 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
768 tw_dev->aen_clobber = 0;
769 } else
770 tw_ioctl->driver_command.status = 0;
771 event_index = tw_dev->error_index;
772 } else {
773 if (!tw_dev->error_index) {
774 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
775 break;
776 }
777 tw_ioctl->driver_command.status = 0;
778 event_index = 0;
779 }
780 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
781 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
782 break;
783 case TW_IOCTL_GET_NEXT_EVENT:
784 event = (TW_Event *)tw_ioctl->data_buffer;
785 sequence_id = event->sequence_id;
786 tw_ioctl->driver_command.status = 0;
787
788 if (tw_dev->event_queue_wrapped) {
789 if (tw_dev->aen_clobber) {
790 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
791 tw_dev->aen_clobber = 0;
792 }
793 start_index = tw_dev->error_index;
794 } else {
795 if (!tw_dev->error_index) {
796 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
797 break;
798 }
799 start_index = 0;
800 }
801 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
802
803 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
804 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
805 tw_dev->aen_clobber = 1;
806 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
807 break;
808 }
809 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
810 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
811 break;
812 case TW_IOCTL_GET_PREVIOUS_EVENT:
813 event = (TW_Event *)tw_ioctl->data_buffer;
814 sequence_id = event->sequence_id;
815 tw_ioctl->driver_command.status = 0;
816
817 if (tw_dev->event_queue_wrapped) {
818 if (tw_dev->aen_clobber) {
819 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
820 tw_dev->aen_clobber = 0;
821 }
822 start_index = tw_dev->error_index;
823 } else {
824 if (!tw_dev->error_index) {
825 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
826 break;
827 }
828 start_index = 0;
829 }
830 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
831
832 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
833 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
834 tw_dev->aen_clobber = 1;
835 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
836 break;
837 }
838 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
839 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
840 break;
841 case TW_IOCTL_GET_LOCK:
842 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
843 current_time = ktime_get();
844
845 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
846 ktime_after(current_time, tw_dev->ioctl_time)) {
847 tw_dev->ioctl_sem_lock = 1;
848 tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
849 tw_ioctl->driver_command.status = 0;
850 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
851 } else {
852 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
853 tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
854 }
855 break;
856 case TW_IOCTL_RELEASE_LOCK:
857 if (tw_dev->ioctl_sem_lock == 1) {
858 tw_dev->ioctl_sem_lock = 0;
859 tw_ioctl->driver_command.status = 0;
860 } else {
861 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
862 }
863 break;
864 default:
865 retval = TW_IOCTL_ERROR_OS_ENOTTY;
866 goto out3;
867 }
868
869
870 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
871 retval = 0;
872out3:
873
874 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
875out2:
876 mutex_unlock(&tw_dev->ioctl_lock);
877out:
878 mutex_unlock(&twa_chrdev_mutex);
879 return retval;
880}
881
882
883
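/* This function handles open for the character device */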
884static int twa_chrdev_open(struct inode *inode, struct file *file)
885{
886 unsigned int minor_number;
887 int retval = TW_IOCTL_ERROR_OS_ENODEV;
888
889 if (!capable(CAP_SYS_ADMIN)) {
890 retval = -EACCES;
891 goto out;
892 }
893
894 minor_number = iminor(inode);
895 if (minor_number >= twa_device_extension_count)
896 goto out;
897 retval = 0;
898out:
899 return retval;
900}
901
902
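/* This function will report and clear errors found in the status register */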
903static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
904{
905 int retval = 1;
906
907
908 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
909 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
910 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
911 }
912
913 if (status_reg_value & TW_STATUS_PCI_ABORT) {
914 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
915 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
916 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
917 }
918
919 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
920 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
921 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
922 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
923 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
924 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
925 }
926
927 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
928 if (tw_dev->reset_print == 0) {
929 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
930 tw_dev->reset_print = 1;
931 }
932 goto out;
933 }
934 retval = 0;
935out:
936 return retval;
937}
938
939
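/* This function will empty the response queue */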
940static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
941{
942 u32 status_reg_value, response_que_value;
943 int count = 0, retval = 1;
944
945 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
946
947 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
948 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
949 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
950 count++;
951 }
952 if (count == TW_MAX_RESPONSE_DRAIN)
953 goto out;
954
955 retval = 0;
956out:
957 return retval;
958}
959
960
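/* This function will drain the large response queue on controllers other than the original 9000 series */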
961static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
962{
963 u32 response_que_value = 0;
964 unsigned long before;
965 int retval = 1;
966
967 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
968 before = jiffies;
969 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
970 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
971 msleep(1);
972 if (time_after(jiffies, before + HZ * 30))
973 goto out;
974 }
975
976 msleep(500);
977 retval = 0;
978 } else
979 retval = 0;
980out:
981 return retval;
982}
983
984
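/* This function passes sense data and error strings from the firmware to the scsi layer */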
985static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
986{
987 TW_Command_Full *full_command_packet;
988 unsigned short error;
989 int retval = 1;
990 char *error_str;
991
992 full_command_packet = tw_dev->command_packet_virt[request_id];
993
994
995 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
996
997
998 error = le16_to_cpu(full_command_packet->header.status_block.error);
999 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
1000 if (print_host)
1001 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1002 tw_dev->host->host_no,
1003 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1004 full_command_packet->header.status_block.error,
1005 error_str[0] == '\0' ?
1006 twa_string_lookup(twa_error_table,
1007 full_command_packet->header.status_block.error) : error_str,
1008 full_command_packet->header.err_specific_desc);
1009 else
1010 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1011 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1012 full_command_packet->header.status_block.error,
1013 error_str[0] == '\0' ?
1014 twa_string_lookup(twa_error_table,
1015 full_command_packet->header.status_block.error) : error_str,
1016 full_command_packet->header.err_specific_desc);
1017 }
1018
1019 if (copy_sense) {
1020 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1021 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1022 retval = TW_ISR_DONT_RESULT;
1023 goto out;
1024 }
1025 retval = 0;
1026out:
1027 return retval;
1028}
1029
1030
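/* This function will free up device extension resources */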
1031static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1032{
1033 if (tw_dev->command_packet_virt[0])
1034 dma_free_coherent(&tw_dev->tw_pci_dev->dev,
1035 sizeof(TW_Command_Full) * TW_Q_LENGTH,
1036 tw_dev->command_packet_virt[0],
1037 tw_dev->command_packet_phys[0]);
1038
1039 if (tw_dev->generic_buffer_virt[0])
1040 dma_free_coherent(&tw_dev->tw_pci_dev->dev,
1041 TW_SECTOR_SIZE * TW_Q_LENGTH,
1042 tw_dev->generic_buffer_virt[0],
1043 tw_dev->generic_buffer_phys[0]);
1044
1045 kfree(tw_dev->event_queue[0]);
1046}
1047
1048
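/* This function will free a request id */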
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
{
	tw_dev->free_queue[tw_dev->free_tail] = request_id;
	tw_dev->state[request_id] = TW_S_FINISHED;
	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
}
1055
1056
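/* This function will read a parameter table entry from the firmware */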
1057static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1058{
1059 TW_Command_Full *full_command_packet;
1060 TW_Command *command_packet;
1061 TW_Param_Apache *param;
1062 void *retval = NULL;
1063
1064
1065 full_command_packet = tw_dev->command_packet_virt[request_id];
1066 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1067 command_packet = &full_command_packet->command.oldcommand;
1068
1069 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1070 command_packet->size = TW_COMMAND_SIZE;
1071 command_packet->request_id = request_id;
1072 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1073
1074
1075 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1076 memset(param, 0, TW_SECTOR_SIZE);
1077 param->table_id = cpu_to_le16(table_id | 0x8000);
1078 param->parameter_id = cpu_to_le16(parameter_id);
1079 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1080
1081 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1082 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1083
1084
1085 twa_post_command_packet(tw_dev, request_id, 1);
1086
1087
1088 if (twa_poll_response(tw_dev, request_id, 30))
1089 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1090 else
1091 retval = (void *)&(param->data[0]);
1092
1093 tw_dev->posted_request_count--;
1094 tw_dev->state[request_id] = TW_S_INITIAL;
1095
1096 return retval;
1097}
1098
1099
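/* This function will get a free request id */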
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
{
	*request_id = tw_dev->free_queue[tw_dev->free_head];
	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
	tw_dev->state[*request_id] = TW_S_STARTED;
}
1106
1107
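/* This function will send an initconnection command to the controller */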
1108static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1109 u32 set_features, unsigned short current_fw_srl,
1110 unsigned short current_fw_arch_id,
1111 unsigned short current_fw_branch,
1112 unsigned short current_fw_build,
1113 unsigned short *fw_on_ctlr_srl,
1114 unsigned short *fw_on_ctlr_arch_id,
1115 unsigned short *fw_on_ctlr_branch,
1116 unsigned short *fw_on_ctlr_build,
1117 u32 *init_connect_result)
1118{
1119 TW_Command_Full *full_command_packet;
1120 TW_Initconnect *tw_initconnect;
1121 int request_id = 0, retval = 1;
1122
1123
1124 full_command_packet = tw_dev->command_packet_virt[request_id];
1125 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1126 full_command_packet->header.header_desc.size_header = 128;
1127
1128 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1129 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1130 tw_initconnect->request_id = request_id;
1131 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1132 tw_initconnect->features = set_features;
1133
1134
1135 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1136
1137 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1138
1139 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1140 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1141 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1142 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1143 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1144 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1145 } else
1146 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1147
1148
1149 twa_post_command_packet(tw_dev, request_id, 1);
1150
1151
1152 if (twa_poll_response(tw_dev, request_id, 30)) {
1153 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1154 } else {
1155 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1156 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1157 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1158 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1159 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1160 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1161 }
1162 retval = 0;
1163 }
1164
1165 tw_dev->posted_request_count--;
1166 tw_dev->state[request_id] = TW_S_INITIAL;
1167
1168 return retval;
1169}
1170
1171
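/* This function will initialize the fields of a device extension */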
1172static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1173{
1174 int i, retval = 1;
1175
1176
1177 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1178 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1179 goto out;
1180 }
1181
1182
1183 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1184 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1185 goto out;
1186 }
1187
1188
1189 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1190 if (!tw_dev->event_queue[0]) {
1191 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1192 goto out;
1193 }
1194
1195
1196 for (i = 0; i < TW_Q_LENGTH; i++) {
1197 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1198 tw_dev->free_queue[i] = i;
1199 tw_dev->state[i] = TW_S_INITIAL;
1200 }
1201
1202 tw_dev->pending_head = TW_Q_START;
1203 tw_dev->pending_tail = TW_Q_START;
1204 tw_dev->free_head = TW_Q_START;
1205 tw_dev->free_tail = TW_Q_START;
1206 tw_dev->error_sequence_id = 1;
1207 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1208
1209 mutex_init(&tw_dev->ioctl_lock);
1210 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1211
1212 retval = 0;
1213out:
1214 return retval;
1215}
1216
1217
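/* This function is the interrupt service routine */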
1218static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1219{
1220 int request_id, error = 0;
1221 u32 status_reg_value;
1222 TW_Response_Queue response_que;
1223 TW_Command_Full *full_command_packet;
1224 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1225 int handled = 0;
1226
1227
1228 spin_lock(tw_dev->host->host_lock);
1229
1230
1231 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1232
1233
1234 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1235 goto twa_interrupt_bail;
1236
1237 handled = 1;
1238
1239
1240 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1241 goto twa_interrupt_bail;
1242
1243
1244 if (twa_check_bits(status_reg_value)) {
1245 if (twa_decode_bits(tw_dev, status_reg_value)) {
1246 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1247 goto twa_interrupt_bail;
1248 }
1249 }
1250
1251
1252 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1253 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1254
1255
1256 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1257 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1258 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1259 twa_get_request_id(tw_dev, &request_id);
1260
1261 error = twa_aen_read_queue(tw_dev, request_id);
1262 if (error) {
1263 tw_dev->state[request_id] = TW_S_COMPLETED;
1264 twa_free_request_id(tw_dev, request_id);
1265 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1266 }
1267 }
1268 }
1269
1270
1271 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1272 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1273
1274 while (tw_dev->pending_request_count > 0) {
1275 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1276 if (tw_dev->state[request_id] != TW_S_PENDING) {
1277 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1278 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1279 goto twa_interrupt_bail;
1280 }
1281 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1282 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1283 tw_dev->pending_request_count--;
1284 } else {
1285
1286 break;
1287 }
1288 }
1289 }
1290
1291
1292 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1293
1294
1295 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1296
1297 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1298 request_id = TW_RESID_OUT(response_que.response_id);
1299 full_command_packet = tw_dev->command_packet_virt[request_id];
1300 error = 0;
1301
1302 if (full_command_packet->command.newcommand.status != 0) {
1303 if (tw_dev->srb[request_id] != NULL) {
1304 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1305 } else {
1306
1307 if (request_id != tw_dev->chrdev_request_id) {
1308 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1309 }
1310 }
1311 }
1312
1313
1314 if (tw_dev->state[request_id] != TW_S_POSTED) {
1315 if (tw_dev->srb[request_id] != NULL) {
1316 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1317 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1318 goto twa_interrupt_bail;
1319 }
1320 }
1321
1322
1323 if (tw_dev->srb[request_id] == NULL) {
1324 if (request_id != tw_dev->chrdev_request_id) {
1325 if (twa_aen_complete(tw_dev, request_id))
1326 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1327 } else {
1328 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1329 wake_up(&tw_dev->ioctl_wqueue);
1330 }
1331 } else {
1332 struct scsi_cmnd *cmd;
1333
1334 cmd = tw_dev->srb[request_id];
1335
1336 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1337
1338 if (error == 0) {
1339 cmd->result = (DID_OK << 16);
1340 }
1341
1342
1343 if (error == 1) {
1344
1345 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1346 }
1347
1348
1349 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1350 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1351 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1352 }
1353
1354
1355 if (twa_command_mapped(cmd))
1356 scsi_dma_unmap(cmd);
1357 cmd->scsi_done(cmd);
1358 tw_dev->state[request_id] = TW_S_COMPLETED;
1359 twa_free_request_id(tw_dev, request_id);
1360 tw_dev->posted_request_count--;
1361 }
1362
1363
1364 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1365 if (twa_check_bits(status_reg_value)) {
1366 if (twa_decode_bits(tw_dev, status_reg_value)) {
1367 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1368 goto twa_interrupt_bail;
1369 }
1370 }
1371 }
1372 }
1373
1374twa_interrupt_bail:
1375 spin_unlock(tw_dev->host->host_lock);
1376 return IRQ_RETVAL(handled);
1377}
1378
1379
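/* This function will load the request id and scatter gather list for ioctls */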
1380static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1381{
1382 TW_Command *oldcommand;
1383 TW_Command_Apache *newcommand;
1384 TW_SG_Entry *sgl;
1385 unsigned int pae = 0;
1386
1387 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1388 pae = 1;
1389
1390 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1391 newcommand = &full_command_packet->command.newcommand;
1392 newcommand->request_id__lunl =
1393 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1394 if (length) {
1395 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1396 newcommand->sg_list[0].length = cpu_to_le32(length);
1397 }
1398 newcommand->sgl_entries__lunh =
1399 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1400 } else {
1401 oldcommand = &full_command_packet->command.oldcommand;
1402 oldcommand->request_id = request_id;
1403
1404 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1405
1406 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1407 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1408 else
1409 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1410 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1411 sgl->length = cpu_to_le32(length);
1412
1413 oldcommand->size += pae;
1414 }
1415 }
1416}
1417
1418
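/* This function will poll for a response to the given request id */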
1419static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1420{
1421 int retval = 1, found = 0, response_request_id;
1422 TW_Response_Queue response_queue;
1423 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1424
1425 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1426 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1427 response_request_id = TW_RESID_OUT(response_queue.response_id);
1428 if (request_id != response_request_id) {
1429 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1430 goto out;
1431 }
1432 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1433 if (full_command_packet->command.newcommand.status != 0) {
1434
1435 twa_fill_sense(tw_dev, request_id, 0, 0);
1436 goto out;
1437 }
1438 found = 1;
1439 } else {
1440 if (full_command_packet->command.oldcommand.status != 0) {
1441
1442 twa_fill_sense(tw_dev, request_id, 0, 0);
1443 goto out;
1444 }
1445 found = 1;
1446 }
1447 }
1448
1449 if (found)
1450 retval = 0;
1451out:
1452 return retval;
1453}
1454
1455
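/* This function will poll the status register for the given flag to appear */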
1456static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1457{
1458 u32 status_reg_value;
1459 unsigned long before;
1460 int retval = 1;
1461
1462 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1463 before = jiffies;
1464
1465 if (twa_check_bits(status_reg_value))
1466 twa_decode_bits(tw_dev, status_reg_value);
1467
1468 while ((status_reg_value & flag) != flag) {
1469 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1470
1471 if (twa_check_bits(status_reg_value))
1472 twa_decode_bits(tw_dev, status_reg_value);
1473
1474 if (time_after(jiffies, before + HZ * seconds))
1475 goto out;
1476
1477 msleep(50);
1478 }
1479 retval = 0;
1480out:
1481 return retval;
1482}
1483
1484
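/* This function will poll the status register until the given flag is cleared */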
1485static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1486{
1487 u32 status_reg_value;
1488 unsigned long before;
1489 int retval = 1;
1490
1491 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1492 before = jiffies;
1493
1494 if (twa_check_bits(status_reg_value))
1495 twa_decode_bits(tw_dev, status_reg_value);
1496
1497 while ((status_reg_value & flag) != 0) {
1498 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1499 if (twa_check_bits(status_reg_value))
1500 twa_decode_bits(tw_dev, status_reg_value);
1501
1502 if (time_after(jiffies, before + HZ * seconds))
1503 goto out;
1504
1505 msleep(50);
1506 }
1507 retval = 0;
1508out:
1509 return retval;
1510}
1511
1512
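/* This function will attempt to post a command packet to the board */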
1513static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1514{
1515 u32 status_reg_value;
1516 dma_addr_t command_que_value;
1517 int retval = 1;
1518
1519 command_que_value = tw_dev->command_packet_phys[request_id];
1520
1521
1522 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1523 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1524 command_que_value += TW_COMMAND_OFFSET;
1525 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1526 }
1527
1528 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1529
1530 if (twa_check_bits(status_reg_value))
1531 twa_decode_bits(tw_dev, status_reg_value);
1532
1533 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1534
1535
1536 if (!internal) {
1537 retval = SCSI_MLQUEUE_HOST_BUSY;
1538 goto out;
1539 }
1540
1541
1542 if (tw_dev->state[request_id] != TW_S_PENDING) {
1543 tw_dev->state[request_id] = TW_S_PENDING;
1544 tw_dev->pending_request_count++;
1545 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1546 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1547 }
1548 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1549 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1550 }
1551 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1552 goto out;
1553 } else {
1554 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1555 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1556
1557 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1558 } else {
1559 if (sizeof(dma_addr_t) > 4) {
1560 command_que_value += TW_COMMAND_OFFSET;
1561 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1562 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1563 } else {
1564 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1565 }
1566 }
1567 tw_dev->state[request_id] = TW_S_POSTED;
1568 tw_dev->posted_request_count++;
1569 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1570 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1571 }
1572 }
1573 retval = 0;
1574out:
1575 return retval;
1576}
1577
1578
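/* This function will reset a device extension */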
1579static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1580{
1581 int i = 0;
1582 int retval = 1;
1583 unsigned long flags = 0;
1584
1585 set_bit(TW_IN_RESET, &tw_dev->flags);
1586 TW_DISABLE_INTERRUPTS(tw_dev);
1587 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1588 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1589
1590
1591 for (i = 0; i < TW_Q_LENGTH; i++) {
1592 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1593 (tw_dev->state[i] != TW_S_INITIAL) &&
1594 (tw_dev->state[i] != TW_S_COMPLETED)) {
1595 if (tw_dev->srb[i]) {
1596 struct scsi_cmnd *cmd = tw_dev->srb[i];
1597
1598 cmd->result = (DID_RESET << 16);
1599 if (twa_command_mapped(cmd))
1600 scsi_dma_unmap(cmd);
1601 cmd->scsi_done(cmd);
1602 }
1603 }
1604 }
1605
1606
1607 for (i = 0; i < TW_Q_LENGTH; i++) {
1608 tw_dev->free_queue[i] = i;
1609 tw_dev->state[i] = TW_S_INITIAL;
1610 }
1611 tw_dev->free_head = TW_Q_START;
1612 tw_dev->free_tail = TW_Q_START;
1613 tw_dev->posted_request_count = 0;
1614 tw_dev->pending_request_count = 0;
1615 tw_dev->pending_head = TW_Q_START;
1616 tw_dev->pending_tail = TW_Q_START;
1617 tw_dev->reset_print = 0;
1618
1619 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1620
1621 if (twa_reset_sequence(tw_dev, 1))
1622 goto out;
1623
1624 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1625 clear_bit(TW_IN_RESET, &tw_dev->flags);
1626 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1627
1628 retval = 0;
1629out:
1630 return retval;
1631}
1632
1633
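/* This function will run the controller reset sequence */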
1634static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1635{
1636 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1637
1638 while (tries < TW_MAX_RESET_TRIES) {
1639 if (do_soft_reset) {
1640 TW_SOFT_RESET(tw_dev);
1641
1642 if (twa_empty_response_queue_large(tw_dev)) {
1643 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1644 do_soft_reset = 1;
1645 tries++;
1646 continue;
1647 }
1648 }
1649
1650
1651 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1652 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1653 do_soft_reset = 1;
1654 tries++;
1655 continue;
1656 }
1657
1658
1659 if (twa_empty_response_queue(tw_dev)) {
1660 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1661 do_soft_reset = 1;
1662 tries++;
1663 continue;
1664 }
1665
1666 flashed = 0;
1667
1668
1669 if (twa_check_srl(tw_dev, &flashed)) {
1670 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1671 do_soft_reset = 1;
1672 tries++;
1673 continue;
1674 } else {
1675 if (flashed) {
1676 tries++;
1677 continue;
1678 }
1679 }
1680
1681
1682 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1683 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1684 do_soft_reset = 1;
1685 tries++;
1686 continue;
1687 }
1688
1689
1690 retval = 0;
1691 goto out;
1692 }
1693out:
1694 return retval;
1695}
1696
1697
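/* This function returns unit geometry in cylinders/heads/sectors */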
1698static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1699{
1700 int heads, sectors, cylinders;
1701 TW_Device_Extension *tw_dev;
1702
1703 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1704
1705 if (capacity >= 0x200000) {
1706 heads = 255;
1707 sectors = 63;
1708 cylinders = sector_div(capacity, heads * sectors);
1709 } else {
1710 heads = 64;
1711 sectors = 32;
1712 cylinders = sector_div(capacity, heads * sectors);
1713 }
1714
1715 geom[0] = heads;
1716 geom[1] = sectors;
1717 geom[2] = cylinders;
1718
1719 return 0;
1720}
1721
1722
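/* This is the scsi error-handler host reset callback */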
1723static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1724{
1725 TW_Device_Extension *tw_dev = NULL;
1726 int retval = FAILED;
1727
1728 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1729
1730 tw_dev->num_resets++;
1731
1732 sdev_printk(KERN_WARNING, SCpnt->device,
1733 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1734 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1735
1736
1737 mutex_lock(&tw_dev->ioctl_lock);
1738
1739
1740 if (twa_reset_device_extension(tw_dev)) {
1741 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1742 goto out;
1743 }
1744
1745 retval = SUCCESS;
1746out:
1747 mutex_unlock(&tw_dev->ioctl_lock);
1748 return retval;
1749}
1750
1751
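/* This is the main scsi queuecommand function */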
1752static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1753{
1754 int request_id, retval;
1755 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1756
1757
1758 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1759 retval = SCSI_MLQUEUE_HOST_BUSY;
1760 goto out;
1761 }
1762
1763
1764 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1765 SCpnt->result = (DID_BAD_TARGET << 16);
1766 done(SCpnt);
1767 retval = 0;
1768 goto out;
1769 }
1770
1771
1772 SCpnt->scsi_done = done;
1773
1774
1775 twa_get_request_id(tw_dev, &request_id);
1776
1777
1778 tw_dev->srb[request_id] = SCpnt;
1779
1780 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1781 switch (retval) {
1782 case SCSI_MLQUEUE_HOST_BUSY:
1783 if (twa_command_mapped(SCpnt))
1784 scsi_dma_unmap(SCpnt);
1785 twa_free_request_id(tw_dev, request_id);
1786 break;
1787 case 1:
1788 SCpnt->result = (DID_ERROR << 16);
1789 if (twa_command_mapped(SCpnt))
1790 scsi_dma_unmap(SCpnt);
1791 done(SCpnt);
1792 tw_dev->state[request_id] = TW_S_COMPLETED;
1793 twa_free_request_id(tw_dev, request_id);
1794 retval = 0;
1795 }
1796out:
1797 return retval;
1798}
1799
1800static DEF_SCSI_QCMD(twa_scsi_queue)
1801
1802
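/* This function hands scsi cdb's to the firmware */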
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
                                   unsigned char *cdb, int use_sg,
                                   TW_SG_Entry *sglistarg)
{
        TW_Command_Full *full_command_packet;
        TW_Command_Apache *command_packet;
        u32 num_sectors = 0x0;
        int i, sg_count;
        struct scsi_cmnd *srb = NULL;
        struct scatterlist *sglist = NULL, *sg;
        int retval = 1;

        if (tw_dev->srb[request_id]) {
                srb = tw_dev->srb[request_id];
                if (scsi_sglist(srb))
                        sglist = scsi_sglist(srb);
        }

        /* Initialize command packet */
        full_command_packet = tw_dev->command_packet_virt[request_id];
        full_command_packet->header.header_desc.size_header = 128;
        full_command_packet->header.status_block.error = 0;
        full_command_packet->header.status_block.severity__reserved = 0;

        command_packet = &full_command_packet->command.newcommand;
        command_packet->status = 0;
        command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);

        /* Copy the full CDB; max_cmd_len is capped at TW_MAX_CDB_LEN in twa_probe() */
        if (!cdb)
                memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
        else
                memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);

        if (srb) {
                command_packet->unit = srb->device->id;
                command_packet->request_id__lunl =
                        cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
        } else {
                command_packet->request_id__lunl =
                        cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
                command_packet->unit = 0;
        }

        command_packet->sgl_offset = 16;

        if (!sglistarg) {
                /* Map sglist from scsi layer to cmd packet */
                if (scsi_sg_count(srb)) {
                        if (!twa_command_mapped(srb)) {
                                if (srb->sc_data_direction == DMA_TO_DEVICE ||
                                    srb->sc_data_direction == DMA_BIDIRECTIONAL)
                                        scsi_sg_copy_to_buffer(srb,
                                                               tw_dev->generic_buffer_virt[request_id],
                                                               TW_SECTOR_SIZE);
                                command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
                                command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
                        } else {
                                sg_count = scsi_dma_map(srb);
                                if (sg_count < 0)
                                        goto out;

                                scsi_for_each_sg(srb, sg, sg_count, i) {
                                        command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
                                        command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
                                        if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
                                                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
                                                goto out;
                                        }
                                }
                        }
                        command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
                }
        } else {
                /* Internal post: use the caller supplied scatter-gather list */
                for (i = 0; i < use_sg; i++) {
                        command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
                        command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
                        if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
                                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
                                goto out;
                        }
                }
                command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
        }

        /* Pull the sector count out of READ/WRITE 6 and 10 CDBs for statistics */
        if (srb) {
                if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
                        num_sectors = (u32)srb->cmnd[4];

                if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
                        num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
        }

        /* Update sector statistics */
        tw_dev->sector_count = num_sectors;
        if (tw_dev->sector_count > tw_dev->max_sector_count)
                tw_dev->max_sector_count = tw_dev->sector_count;

        /* Update SG statistics */
        if (srb) {
                tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
                if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
                        tw_dev->max_sgl_entries = tw_dev->sgl_entries;
        }

        /* Now post the command to the board */
        if (srb) {
                retval = twa_post_command_packet(tw_dev, request_id, 0);
        } else {
                twa_post_command_packet(tw_dev, request_id, 1);
                retval = 0;
        }
out:
        return retval;
}

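/* This function completes an execute scsi operation: for commands that were
   not DMA mapped, any read data is copied back out of the bounce buffer */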
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
{
        struct scsi_cmnd *cmd = tw_dev->srb[request_id];

        if (!twa_command_mapped(cmd) &&
            (cmd->sc_data_direction == DMA_FROM_DEVICE ||
             cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
                if (scsi_sg_count(cmd) == 1) {
                        void *buf = tw_dev->generic_buffer_virt[request_id];

                        scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
                }
        }
}

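/* This function notifies the firmware that the host is shutting down and
   releases the controller interrupt */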
static void __twa_shutdown(TW_Device_Extension *tw_dev)
{
        /* Disable interrupts */
        TW_DISABLE_INTERRUPTS(tw_dev);

        /* Free up the IRQ */
        free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

        printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);

        /* Tell the card we are shutting down */
        if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
        } else {
                printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
        }

        /* Clear all interrupts just before exit */
        TW_CLEAR_ALL_INTERRUPTS(tw_dev);
}

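/* Wrapper around __twa_shutdown(), called by the PCI layer at system shutdown */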
static void twa_shutdown(struct pci_dev *pdev)
{
        struct Scsi_Host *host = pci_get_drvdata(pdev);
        TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

        __twa_shutdown(tw_dev);
}

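/* This function looks up the message text for a given AEN/error code */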
static char *twa_string_lookup(twa_message_type *table, unsigned int code)
{
        int index;

        for (index = 0; ((code != table[index].code) &&
                (table[index].text != (char *)0)); index++);
        return(table[index].text);
}

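/* This function is called for each attached scsi device; it only adjusts
   the block layer request timeout */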
static int twa_slave_configure(struct scsi_device *sdev)
{
        /* Force 60 second timeout */
        blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

        return 0;
}

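/* SCSI host template for the 3ware 9000 family */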
static struct scsi_host_template driver_template = {
        .module                 = THIS_MODULE,
        .name                   = "3ware 9000 Storage Controller",
        .queuecommand           = twa_scsi_queue,
        .eh_host_reset_handler  = twa_scsi_eh_reset,
        .bios_param             = twa_scsi_biosparam,
        .change_queue_depth     = scsi_change_queue_depth,
        .can_queue              = TW_Q_LENGTH-2,
        .slave_configure        = twa_slave_configure,
        .this_id                = -1,
        .sg_tablesize           = TW_APACHE_MAX_SGL_LENGTH,
        .max_sectors            = TW_MAX_SECTORS,
        .cmd_per_lun            = TW_MAX_CMDS_PER_LUN,
        .shost_attrs            = twa_host_attrs,
        .emulated               = 1,
        .no_write_same          = 1,
};

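/*
 * This function is called by the PCI core once for each supported controller
 * found. It enables the device, configures the DMA mask, maps the registers,
 * resets the card and registers the SCSI host and the "twa" character device.
 *
 * Usage sketch (an assumption for illustration; it presumes the driver is
 * loaded as the 3w-9xxx module and that MSI is wanted):
 *
 *   modprobe 3w-9xxx use_msi=1
 *
 * use_msi is only honored for controllers other than the 9000 series, as
 * checked below before calling pci_enable_msi().
 */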
static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
        struct Scsi_Host *host = NULL;
        TW_Device_Extension *tw_dev;
        unsigned long mem_addr, mem_len;
        int retval;

        retval = pci_enable_device(pdev);
        if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
                goto out_disable_device;
        }

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);

        retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (retval)
                retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
                retval = -ENODEV;
                goto out_disable_device;
        }

        host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
        if (!host) {
                TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
                retval = -ENOMEM;
                goto out_disable_device;
        }
        tw_dev = (TW_Device_Extension *)host->hostdata;

        /* Save values to device extension */
        tw_dev->host = host;
        tw_dev->tw_pci_dev = pdev;

        if (twa_initialize_device_extension(tw_dev)) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
                retval = -ENOMEM;
                goto out_free_device_extension;
        }

        /* Request IO regions */
        retval = pci_request_regions(pdev, "3w-9xxx");
        if (retval) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
                goto out_free_device_extension;
        }

        if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
                mem_addr = pci_resource_start(pdev, 1);
                mem_len = pci_resource_len(pdev, 1);
        } else {
                mem_addr = pci_resource_start(pdev, 2);
                mem_len = pci_resource_len(pdev, 2);
        }

        /* Save base address */
        tw_dev->base_addr = ioremap(mem_addr, mem_len);
        if (!tw_dev->base_addr) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
                retval = -ENOMEM;
                goto out_release_mem_region;
        }

        /* Disable interrupts on the card */
        TW_DISABLE_INTERRUPTS(tw_dev);

        /* Initialize the card */
        if (twa_reset_sequence(tw_dev, 0)) {
                retval = -ENOMEM;
                goto out_iounmap;
        }

        /* Set host specific parameters */
        if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
            (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
                host->max_id = TW_MAX_UNITS_9650SE;
        else
                host->max_id = TW_MAX_UNITS;

        host->max_cmd_len = TW_MAX_CDB_LEN;

        host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
        host->max_channel = 0;

        /* Register the card with the kernel SCSI layer */
        retval = scsi_add_host(host, &pdev->dev);
        if (retval) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
                goto out_iounmap;
        }

        pci_set_drvdata(pdev, host);

        printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
               host->host_no, mem_addr, pdev->irq);
        printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
               host->host_no,
               (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
                                     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
               (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
                                     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
               le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
                                     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));

        /* Try to enable MSI */
        if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
            !pci_enable_msi(pdev))
                set_bit(TW_USING_MSI, &tw_dev->flags);

        /* Now setup the interrupt handler */
        retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
        if (retval) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
                goto out_remove_host;
        }

        twa_device_extension_list[twa_device_extension_count] = tw_dev;
        twa_device_extension_count++;

        /* Re-enable interrupts on the card */
        TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

        /* Finally, scan the host */
        scsi_scan_host(host);

        if (twa_major == -1) {
                if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
                        TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
        }
        return 0;

out_remove_host:
        if (test_bit(TW_USING_MSI, &tw_dev->flags))
                pci_disable_msi(pdev);
        scsi_remove_host(host);
out_iounmap:
        iounmap(tw_dev->base_addr);
out_release_mem_region:
        pci_release_regions(pdev);
out_free_device_extension:
        twa_free_device_extension(tw_dev);
        scsi_host_put(host);
out_disable_device:
        pci_disable_device(pdev);

        return retval;
}

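/* This function is called to remove a controller from the PCI subsystem;
   it tears down everything set up in twa_probe() */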
static void twa_remove(struct pci_dev *pdev)
{
        struct Scsi_Host *host = pci_get_drvdata(pdev);
        TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

        scsi_remove_host(tw_dev->host);

        /* Unregister character device */
        if (twa_major >= 0) {
                unregister_chrdev(twa_major, "twa");
                twa_major = -1;
        }

        /* Shutdown the card */
        __twa_shutdown(tw_dev);

        /* Disable MSI if enabled */
        if (test_bit(TW_USING_MSI, &tw_dev->flags))
                pci_disable_msi(pdev);

        /* Free IO remapping */
        iounmap(tw_dev->base_addr);

        /* Free up the mem region */
        pci_release_regions(pdev);

        /* Free up device extension resources */
        twa_free_device_extension(tw_dev);

        scsi_host_put(tw_dev->host);
        pci_disable_device(pdev);
        twa_device_extension_count--;
}

#ifdef CONFIG_PM
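/* This function is called on PCI suspend: it quiesces the controller and
   puts the device into a low power state */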
static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct Scsi_Host *host = pci_get_drvdata(pdev);
        TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

        printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);

        TW_DISABLE_INTERRUPTS(tw_dev);
        free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

        if (test_bit(TW_USING_MSI, &tw_dev->flags))
                pci_disable_msi(pdev);

        /* Tell the card we are shutting down */
        if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
        } else {
                printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
        }
        TW_CLEAR_ALL_INTERRUPTS(tw_dev);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

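/* This function is called on PCI resume: it re-enables the device,
   re-initializes the card and reinstalls the interrupt handler */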
static int twa_resume(struct pci_dev *pdev)
{
        int retval = 0;
        struct Scsi_Host *host = pci_get_drvdata(pdev);
        TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

        printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
        pci_set_power_state(pdev, PCI_D0);
        pci_enable_wake(pdev, PCI_D0, 0);
        pci_restore_state(pdev);

        retval = pci_enable_device(pdev);
        if (retval) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
                return retval;
        }

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);

        retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (retval)
                retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
                retval = -ENODEV;
                goto out_disable_device;
        }

        /* Initialize the card */
        if (twa_reset_sequence(tw_dev, 0)) {
                retval = -ENODEV;
                goto out_disable_device;
        }

        /* Now setup the interrupt handler */
        retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
        if (retval) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
                retval = -ENODEV;
                goto out_disable_device;
        }

        /* Now enable MSI if enabled */
        if (test_bit(TW_USING_MSI, &tw_dev->flags))
                pci_enable_msi(pdev);

        /* Re-enable interrupts on the card */
        TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

        printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
        return 0;

out_disable_device:
        scsi_remove_host(host);
        pci_disable_device(pdev);

        return retval;
}
#endif

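/* PCI devices supported by this driver */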
static struct pci_device_id twa_pci_tbl[] = {
        { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);

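/* pci_driver initializer */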
static struct pci_driver twa_driver = {
        .name           = "3w-9xxx",
        .id_table       = twa_pci_tbl,
        .probe          = twa_probe,
        .remove         = twa_remove,
#ifdef CONFIG_PM
        .suspend        = twa_suspend,
        .resume         = twa_resume,
#endif
        .shutdown       = twa_shutdown
};

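/* This function is called on driver initialization: it registers the
   PCI driver with the kernel */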
static int __init twa_init(void)
{
        printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);

        return pci_register_driver(&twa_driver);
}

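/* This function is called on driver exit: it unregisters the PCI driver */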
static void __exit twa_exit(void)
{
        pci_unregister_driver(&twa_driver);
}

module_init(twa_init);
module_exit(twa_exit);