/*
   3w-9xxx.c -- LSI 3ware 9000 Storage Controller device driver for Linux.
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"

#define TW_DRIVER_VERSION "2.26.02.014"
static DEFINE_MUTEX(twa_chrdev_mutex);
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
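/* MSI is opt-in; it can be requested at module load time, e.g. "modprobe 3w-9xxx use_msi=1". */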

static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
                              u32 set_features, unsigned short current_fw_srl,
                              unsigned short current_fw_arch_id,
                              unsigned short current_fw_branch,
                              unsigned short current_fw_build,
                              unsigned short *fw_on_ctlr_srl,
                              unsigned short *fw_on_ctlr_arch_id,
                              unsigned short *fw_on_ctlr_branch,
                              unsigned short *fw_on_ctlr_build,
                              u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);

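/* Show some statistics about the card; exported read-only through the host's "stats" sysfs attribute (e.g. /sys/class/scsi_host/hostN/stats) */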
static ssize_t twa_show_stats(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *host = class_to_shost(dev);
        TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
        unsigned long flags = 0;
        ssize_t len;

        spin_lock_irqsave(tw_dev->host->host_lock, flags);
        len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
                       "Current commands posted: %4d\n"
                       "Max commands posted: %4d\n"
                       "Current pending commands: %4d\n"
                       "Max pending commands: %4d\n"
                       "Last sgl length: %4d\n"
                       "Max sgl length: %4d\n"
                       "Last sector count: %4d\n"
                       "Max sector count: %4d\n"
                       "SCSI Host Resets: %4d\n"
                       "AEN's: %4d\n",
                       TW_DRIVER_VERSION,
                       tw_dev->posted_request_count,
                       tw_dev->max_posted_request_count,
                       tw_dev->pending_request_count,
                       tw_dev->max_pending_request_count,
                       tw_dev->sgl_entries,
                       tw_dev->max_sgl_entries,
                       tw_dev->sector_count,
                       tw_dev->max_sector_count,
                       tw_dev->num_resets,
                       tw_dev->aen_count);
        spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
        return len;
}

static struct device_attribute twa_host_stats_attr = {
        .attr = {
                .name = "stats",
                .mode = S_IRUGO,
        },
        .show = twa_show_stats
};

static struct device_attribute *twa_host_attrs[] = {
        &twa_host_stats_attr,
        NULL,
};

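/* File operations for the driver's management character device */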
static const struct file_operations twa_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = twa_chrdev_ioctl,
        .open = twa_chrdev_open,
        .release = NULL,
        .llseek = noop_llseek,
};

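/* True when a command's data is DMA-mapped directly; single-segment commands shorter than TW_MIN_SGL_LENGTH are bounced through the per-request generic buffer instead */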
static bool twa_command_mapped(struct scsi_cmnd *cmd)
{
        return scsi_sg_count(cmd) != 1 ||
                scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
}
225
226
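/* This function completes an AEN read posted from the ISR and keeps reading until the controller's AEN queue is empty */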
227static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
228{
229 TW_Command_Full *full_command_packet;
230 TW_Command *command_packet;
231 TW_Command_Apache_Header *header;
232 unsigned short aen;
233 int retval = 1;
234
235 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
236 tw_dev->posted_request_count--;
237 aen = le16_to_cpu(header->status_block.error);
238 full_command_packet = tw_dev->command_packet_virt[request_id];
239 command_packet = &full_command_packet->command.oldcommand;
240
241
242 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
243
244 if (twa_aen_read_queue(tw_dev, request_id))
245 goto out2;
246 else {
247 retval = 0;
248 goto out;
249 }
250 }
251
252 switch (aen) {
253 case TW_AEN_QUEUE_EMPTY:
254
255 break;
256 case TW_AEN_SYNC_TIME_WITH_HOST:
257 twa_aen_sync_time(tw_dev, request_id);
258 retval = 0;
259 goto out;
260 default:
261 twa_aen_queue_event(tw_dev, header);
262
263
264 if (twa_aen_read_queue(tw_dev, request_id))
265 goto out2;
266 else {
267 retval = 0;
268 goto out;
269 }
270 }
271 retval = 0;
272out2:
273 tw_dev->state[request_id] = TW_S_COMPLETED;
274 twa_free_request_id(tw_dev, request_id);
275 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
276out:
277 return retval;
278}
279
280
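/* This function drains queued AENs from the controller (used during reset/init), giving up after TW_MAX_AEN_DRAIN reads */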
281static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
282{
283 int request_id = 0;
284 char cdb[TW_MAX_CDB_LEN];
285 TW_SG_Entry sglist[1];
286 int finished = 0, count = 0;
287 TW_Command_Full *full_command_packet;
288 TW_Command_Apache_Header *header;
289 unsigned short aen;
290 int first_reset = 0, queue = 0, retval = 1;
291
292 if (no_check_reset)
293 first_reset = 0;
294 else
295 first_reset = 1;
296
297 full_command_packet = tw_dev->command_packet_virt[request_id];
298 memset(full_command_packet, 0, sizeof(TW_Command_Full));
299
300
301 memset(&cdb, 0, TW_MAX_CDB_LEN);
302 cdb[0] = REQUEST_SENSE;
303 cdb[4] = TW_ALLOCATION_LENGTH;
304
305
306 memset(&sglist, 0, sizeof(TW_SG_Entry));
307 sglist[0].length = TW_SECTOR_SIZE;
308 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
309
310 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
311 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
312 goto out;
313 }
314
315
316 tw_dev->srb[request_id] = NULL;
317
318 do {
319
320 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
321 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
322 goto out;
323 }
324
325
326 if (twa_poll_response(tw_dev, request_id, 30)) {
327 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
328 tw_dev->posted_request_count--;
329 goto out;
330 }
331
332 tw_dev->posted_request_count--;
333 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
334 aen = le16_to_cpu(header->status_block.error);
335 queue = 0;
336 count++;
337
338 switch (aen) {
339 case TW_AEN_QUEUE_EMPTY:
340 if (first_reset != 1)
341 goto out;
342 else
343 finished = 1;
344 break;
345 case TW_AEN_SOFT_RESET:
346 if (first_reset == 0)
347 first_reset = 1;
348 else
349 queue = 1;
350 break;
351 case TW_AEN_SYNC_TIME_WITH_HOST:
352 break;
353 default:
354 queue = 1;
355 }
356
357
358 if (queue)
359 twa_aen_queue_event(tw_dev, header);
360 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
361
362 if (count == TW_MAX_AEN_DRAIN)
363 goto out;
364
365 retval = 0;
366out:
367 tw_dev->state[request_id] = TW_S_INITIAL;
368 return retval;
369}
370
371
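/* This function adds an AEN to the driver's circular event queue and logs non-debug events */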
372static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
373{
374 u32 local_time;
375 struct timeval time;
376 TW_Event *event;
377 unsigned short aen;
378 char host[16];
379 char *error_str;
380
381 tw_dev->aen_count++;
382
383
384 event = tw_dev->event_queue[tw_dev->error_index];
385
386
387 host[0] = '\0';
388 if (tw_dev->host) {
389 sprintf(host, " scsi%d:", tw_dev->host->host_no);
390 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
391 tw_dev->aen_clobber = 1;
392 }
393
394 aen = le16_to_cpu(header->status_block.error);
395 memset(event, 0, sizeof(TW_Event));
396
397 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
398 do_gettimeofday(&time);
399 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
400 event->time_stamp_sec = local_time;
401 event->aen_code = aen;
402 event->retrieved = TW_AEN_NOT_RETRIEVED;
403 event->sequence_id = tw_dev->error_sequence_id;
404 tw_dev->error_sequence_id++;
405
406
407 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
408
409 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
410 event->parameter_len = strlen(header->err_specific_desc);
411 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
412 if (event->severity != TW_AEN_SEVERITY_DEBUG)
413 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
414 host,
415 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
416 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
417 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
418 header->err_specific_desc);
419 else
420 tw_dev->aen_count--;
421
422 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
423 tw_dev->event_queue_wrapped = 1;
424 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
425}
426
427
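/* This function posts a REQUEST SENSE command to read the next AEN from the controller */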
428static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
429{
430 char cdb[TW_MAX_CDB_LEN];
431 TW_SG_Entry sglist[1];
432 TW_Command_Full *full_command_packet;
433 int retval = 1;
434
435 full_command_packet = tw_dev->command_packet_virt[request_id];
436 memset(full_command_packet, 0, sizeof(TW_Command_Full));
437
438
439 memset(&cdb, 0, TW_MAX_CDB_LEN);
440 cdb[0] = REQUEST_SENSE;
441 cdb[4] = TW_ALLOCATION_LENGTH;
442
443
444 memset(&sglist, 0, sizeof(TW_SG_Entry));
445 sglist[0].length = TW_SECTOR_SIZE;
446 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
447
448
449 tw_dev->srb[request_id] = NULL;
450
451
452 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
453 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
454 goto out;
455 }
456 retval = 0;
457out:
458 return retval;
459}
460
461
462static char *twa_aen_severity_lookup(unsigned char severity_code)
463{
464 char *retval = NULL;
465
466 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
467 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
468 goto out;
469
470 retval = twa_aen_severity_table[severity_code];
471out:
472 return retval;
473}
474
475
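/* This function answers a time-sync AEN by writing the local time to the controller's timekeeping table; the firmware expects seconds elapsed since the previous Sunday 00:00 */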
476static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
477{
478 u32 schedulertime;
479 struct timeval utc;
480 TW_Command_Full *full_command_packet;
481 TW_Command *command_packet;
482 TW_Param_Apache *param;
483 u32 local_time;
484
485
486 full_command_packet = tw_dev->command_packet_virt[request_id];
487 memset(full_command_packet, 0, sizeof(TW_Command_Full));
488 command_packet = &full_command_packet->command.oldcommand;
489 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
490 command_packet->request_id = request_id;
491 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
492 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
493 command_packet->size = TW_COMMAND_SIZE;
494 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
495
496
497 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
498 memset(param, 0, TW_SECTOR_SIZE);
499 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000);
500 param->parameter_id = cpu_to_le16(0x3);
501 param->parameter_size_bytes = cpu_to_le16(4);
502
503
504
505 do_gettimeofday(&utc);
506 local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
507 schedulertime = local_time - (3 * 86400);
508 schedulertime = cpu_to_le32(schedulertime % 604800);
509
510 memcpy(param->data, &schedulertime, sizeof(u32));
511
512
513 tw_dev->srb[request_id] = NULL;
514
515
516 twa_post_command_packet(tw_dev, request_id, 1);
517}
518
519
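/* This function allocates one coherent DMA region and slices it into TW_Q_LENGTH per-request buffers: which == 0 for command packets, which == 1 for generic (sense/param) buffers */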
520static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
521{
522 int i;
523 dma_addr_t dma_handle;
524 unsigned long *cpu_addr;
525 int retval = 1;
526
527 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
528 if (!cpu_addr) {
529 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
530 goto out;
531 }
532
533 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
534 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
535 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
536 goto out;
537 }
538
539 memset(cpu_addr, 0, size*TW_Q_LENGTH);
540
541 for (i = 0; i < TW_Q_LENGTH; i++) {
542 switch(which) {
543 case 0:
544 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
545 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
546 break;
547 case 1:
548 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
549 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
550 break;
551 }
552 }
553 retval = 0;
554out:
555 return retval;
556}
557
558
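/* This function checks the status register for missing expected bits or unexpected bits; a nonzero return means something is wrong */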
559static int twa_check_bits(u32 status_reg_value)
560{
561 int retval = 1;
562
563 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
564 goto out;
565 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
566 goto out;
567
568 retval = 0;
569out:
570 return retval;
571}
572
573
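/* This function checks driver/firmware SRL compatibility at init time, falling back to base-mode values if the extended handshake is rejected */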
574static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
575{
576 int retval = 1;
577 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
578 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
579 u32 init_connect_result = 0;
580
581 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
582 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
583 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
584 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
585 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
586 &fw_on_ctlr_build, &init_connect_result)) {
587 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
588 goto out;
589 }
590
591 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
592 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
593 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
594
595
596 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
597 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
598 TW_EXTENDED_INIT_CONNECT,
599 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
600 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
601 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
602 &fw_on_ctlr_branch, &fw_on_ctlr_build,
603 &init_connect_result)) {
604 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
605 goto out;
606 }
607 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
608 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
609 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
610 } else {
611 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
612 }
613 goto out;
614 }
615 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
616 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
617 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
618 }
619
620
621 strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
622 sizeof(tw_dev->tw_compat_info.driver_version));
623 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
624 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
625 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
626 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
627 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
628 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
629 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
630 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
631 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
632
633 retval = 0;
634out:
635 return retval;
636}
637
638
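/* ioctl handler for the management character device: firmware passthrough, AEN/event
   retrieval, compatibility info and the userspace lock.
   A minimal userspace sketch (assuming the usual /dev/twaN node for this major):
       int fd = open("/dev/twa0", O_RDWR);
       ioctl(fd, TW_IOCTL_GET_COMPATIBILITY_INFO, buf);
   where buf points to a TW_Ioctl_Buf_Apache whose driver_command.buffer_length is set. */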
639static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
640{
641 struct inode *inode = file_inode(file);
642 long timeout;
643 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
644 dma_addr_t dma_handle;
645 int request_id = 0;
646 unsigned int sequence_id = 0;
647 unsigned char event_index, start_index;
648 TW_Ioctl_Driver_Command driver_command;
649 TW_Ioctl_Buf_Apache *tw_ioctl;
650 TW_Lock *tw_lock;
651 TW_Command_Full *full_command_packet;
652 TW_Compatibility_Info *tw_compat_info;
653 TW_Event *event;
654 struct timeval current_time;
655 u32 current_time_ms;
656 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
657 int retval = TW_IOCTL_ERROR_OS_EFAULT;
658 void __user *argp = (void __user *)arg;
659
660 mutex_lock(&twa_chrdev_mutex);
661
662
663 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
664 retval = TW_IOCTL_ERROR_OS_EINTR;
665 goto out;
666 }
667
668
669 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
670 goto out2;
671
672
673 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
674 retval = TW_IOCTL_ERROR_OS_EINVAL;
675 goto out2;
676 }
677
678
679 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
680
681
682 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
683 if (!cpu_addr) {
684 retval = TW_IOCTL_ERROR_OS_ENOMEM;
685 goto out2;
686 }
687
688 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
689
690
691 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
692 goto out3;
693
694
695 switch (cmd) {
696 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
697 spin_lock_irqsave(tw_dev->host->host_lock, flags);
698 twa_get_request_id(tw_dev, &request_id);
699
700
701 tw_dev->srb[request_id] = NULL;
702
703
704 tw_dev->chrdev_request_id = request_id;
705
706 full_command_packet = &tw_ioctl->firmware_command;
707
708
709 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
710
711 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
712
713
714 twa_post_command_packet(tw_dev, request_id, 1);
715 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
716
717 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
718
719
720 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
721
722
723 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
724
725 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
726 tw_dev->host->host_no, TW_DRIVER, 0x37,
727 cmd);
728 retval = TW_IOCTL_ERROR_OS_EIO;
729 twa_reset_device_extension(tw_dev);
730 goto out3;
731 }
732
733
734 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
735
736
737 spin_lock_irqsave(tw_dev->host->host_lock, flags);
738 tw_dev->posted_request_count--;
739 tw_dev->state[request_id] = TW_S_COMPLETED;
740 twa_free_request_id(tw_dev, request_id);
741 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
742 break;
743 case TW_IOCTL_GET_COMPATIBILITY_INFO:
744 tw_ioctl->driver_command.status = 0;
745
746 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
747 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
748 break;
749 case TW_IOCTL_GET_LAST_EVENT:
750 if (tw_dev->event_queue_wrapped) {
751 if (tw_dev->aen_clobber) {
752 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
753 tw_dev->aen_clobber = 0;
754 } else
755 tw_ioctl->driver_command.status = 0;
756 } else {
757 if (!tw_dev->error_index) {
758 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
759 break;
760 }
761 tw_ioctl->driver_command.status = 0;
762 }
763 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
764 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
765 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
766 break;
767 case TW_IOCTL_GET_FIRST_EVENT:
768 if (tw_dev->event_queue_wrapped) {
769 if (tw_dev->aen_clobber) {
770 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
771 tw_dev->aen_clobber = 0;
772 } else
773 tw_ioctl->driver_command.status = 0;
774 event_index = tw_dev->error_index;
775 } else {
776 if (!tw_dev->error_index) {
777 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
778 break;
779 }
780 tw_ioctl->driver_command.status = 0;
781 event_index = 0;
782 }
783 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
784 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
785 break;
786 case TW_IOCTL_GET_NEXT_EVENT:
787 event = (TW_Event *)tw_ioctl->data_buffer;
788 sequence_id = event->sequence_id;
789 tw_ioctl->driver_command.status = 0;
790
791 if (tw_dev->event_queue_wrapped) {
792 if (tw_dev->aen_clobber) {
793 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
794 tw_dev->aen_clobber = 0;
795 }
796 start_index = tw_dev->error_index;
797 } else {
798 if (!tw_dev->error_index) {
799 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
800 break;
801 }
802 start_index = 0;
803 }
804 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
805
806 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
807 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
808 tw_dev->aen_clobber = 1;
809 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
810 break;
811 }
812 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
813 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
814 break;
815 case TW_IOCTL_GET_PREVIOUS_EVENT:
816 event = (TW_Event *)tw_ioctl->data_buffer;
817 sequence_id = event->sequence_id;
818 tw_ioctl->driver_command.status = 0;
819
820 if (tw_dev->event_queue_wrapped) {
821 if (tw_dev->aen_clobber) {
822 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
823 tw_dev->aen_clobber = 0;
824 }
825 start_index = tw_dev->error_index;
826 } else {
827 if (!tw_dev->error_index) {
828 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
829 break;
830 }
831 start_index = 0;
832 }
833 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
834
835 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
836 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
837 tw_dev->aen_clobber = 1;
838 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
839 break;
840 }
841 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
842 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
843 break;
844 case TW_IOCTL_GET_LOCK:
845 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
 do_gettimeofday(&current_time);
847 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
848
849 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
850 tw_dev->ioctl_sem_lock = 1;
851 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
852 tw_ioctl->driver_command.status = 0;
853 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
854 } else {
855 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
856 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
857 }
858 break;
859 case TW_IOCTL_RELEASE_LOCK:
860 if (tw_dev->ioctl_sem_lock == 1) {
861 tw_dev->ioctl_sem_lock = 0;
862 tw_ioctl->driver_command.status = 0;
863 } else {
864 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
865 }
866 break;
867 default:
868 retval = TW_IOCTL_ERROR_OS_ENOTTY;
869 goto out3;
870 }
871
872
873 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
874 retval = 0;
875out3:
876
877 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
878out2:
879 mutex_unlock(&tw_dev->ioctl_lock);
880out:
881 mutex_unlock(&twa_chrdev_mutex);
882 return retval;
883}
884
885
886
887static int twa_chrdev_open(struct inode *inode, struct file *file)
888{
889 unsigned int minor_number;
890 int retval = TW_IOCTL_ERROR_OS_ENODEV;
891
892 minor_number = iminor(inode);
893 if (minor_number >= twa_device_extension_count)
894 goto out;
895 retval = 0;
896out:
897 return retval;
898}
899
900
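/* This function decodes and clears abnormal status bits (PCI parity/abort, queue error, microcontroller error) */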
901static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
902{
903 int retval = 1;
904
905
906 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
907 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
908 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
909 }
910
911 if (status_reg_value & TW_STATUS_PCI_ABORT) {
912 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
913 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
914 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
915 }
916
917 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
918 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
919 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
920 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
921 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
922 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
923 }
924
925 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
926 if (tw_dev->reset_print == 0) {
927 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
928 tw_dev->reset_print = 1;
929 }
930 goto out;
931 }
932 retval = 0;
933out:
934 return retval;
935}
936
937
938static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
939{
940 u32 status_reg_value, response_que_value;
941 int count = 0, retval = 1;
942
943 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
944
945 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
946 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
947 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
948 count++;
949 }
950 if (count == TW_MAX_RESPONSE_DRAIN)
951 goto out;
952
953 retval = 0;
954out:
955 return retval;
956}
957
958
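/* This function drains the large response queue on 9550SX and newer controllers; plain 9000-series devices skip it */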
959static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
960{
961 u32 response_que_value = 0;
962 unsigned long before;
963 int retval = 1;
964
965 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
966 before = jiffies;
967 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
968 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
969 msleep(1);
970 if (time_after(jiffies, before + HZ * 30))
971 goto out;
972 }
973
974 msleep(500);
975 retval = 0;
976 } else
977 retval = 0;
978out:
979 return retval;
980}
981
982
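/* This function translates a firmware error status into SCSI sense data (optionally copied into the scsi_cmnd) and logs the error */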
983static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
984{
985 TW_Command_Full *full_command_packet;
986 unsigned short error;
987 int retval = 1;
988 char *error_str;
989
990 full_command_packet = tw_dev->command_packet_virt[request_id];
991
992
993 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
994
995
996 error = le16_to_cpu(full_command_packet->header.status_block.error);
997 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
998 if (print_host)
999 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1000 tw_dev->host->host_no,
1001 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1002 full_command_packet->header.status_block.error,
1003 error_str[0] == '\0' ?
1004 twa_string_lookup(twa_error_table,
1005 full_command_packet->header.status_block.error) : error_str,
1006 full_command_packet->header.err_specific_desc);
1007 else
1008 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1009 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1010 full_command_packet->header.status_block.error,
1011 error_str[0] == '\0' ?
1012 twa_string_lookup(twa_error_table,
1013 full_command_packet->header.status_block.error) : error_str,
1014 full_command_packet->header.err_specific_desc);
1015 }
1016
1017 if (copy_sense) {
1018 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1019 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1020 retval = TW_ISR_DONT_RESULT;
1021 goto out;
1022 }
1023 retval = 0;
1024out:
1025 return retval;
1026}
1027
1028
1029static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1030{
1031 if (tw_dev->command_packet_virt[0])
1032 pci_free_consistent(tw_dev->tw_pci_dev,
1033 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1034 tw_dev->command_packet_virt[0],
1035 tw_dev->command_packet_phys[0]);
1036
1037 if (tw_dev->generic_buffer_virt[0])
1038 pci_free_consistent(tw_dev->tw_pci_dev,
1039 TW_SECTOR_SIZE*TW_Q_LENGTH,
1040 tw_dev->generic_buffer_virt[0],
1041 tw_dev->generic_buffer_phys[0]);
1042
1043 kfree(tw_dev->event_queue[0]);
1044}
1045
1046
1047static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1048{
1049 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1050 tw_dev->state[request_id] = TW_S_FINISHED;
1051 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1052}
1053
1054
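/* This function issues an internal GET_PARAM and returns a pointer to the parameter data in the request's generic buffer, or NULL on timeout */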
1055static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1056{
1057 TW_Command_Full *full_command_packet;
1058 TW_Command *command_packet;
1059 TW_Param_Apache *param;
1060 void *retval = NULL;
1061
1062
1063 full_command_packet = tw_dev->command_packet_virt[request_id];
1064 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1065 command_packet = &full_command_packet->command.oldcommand;
1066
1067 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1068 command_packet->size = TW_COMMAND_SIZE;
1069 command_packet->request_id = request_id;
1070 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1071
1072
1073 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1074 memset(param, 0, TW_SECTOR_SIZE);
1075 param->table_id = cpu_to_le16(table_id | 0x8000);
1076 param->parameter_id = cpu_to_le16(parameter_id);
1077 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1078
1079 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1080 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1081
1082
1083 twa_post_command_packet(tw_dev, request_id, 1);
1084
1085
1086 if (twa_poll_response(tw_dev, request_id, 30))
1087 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1088 else
1089 retval = (void *)&(param->data[0]);
1090
1091 tw_dev->posted_request_count--;
1092 tw_dev->state[request_id] = TW_S_INITIAL;
1093
1094 return retval;
1095}
1096
1097
1098static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1099{
1100 *request_id = tw_dev->free_queue[tw_dev->free_head];
1101 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1102 tw_dev->state[*request_id] = TW_S_STARTED;
1103}
1104
1105
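/* This function sends an INIT_CONNECTION command; with TW_EXTENDED_INIT_CONNECT it also exchanges SRL/branch/build numbers with the firmware */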
1106static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1107 u32 set_features, unsigned short current_fw_srl,
1108 unsigned short current_fw_arch_id,
1109 unsigned short current_fw_branch,
1110 unsigned short current_fw_build,
1111 unsigned short *fw_on_ctlr_srl,
1112 unsigned short *fw_on_ctlr_arch_id,
1113 unsigned short *fw_on_ctlr_branch,
1114 unsigned short *fw_on_ctlr_build,
1115 u32 *init_connect_result)
1116{
1117 TW_Command_Full *full_command_packet;
1118 TW_Initconnect *tw_initconnect;
1119 int request_id = 0, retval = 1;
1120
1121
1122 full_command_packet = tw_dev->command_packet_virt[request_id];
1123 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1124 full_command_packet->header.header_desc.size_header = 128;
1125
1126 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1127 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1128 tw_initconnect->request_id = request_id;
1129 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1130 tw_initconnect->features = set_features;
1131
1132
1133 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1134
1135 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1136
1137 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1138 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1139 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1140 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1141 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1142 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1143 } else
1144 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1145
1146
1147 twa_post_command_packet(tw_dev, request_id, 1);
1148
1149
1150 if (twa_poll_response(tw_dev, request_id, 30)) {
1151 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1152 } else {
1153 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1154 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1155 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1156 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1157 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1158 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1159 }
1160 retval = 0;
1161 }
1162
1163 tw_dev->posted_request_count--;
1164 tw_dev->state[request_id] = TW_S_INITIAL;
1165
1166 return retval;
1167}
1168
1169
1170static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1171{
1172 int i, retval = 1;
1173
1174
1175 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1176 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1177 goto out;
1178 }
1179
1180
1181 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1182 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1183 goto out;
1184 }
1185
1186
1187 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1188 if (!tw_dev->event_queue[0]) {
1189 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1190 goto out;
1191 }
1192
1193
1194 for (i = 0; i < TW_Q_LENGTH; i++) {
1195 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1196 tw_dev->free_queue[i] = i;
1197 tw_dev->state[i] = TW_S_INITIAL;
1198 }
1199
1200 tw_dev->pending_head = TW_Q_START;
1201 tw_dev->pending_tail = TW_Q_START;
1202 tw_dev->free_head = TW_Q_START;
1203 tw_dev->free_tail = TW_Q_START;
1204 tw_dev->error_sequence_id = 1;
1205 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1206
1207 mutex_init(&tw_dev->ioctl_lock);
1208 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1209
1210 retval = 0;
1211out:
1212 return retval;
1213}
1214
1215
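/* Interrupt service routine: handles host, attention (AEN), command (pending-queue drain) and response interrupts */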
1216static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1217{
1218 int request_id, error = 0;
1219 u32 status_reg_value;
1220 TW_Response_Queue response_que;
1221 TW_Command_Full *full_command_packet;
1222 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1223 int handled = 0;
1224
1225
1226 spin_lock(tw_dev->host->host_lock);
1227
1228
1229 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1230
1231
1232 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1233 goto twa_interrupt_bail;
1234
1235 handled = 1;
1236
1237
1238 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1239 goto twa_interrupt_bail;
1240
1241
1242 if (twa_check_bits(status_reg_value)) {
1243 if (twa_decode_bits(tw_dev, status_reg_value)) {
1244 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1245 goto twa_interrupt_bail;
1246 }
1247 }
1248
1249
1250 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1251 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1252
1253
1254 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1255 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1256 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1257 twa_get_request_id(tw_dev, &request_id);
1258
1259 error = twa_aen_read_queue(tw_dev, request_id);
1260 if (error) {
1261 tw_dev->state[request_id] = TW_S_COMPLETED;
1262 twa_free_request_id(tw_dev, request_id);
1263 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1264 }
1265 }
1266 }
1267
1268
1269 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1270 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1271
1272 while (tw_dev->pending_request_count > 0) {
1273 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1274 if (tw_dev->state[request_id] != TW_S_PENDING) {
1275 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1276 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1277 goto twa_interrupt_bail;
1278 }
1279 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1280 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1281 tw_dev->pending_request_count--;
1282 } else {
1283
1284 break;
1285 }
1286 }
1287 }
1288
1289
1290 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1291
1292
1293 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1294
1295 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1296 request_id = TW_RESID_OUT(response_que.response_id);
1297 full_command_packet = tw_dev->command_packet_virt[request_id];
1298 error = 0;
1299
1300 if (full_command_packet->command.newcommand.status != 0) {
1301 if (tw_dev->srb[request_id] != NULL) {
1302 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1303 } else {
1304
1305 if (request_id != tw_dev->chrdev_request_id) {
1306 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1307 }
1308 }
1309 }
1310
1311
1312 if (tw_dev->state[request_id] != TW_S_POSTED) {
1313 if (tw_dev->srb[request_id] != NULL) {
1314 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1315 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1316 goto twa_interrupt_bail;
1317 }
1318 }
1319
1320
1321 if (tw_dev->srb[request_id] == NULL) {
1322 if (request_id != tw_dev->chrdev_request_id) {
1323 if (twa_aen_complete(tw_dev, request_id))
1324 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1325 } else {
1326 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1327 wake_up(&tw_dev->ioctl_wqueue);
1328 }
1329 } else {
1330 struct scsi_cmnd *cmd;
1331
1332 cmd = tw_dev->srb[request_id];
1333
1334 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1335
1336 if (error == 0) {
1337 cmd->result = (DID_OK << 16);
1338 }
1339
1340
1341 if (error == 1) {
1342
1343 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1344 }
1345
1346
1347 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1348 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1349 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1350 }
1351
1352
1353 if (twa_command_mapped(cmd))
1354 scsi_dma_unmap(cmd);
1355 cmd->scsi_done(cmd);
1356 tw_dev->state[request_id] = TW_S_COMPLETED;
1357 twa_free_request_id(tw_dev, request_id);
1358 tw_dev->posted_request_count--;
1359 }
1360
1361
1362 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1363 if (twa_check_bits(status_reg_value)) {
1364 if (twa_decode_bits(tw_dev, status_reg_value)) {
1365 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1366 goto twa_interrupt_bail;
1367 }
1368 }
1369 }
1370 }
1371
1372twa_interrupt_bail:
1373 spin_unlock(tw_dev->host->host_lock);
1374 return IRQ_RETVAL(handled);
1375}
1376
1377
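/* This function loads the ioctl data buffer's DMA address into the passthrough command's scatter/gather list (new- or old-style command layout) */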
1378static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1379{
1380 TW_Command *oldcommand;
1381 TW_Command_Apache *newcommand;
1382 TW_SG_Entry *sgl;
1383 unsigned int pae = 0;
1384
1385 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1386 pae = 1;
1387
1388 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1389 newcommand = &full_command_packet->command.newcommand;
1390 newcommand->request_id__lunl =
1391 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1392 if (length) {
1393 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1394 newcommand->sg_list[0].length = cpu_to_le32(length);
1395 }
1396 newcommand->sgl_entries__lunh =
1397 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1398 } else {
1399 oldcommand = &full_command_packet->command.oldcommand;
1400 oldcommand->request_id = request_id;
1401
1402 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1403
1404 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1405 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1406 else
1407 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1408 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1409 sgl->length = cpu_to_le32(length);
1410
1411 oldcommand->size += pae;
1412 }
1413 }
1414}
1415
1416
1417static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1418{
1419 int retval = 1, found = 0, response_request_id;
1420 TW_Response_Queue response_queue;
1421 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1422
1423 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1424 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1425 response_request_id = TW_RESID_OUT(response_queue.response_id);
1426 if (request_id != response_request_id) {
1427 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1428 goto out;
1429 }
1430 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1431 if (full_command_packet->command.newcommand.status != 0) {
1432
1433 twa_fill_sense(tw_dev, request_id, 0, 0);
1434 goto out;
1435 }
1436 found = 1;
1437 } else {
1438 if (full_command_packet->command.oldcommand.status != 0) {
1439
1440 twa_fill_sense(tw_dev, request_id, 0, 0);
1441 goto out;
1442 }
1443 found = 1;
1444 }
1445 }
1446
1447 if (found)
1448 retval = 0;
1449out:
1450 return retval;
1451}
1452
1453
1454static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1455{
1456 u32 status_reg_value;
1457 unsigned long before;
1458 int retval = 1;
1459
1460 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1461 before = jiffies;
1462
1463 if (twa_check_bits(status_reg_value))
1464 twa_decode_bits(tw_dev, status_reg_value);
1465
1466 while ((status_reg_value & flag) != flag) {
1467 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1468
1469 if (twa_check_bits(status_reg_value))
1470 twa_decode_bits(tw_dev, status_reg_value);
1471
1472 if (time_after(jiffies, before + HZ * seconds))
1473 goto out;
1474
1475 msleep(50);
1476 }
1477 retval = 0;
1478out:
1479 return retval;
1480}
1481
1482
1483static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1484{
1485 u32 status_reg_value;
1486 unsigned long before;
1487 int retval = 1;
1488
1489 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490 before = jiffies;
1491
1492 if (twa_check_bits(status_reg_value))
1493 twa_decode_bits(tw_dev, status_reg_value);
1494
1495 while ((status_reg_value & flag) != 0) {
1496 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1497 if (twa_check_bits(status_reg_value))
1498 twa_decode_bits(tw_dev, status_reg_value);
1499
1500 if (time_after(jiffies, before + HZ * seconds))
1501 goto out;
1502
1503 msleep(50);
1504 }
1505 retval = 0;
1506out:
1507 return retval;
1508}
1509
1510
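/* This function posts a command packet to the controller, or places it on the internal pending queue when the controller's command queue is full */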
1511static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1512{
1513 u32 status_reg_value;
1514 dma_addr_t command_que_value;
1515 int retval = 1;
1516
1517 command_que_value = tw_dev->command_packet_phys[request_id];
1518
1519
1520 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1521 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1522 command_que_value += TW_COMMAND_OFFSET;
1523 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1524 }
1525
1526 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1527
1528 if (twa_check_bits(status_reg_value))
1529 twa_decode_bits(tw_dev, status_reg_value);
1530
1531 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1532
1533
1534 if (!internal) {
1535 retval = SCSI_MLQUEUE_HOST_BUSY;
1536 goto out;
1537 }
1538
1539
1540 if (tw_dev->state[request_id] != TW_S_PENDING) {
1541 tw_dev->state[request_id] = TW_S_PENDING;
1542 tw_dev->pending_request_count++;
1543 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1544 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1545 }
1546 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1547 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1548 }
1549 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1550 goto out;
1551 } else {
1552 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1553 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1554
1555 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1556 } else {
1557 if (sizeof(dma_addr_t) > 4) {
1558 command_que_value += TW_COMMAND_OFFSET;
1559 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1560 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1561 } else {
1562 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1563 }
1564 }
1565 tw_dev->state[request_id] = TW_S_POSTED;
1566 tw_dev->posted_request_count++;
1567 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1568 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1569 }
1570 }
1571 retval = 0;
1572out:
1573 return retval;
1574}
1575
1576
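/* This function fails outstanding commands with DID_RESET, reinitializes the request queues and runs a soft-reset sequence on the controller */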
1577static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1578{
1579 int i = 0;
1580 int retval = 1;
1581 unsigned long flags = 0;
1582
1583 set_bit(TW_IN_RESET, &tw_dev->flags);
1584 TW_DISABLE_INTERRUPTS(tw_dev);
1585 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1586 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1587
1588
1589 for (i = 0; i < TW_Q_LENGTH; i++) {
1590 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1591 (tw_dev->state[i] != TW_S_INITIAL) &&
1592 (tw_dev->state[i] != TW_S_COMPLETED)) {
1593 if (tw_dev->srb[i]) {
1594 struct scsi_cmnd *cmd = tw_dev->srb[i];
1595
1596 cmd->result = (DID_RESET << 16);
1597 if (twa_command_mapped(cmd))
1598 scsi_dma_unmap(cmd);
1599 cmd->scsi_done(cmd);
1600 }
1601 }
1602 }
1603
1604
1605 for (i = 0; i < TW_Q_LENGTH; i++) {
1606 tw_dev->free_queue[i] = i;
1607 tw_dev->state[i] = TW_S_INITIAL;
1608 }
1609 tw_dev->free_head = TW_Q_START;
1610 tw_dev->free_tail = TW_Q_START;
1611 tw_dev->posted_request_count = 0;
1612 tw_dev->pending_request_count = 0;
1613 tw_dev->pending_head = TW_Q_START;
1614 tw_dev->pending_tail = TW_Q_START;
1615 tw_dev->reset_print = 0;
1616
1617 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1618
1619 if (twa_reset_sequence(tw_dev, 1))
1620 goto out;
1621
1622 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1623 clear_bit(TW_IN_RESET, &tw_dev->flags);
1624 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1625
1626 retval = 0;
1627out:
1628 return retval;
1629}
1630
1631
1632static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1633{
1634 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1635
1636 while (tries < TW_MAX_RESET_TRIES) {
1637 if (do_soft_reset) {
1638 TW_SOFT_RESET(tw_dev);
1639
1640 if (twa_empty_response_queue_large(tw_dev)) {
1641 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1642 do_soft_reset = 1;
1643 tries++;
1644 continue;
1645 }
1646 }
1647
1648
1649 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1650 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1651 do_soft_reset = 1;
1652 tries++;
1653 continue;
1654 }
1655
1656
1657 if (twa_empty_response_queue(tw_dev)) {
1658 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1659 do_soft_reset = 1;
1660 tries++;
1661 continue;
1662 }
1663
1664 flashed = 0;
1665
1666
1667 if (twa_check_srl(tw_dev, &flashed)) {
1668 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1669 do_soft_reset = 1;
1670 tries++;
1671 continue;
1672 } else {
1673 if (flashed) {
1674 tries++;
1675 continue;
1676 }
1677 }
1678
1679
1680 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1681 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1682 do_soft_reset = 1;
1683 tries++;
1684 continue;
1685 }
1686
1687
1688 retval = 0;
1689 goto out;
1690 }
1691out:
1692 return retval;
1693}
1694
1695
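/* This function reports a synthetic BIOS geometry: 255 heads / 63 sectors for capacities of 0x200000 sectors (1 GB) and up, otherwise 64 / 32 */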
1696static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1697{
1698 int heads, sectors, cylinders;
1699 TW_Device_Extension *tw_dev;
1700
1701 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1702
1703 if (capacity >= 0x200000) {
1704 heads = 255;
1705 sectors = 63;
1706 cylinders = sector_div(capacity, heads * sectors);
1707 } else {
1708 heads = 64;
1709 sectors = 32;
1710 cylinders = sector_div(capacity, heads * sectors);
1711 }
1712
1713 geom[0] = heads;
1714 geom[1] = sectors;
1715 geom[2] = cylinders;
1716
1717 return 0;
1718}
1719
1720
1721static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1722{
1723 TW_Device_Extension *tw_dev = NULL;
1724 int retval = FAILED;
1725
1726 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1727
1728 tw_dev->num_resets++;
1729
1730 sdev_printk(KERN_WARNING, SCpnt->device,
1731 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1732 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1733
1734
1735 mutex_lock(&tw_dev->ioctl_lock);
1736
1737
1738 if (twa_reset_device_extension(tw_dev)) {
1739 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1740 goto out;
1741 }
1742
1743 retval = SUCCESS;
1744out:
1745 mutex_unlock(&tw_dev->ioctl_lock);
1746 return retval;
1747}
1748
1749
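/* This is the main scsi queuecommand function (wrapped by DEF_SCSI_QCMD below): it allocates a request id and hands the command to twa_scsiop_execute_scsi() */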
1750static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1751{
1752 int request_id, retval;
1753 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1754
1755
1756 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1757 retval = SCSI_MLQUEUE_HOST_BUSY;
1758 goto out;
1759 }
1760
1761
1762 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1763 SCpnt->result = (DID_BAD_TARGET << 16);
1764 done(SCpnt);
1765 retval = 0;
1766 goto out;
1767 }
1768
1769
1770 SCpnt->scsi_done = done;
1771
1772
1773 twa_get_request_id(tw_dev, &request_id);
1774
1775
1776 tw_dev->srb[request_id] = SCpnt;
1777
1778 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1779 switch (retval) {
1780 case SCSI_MLQUEUE_HOST_BUSY:
1781 if (twa_command_mapped(SCpnt))
1782 scsi_dma_unmap(SCpnt);
1783 twa_free_request_id(tw_dev, request_id);
1784 break;
1785 case 1:
1786 SCpnt->result = (DID_ERROR << 16);
1787 if (twa_command_mapped(SCpnt))
1788 scsi_dma_unmap(SCpnt);
1789 done(SCpnt);
1790 tw_dev->state[request_id] = TW_S_COMPLETED;
1791 twa_free_request_id(tw_dev, request_id);
1792 retval = 0;
1793 }
1794out:
1795 return retval;
1796}
1797
1798static DEF_SCSI_QCMD(twa_scsi_queue)
1799
1800
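/* This function builds and posts an EXECUTE_SCSI command; small single-segment requests are bounced through the per-request generic buffer rather than DMA-mapped */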
1801static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1802{
1803 TW_Command_Full *full_command_packet;
1804 TW_Command_Apache *command_packet;
1805 u32 num_sectors = 0x0;
1806 int i, sg_count;
1807 struct scsi_cmnd *srb = NULL;
1808 struct scatterlist *sglist = NULL, *sg;
1809 int retval = 1;
1810
1811 if (tw_dev->srb[request_id]) {
1812 srb = tw_dev->srb[request_id];
1813 if (scsi_sglist(srb))
1814 sglist = scsi_sglist(srb);
1815 }
1816
1817
1818 full_command_packet = tw_dev->command_packet_virt[request_id];
1819 full_command_packet->header.header_desc.size_header = 128;
1820 full_command_packet->header.status_block.error = 0;
1821 full_command_packet->header.status_block.severity__reserved = 0;
1822
1823 command_packet = &full_command_packet->command.newcommand;
1824 command_packet->status = 0;
1825 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1826
1827
1828 if (!cdb)
1829 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1830 else
1831 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1832
1833 if (srb) {
1834 command_packet->unit = srb->device->id;
1835 command_packet->request_id__lunl =
1836 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1837 } else {
1838 command_packet->request_id__lunl =
1839 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1840 command_packet->unit = 0;
1841 }
1842
1843 command_packet->sgl_offset = 16;
1844
1845 if (!sglistarg) {
1846
1847
1848 if (scsi_sg_count(srb)) {
1849 if (!twa_command_mapped(srb)) {
1850 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1851 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1852 scsi_sg_copy_to_buffer(srb,
1853 tw_dev->generic_buffer_virt[request_id],
1854 TW_SECTOR_SIZE);
1855 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1856 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1857 } else {
1858 sg_count = scsi_dma_map(srb);
1859 if (sg_count < 0)
1860 goto out;
1861
1862 scsi_for_each_sg(srb, sg, sg_count, i) {
1863 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1864 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1865 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1866 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1867 goto out;
1868 }
1869 }
1870 }
1871 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1872 }
1873 } else {
1874
1875 for (i = 0; i < use_sg; i++) {
1876 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1877 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1878 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1879 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1880 goto out;
1881 }
1882 }
1883 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1884 }
1885
1886 if (srb) {
1887 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1888 num_sectors = (u32)srb->cmnd[4];
1889
1890 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1891 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1892 }
1893
1894
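	/* Update sector statistic */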
1895 tw_dev->sector_count = num_sectors;
1896 if (tw_dev->sector_count > tw_dev->max_sector_count)
1897 tw_dev->max_sector_count = tw_dev->sector_count;
1898
1899
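	/* Update SG statistics */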
1900 if (srb) {
1901 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1902 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1903 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1904 }
1905
1906
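	/* Now post the command to the board */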
1907 if (srb) {
1908 retval = twa_post_command_packet(tw_dev, request_id, 0);
1909 } else {
1910 twa_post_command_packet(tw_dev, request_id, 1);
1911 retval = 0;
1912 }
1913out:
1914 return retval;
1915}
1916
1917
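/* This function completes an execute scsi operation */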
1918static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1919{
1920 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1921
1922 if (!twa_command_mapped(cmd) &&
1923 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1924 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1925 if (scsi_sg_count(cmd) == 1) {
1926 void *buf = tw_dev->generic_buffer_virt[request_id];
1927
1928 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1929 }
1930 }
1931}
1932
1933
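/* This function tells the controller to shut down */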
1934static void __twa_shutdown(TW_Device_Extension *tw_dev)
1935{
1936
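	/* Disable interrupts */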
1937 TW_DISABLE_INTERRUPTS(tw_dev);
1938
1939
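	/* Free up the IRQ */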
1940 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1941
1942 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1943
1944
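	/* Tell the card we are shutting down */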
1945 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1946 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1947 } else {
1948 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1949 }
1950
1951
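	/* Clear all interrupts just before exit */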
1952 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1953}
1954
1955
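/* Wrapper for __twa_shutdown */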
1956static void twa_shutdown(struct pci_dev *pdev)
1957{
1958 struct Scsi_Host *host = pci_get_drvdata(pdev);
1959 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1960
1961 __twa_shutdown(tw_dev);
1962}
1963
1964
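/* This function looks up the text for a message code */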
1965static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1966{
1967 int index;
1968
1969 for (index = 0; ((code != table[index].code) &&
1970 (table[index].text != NULL)); index++);
1971 return table[index].text;
1972}
1973
1974
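/* This function gets called for each discovered scsi device */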
1975static int twa_slave_configure(struct scsi_device *sdev)
1976{
1977
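	/* Force 60 second timeout */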
1978 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1979
1980 return 0;
1981}
1982
1983
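/* scsi_host_template initializer */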
1984static struct scsi_host_template driver_template = {
1985 .module = THIS_MODULE,
1986 .name = "3ware 9000 Storage Controller",
1987 .queuecommand = twa_scsi_queue,
1988 .eh_host_reset_handler = twa_scsi_eh_reset,
1989 .bios_param = twa_scsi_biosparam,
1990 .change_queue_depth = scsi_change_queue_depth,
1991 .can_queue = TW_Q_LENGTH-2,
1992 .slave_configure = twa_slave_configure,
1993 .this_id = -1,
1994 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
1995 .max_sectors = TW_MAX_SECTORS,
1996 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1997 .use_clustering = ENABLE_CLUSTERING,
1998 .shost_attrs = twa_host_attrs,
1999 .emulated = 1,
2000 .no_write_same = 1,
2001};
2002
2003
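/* This function will probe and initialize a card */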
2004static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2005{
2006 struct Scsi_Host *host = NULL;
2007 TW_Device_Extension *tw_dev;
2008 unsigned long mem_addr, mem_len;
2009 int retval = -ENODEV;
2010
2011 retval = pci_enable_device(pdev);
2012 if (retval) {
2013 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2014 goto out_disable_device;
2015 }
2016
2017 pci_set_master(pdev);
2018 pci_try_set_mwi(pdev);
2019
2020 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2021 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2022 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2023 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2024 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2025 retval = -ENODEV;
2026 goto out_disable_device;
2027 }
2028
2029 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2030 if (!host) {
2031 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2032 retval = -ENOMEM;
2033 goto out_disable_device;
2034 }
2035 tw_dev = (TW_Device_Extension *)host->hostdata;
2036
2037
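	/* Save values to device extension */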
2038 tw_dev->host = host;
2039 tw_dev->tw_pci_dev = pdev;
2040
2041 if (twa_initialize_device_extension(tw_dev)) {
2042 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2043 goto out_free_device_extension;
2044 }
2045
2046
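	/* Request IO regions */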
2047 retval = pci_request_regions(pdev, "3w-9xxx");
2048 if (retval) {
2049 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2050 goto out_free_device_extension;
2051 }
2052
2053 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2054 mem_addr = pci_resource_start(pdev, 1);
2055 mem_len = pci_resource_len(pdev, 1);
2056 } else {
2057 mem_addr = pci_resource_start(pdev, 2);
2058 mem_len = pci_resource_len(pdev, 2);
2059 }
2060
2061
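	/* Save base address */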
2062 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2063 if (!tw_dev->base_addr) {
2064 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2065 goto out_release_mem_region;
2066 }
2067
2068
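	/* Disable interrupts on the card */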
2069 TW_DISABLE_INTERRUPTS(tw_dev);
2070
2071
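	/* Initialize the card */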
2072 if (twa_reset_sequence(tw_dev, 0))
2073 goto out_iounmap;
2074
2075
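	/* Set host specific parameters */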
2076 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2077 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2078 host->max_id = TW_MAX_UNITS_9650SE;
2079 else
2080 host->max_id = TW_MAX_UNITS;
2081
2082 host->max_cmd_len = TW_MAX_CDB_LEN;
2083
2084
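	/* Luns depend on firmware SRL; channels aren't supported by the adapter */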
2085 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2086 host->max_channel = 0;
2087
2088
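	/* Register the card with the kernel SCSI layer */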
2089 retval = scsi_add_host(host, &pdev->dev);
2090 if (retval) {
2091 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2092 goto out_iounmap;
2093 }
2094
2095 pci_set_drvdata(pdev, host);
2096
2097 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2098 host->host_no, mem_addr, pdev->irq);
2099 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2100 host->host_no,
2101 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2102 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2103 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2104 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2105 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2106 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2107
2108
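	/* Try to enable MSI */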
2109 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2110 !pci_enable_msi(pdev))
2111 set_bit(TW_USING_MSI, &tw_dev->flags);
2112
2113
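	/* Now setup the interrupt handler */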
2114 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2115 if (retval) {
2116 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2117 goto out_remove_host;
2118 }
2119
2120 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2121 twa_device_extension_count++;
2122
2123
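	/* Re-enable interrupts on the card */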
2124 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2125
2126
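	/* Finally, scan the host */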
2127 scsi_scan_host(host);
2128
2129 if (twa_major == -1) {
2130 if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
2131 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2132 }
2133 return 0;
2134
2135out_remove_host:
2136 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2137 pci_disable_msi(pdev);
2138 scsi_remove_host(host);
2139out_iounmap:
2140 iounmap(tw_dev->base_addr);
2141out_release_mem_region:
2142 pci_release_regions(pdev);
2143out_free_device_extension:
2144 twa_free_device_extension(tw_dev);
2145 scsi_host_put(host);
2146out_disable_device:
2147 pci_disable_device(pdev);
2148
2149 return retval;
2150}
2151
2152
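/* This function is called to remove the device */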
2153static void twa_remove(struct pci_dev *pdev)
2154{
2155 struct Scsi_Host *host = pci_get_drvdata(pdev);
2156 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2157
2158 scsi_remove_host(tw_dev->host);
2159
2160
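	/* Unregister character device */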
2161 if (twa_major >= 0) {
2162 unregister_chrdev(twa_major, "twa");
2163 twa_major = -1;
2164 }
2165
2166
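	/* Shutdown the card */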
2167 __twa_shutdown(tw_dev);
2168
2169
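	/* Disable MSI if enabled */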
2170 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2171 pci_disable_msi(pdev);
2172
2173
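	/* Free IO remapping */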
2174 iounmap(tw_dev->base_addr);
2175
2176
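	/* Free up the mem region */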
2177 pci_release_regions(pdev);
2178
2179
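	/* Free up device extension resources */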
2180 twa_free_device_extension(tw_dev);
2181
2182 scsi_host_put(tw_dev->host);
2183 pci_disable_device(pdev);
2184 twa_device_extension_count--;
2185}
2186
2187#ifdef CONFIG_PM
2188
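/* This function is called on PCI suspend */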
2189static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2190{
2191 struct Scsi_Host *host = pci_get_drvdata(pdev);
2192 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2193
2194 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2195
2196 TW_DISABLE_INTERRUPTS(tw_dev);
2197 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2198
2199 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2200 pci_disable_msi(pdev);
2201
2202
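	/* Tell the card we are shutting down */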
2203 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2204 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2205 } else {
2206 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2207 }
2208 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2209
2210 pci_save_state(pdev);
2211 pci_disable_device(pdev);
2212 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2213
2214 return 0;
2215}
2216
2217
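/* This function is called on PCI resume */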
2218static int twa_resume(struct pci_dev *pdev)
2219{
2220 int retval = 0;
2221 struct Scsi_Host *host = pci_get_drvdata(pdev);
2222 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2223
2224 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2225 pci_set_power_state(pdev, PCI_D0);
2226 pci_enable_wake(pdev, PCI_D0, 0);
2227 pci_restore_state(pdev);
2228
2229 retval = pci_enable_device(pdev);
2230 if (retval) {
2231 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2232 return retval;
2233 }
2234
2235 pci_set_master(pdev);
2236 pci_try_set_mwi(pdev);
2237
2238 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2239 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2240 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2241 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2242 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2243 retval = -ENODEV;
2244 goto out_disable_device;
2245 }
2246
2247
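	/* Initialize the card */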
2248 if (twa_reset_sequence(tw_dev, 0)) {
2249 retval = -ENODEV;
2250 goto out_disable_device;
2251 }
2252
2253
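	/* Now setup the interrupt handler */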
2254 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2255 if (retval) {
2256 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2257 retval = -ENODEV;
2258 goto out_disable_device;
2259 }
2260
2261
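	/* Re-enable MSI if it was in use before suspend */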
2262 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2263 pci_enable_msi(pdev);
2264
2265
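	/* Re-enable interrupts on the card */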
2266 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2267
2268 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2269 return 0;
2270
2271out_disable_device:
2272 scsi_remove_host(host);
2273 pci_disable_device(pdev);
2274
2275 return retval;
2276}
2277#endif
2278
2279
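/* PCI Devices supported by this driver */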
2280static struct pci_device_id twa_pci_tbl[] = {
2281 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2282 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2283 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2284 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2285 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2286 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2287 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2288 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2289 { }
2290};
2291MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2292
2293
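/* pci_driver initializer */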
2294static struct pci_driver twa_driver = {
2295 .name = "3w-9xxx",
2296 .id_table = twa_pci_tbl,
2297 .probe = twa_probe,
2298 .remove = twa_remove,
2299#ifdef CONFIG_PM
2300 .suspend = twa_suspend,
2301 .resume = twa_resume,
2302#endif
2303 .shutdown = twa_shutdown
2304};
2305
2306
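/* This function is called on driver initialization */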
2307static int __init twa_init(void)
2308{
2309 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2310
2311 return pci_register_driver(&twa_driver);
2312}
2313
2314
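/* This function is called on driver exit */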
2315static void __exit twa_exit(void)
2316{
2317 pci_unregister_driver(&twa_driver);
2318}
2319
2320module_init(twa_init);
2321module_exit(twa_exit);
2322
2323